Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:
"First round of SCSI updates for the 4.6+ merge window.

This batch includes the usual quota of driver updates (bnx2fc, mpt3sas,
hpsa, ncr5380, lpfc, hisi_sas, snic, aacraid, megaraid_sas). There's
also a multiqueue update for scsi_debug, assorted bug fixes and a few
other minor updates (refactor of scsi_sg_pools into generic code, alua
and VPD updates, and struct timeval conversions)"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (138 commits)
mpt3sas: Used "synchronize_irq()"API to synchronize timed-out IO & TMs
mpt3sas: Set maximum transfer length per IO to 4MB for VDs
mpt3sas: Updating mpt3sas driver version to 13.100.00.00
mpt3sas: Fix initial Reference tag field for 4K PI drives.
mpt3sas: Handle active cable exception event
mpt3sas: Update MPI header to 2.00.42
Revert "lpfc: Delete unnecessary checks before the function call mempool_destroy"
eata_pio: missing break statement
hpsa: Fix type ZBC conditional checks
scsi_lib: Decode T10 vendor IDs
scsi_dh_alua: do not fail for unknown VPD identification
scsi_debug: use locally assigned naa
scsi_debug: uuid for lu name
scsi_debug: vpd and mode page work
scsi_debug: add multiple queue support
bfa: fix bfa_fcb_itnim_alloc() error handling
megaraid_sas: Downgrade two success messages to info
cxlflash: Fix to resolve dead-lock during EEH recovery
scsi_debug: rework resp_report_luns
scsi_debug: use pdt constants
...

+4828 -6470
+7 -10
Documentation/scsi/g_NCR5380.txt
··· 23 23 24 24 If the default configuration does not work for you, you can use the kernel 25 25 command lines (eg using the lilo append command): 26 - ncr5380=port,irq,dma 27 - ncr53c400=port,irq 28 - or 29 - ncr5380=base,irq,dma 30 - ncr53c400=base,irq 26 + ncr5380=addr,irq 27 + ncr53c400=addr,irq 28 + ncr53c400a=addr,irq 29 + dtc3181e=addr,irq 31 30 32 31 The driver does not probe for any addresses or ports other than those in 33 32 the OVERRIDE or given to the kernel as above. ··· 35 36 /proc/scsi/g_NCR5380/x where x is the scsi card number as detected at boot 36 37 time. More info to come in the future. 37 38 38 - When NCR53c400 support is compiled in, BIOS parameters will be returned by 39 - the driver (the raw 5380 driver does not and I don't plan to fiddle with 40 - it!). 41 - 42 39 This driver works as a module. 43 40 When included as a module, parameters can be passed on the insmod/modprobe 44 41 command line: 45 42 ncr_irq=xx the interrupt 46 43 ncr_addr=xx the port or base address (for port or memory 47 44 mapped, resp.) 48 - ncr_dma=xx the DMA 49 45 ncr_5380=1 to set up for a NCR5380 board 50 46 ncr_53c400=1 to set up for a NCR53C400 board 47 + ncr_53c400a=1 to set up for a NCR53C400A board 48 + dtc_3181e=1 to set up for a Domex Technology Corp 3181E board 49 + hp_c2502=1 to set up for a Hewlett Packard C2502 board 51 50 e.g. 52 51 modprobe g_NCR5380 ncr_irq=5 ncr_addr=0x350 ncr_5380=1 53 52 for a port mapped NCR5380 board or
+8 -3
Documentation/scsi/scsi-parameters.txt
··· 27 27 aic79xx= [HW,SCSI] 28 28 See Documentation/scsi/aic79xx.txt. 29 29 30 - atascsi= [HW,SCSI] Atari SCSI 30 + atascsi= [HW,SCSI] 31 + See drivers/scsi/atari_scsi.c. 31 32 32 33 BusLogic= [HW,SCSI] 33 34 See drivers/scsi/BusLogic.c, comment before function 34 35 BusLogic_ParseDriverOptions(). 35 36 36 37 dtc3181e= [HW,SCSI] 38 + See Documentation/scsi/g_NCR5380.txt. 37 39 38 40 eata= [HW,SCSI] 39 41 ··· 53 51 ips= [HW,SCSI] Adaptec / IBM ServeRAID controller 54 52 See header of drivers/scsi/ips.c. 55 53 56 - mac5380= [HW,SCSI] Format: 57 - <can_queue>,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags> 54 + mac5380= [HW,SCSI] 55 + See drivers/scsi/mac_scsi.c. 58 56 59 57 max_luns= [SCSI] Maximum number of LUNs to probe. 60 58 Should be between 1 and 2^32-1. ··· 67 65 See header of drivers/scsi/NCR_D700.c. 68 66 69 67 ncr5380= [HW,SCSI] 68 + See Documentation/scsi/g_NCR5380.txt. 70 69 71 70 ncr53c400= [HW,SCSI] 71 + See Documentation/scsi/g_NCR5380.txt. 72 72 73 73 ncr53c400a= [HW,SCSI] 74 + See Documentation/scsi/g_NCR5380.txt. 74 75 75 76 ncr53c406a= [HW,SCSI] 76 77
+1 -1
MAINTAINERS
··· 7593 7593 L: linux-scsi@vger.kernel.org 7594 7594 S: Maintained 7595 7595 F: Documentation/scsi/g_NCR5380.txt 7596 + F: Documentation/scsi/dtc3x80.txt 7596 7597 F: drivers/scsi/NCR5380.* 7597 7598 F: drivers/scsi/arm/cumana_1.c 7598 7599 F: drivers/scsi/arm/oak.c 7599 - F: drivers/scsi/atari_NCR5380.c 7600 7600 F: drivers/scsi/atari_scsi.* 7601 7601 F: drivers/scsi/dmx3191d.c 7602 7602 F: drivers/scsi/dtc.*
+1 -1
drivers/ata/pata_icside.c
··· 294 294 295 295 static struct scsi_host_template pata_icside_sht = { 296 296 ATA_BASE_SHT(DRV_NAME), 297 - .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS, 297 + .sg_tablesize = SG_MAX_SEGMENTS, 298 298 .dma_boundary = IOMD_DMA_BOUNDARY, 299 299 }; 300 300
+3 -3
drivers/infiniband/ulp/srp/ib_srp.c
··· 81 81 82 82 module_param(indirect_sg_entries, uint, 0444); 83 83 MODULE_PARM_DESC(indirect_sg_entries, 84 - "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")"); 84 + "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")"); 85 85 86 86 module_param(allow_ext_sg, bool, 0444); 87 87 MODULE_PARM_DESC(allow_ext_sg, ··· 2819 2819 spin_unlock(&host->target_lock); 2820 2820 2821 2821 scsi_scan_target(&target->scsi_host->shost_gendev, 2822 - 0, target->scsi_id, SCAN_WILD_CARD, 0); 2822 + 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL); 2823 2823 2824 2824 if (srp_connected_ch(target) < target->ch_count || 2825 2825 target->qp_in_error) { ··· 3097 3097 3098 3098 case SRP_OPT_SG_TABLESIZE: 3099 3099 if (match_int(args, &token) || token < 1 || 3100 - token > SCSI_MAX_SG_CHAIN_SEGMENTS) { 3100 + token > SG_MAX_SEGMENTS) { 3101 3101 pr_warn("bad max sg_tablesize parameter '%s'\n", 3102 3102 p); 3103 3103 goto out;
+2 -2
drivers/message/fusion/mptsas.c
··· 2281 2281 2282 2282 dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio), 2283 2283 blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL); 2284 - if (!dma_addr_out) 2284 + if (pci_dma_mapping_error(ioc->pcidev, dma_addr_out)) 2285 2285 goto put_mf; 2286 2286 ioc->add_sge(psge, flagsLength, dma_addr_out); 2287 2287 psge += ioc->SGE_size; ··· 2296 2296 flagsLength |= blk_rq_bytes(rsp) + 4; 2297 2297 dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio), 2298 2298 blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL); 2299 - if (!dma_addr_in) 2299 + if (pci_dma_mapping_error(ioc->pcidev, dma_addr_in)) 2300 2300 goto unmap; 2301 2301 ioc->add_sge(psge, flagsLength, dma_addr_in); 2302 2302
+1 -1
drivers/message/fusion/mptspi.c
··· 1150 1150 } 1151 1151 shost_printk(KERN_INFO, shost, MYIOC_s_FMT 1152 1152 "Integrated RAID detects new device %d\n", ioc->name, disk); 1153 - scsi_scan_target(&ioc->sh->shost_gendev, 1, disk, 0, 1); 1153 + scsi_scan_target(&ioc->sh->shost_gendev, 1, disk, 0, SCSI_SCAN_RESCAN); 1154 1154 } 1155 1155 1156 1156
+2 -1
drivers/s390/scsi/zfcp_unit.c
··· 26 26 lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun); 27 27 28 28 if (rport && rport->port_state == FC_PORTSTATE_ONLINE) 29 - scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, lun, 1); 29 + scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, lun, 30 + SCSI_SCAN_MANUAL); 30 31 } 31 32 32 33 static void zfcp_unit_scsi_scan_work(struct work_struct *work)
+3 -13
drivers/scsi/Kconfig
··· 17 17 tristate "SCSI device support" 18 18 depends on BLOCK 19 19 select SCSI_DMA if HAS_DMA 20 + select SG_POOL 20 21 ---help--- 21 22 If you want to use a SCSI hard disk, SCSI tape drive, SCSI CD-ROM or 22 23 any other SCSI device under Linux, say Y and make sure that you know ··· 203 202 certain enclosure conditions to be reported and is not required. 204 203 205 204 config SCSI_CONSTANTS 206 - bool "Verbose SCSI error reporting (kernel size +=75K)" 205 + bool "Verbose SCSI error reporting (kernel size += 36K)" 207 206 depends on SCSI 208 207 help 209 208 The error messages regarding your SCSI hardware will be easier to 210 209 understand if you say Y here; it will enlarge your kernel by about 211 - 75 KB. If in doubt, say Y. 210 + 36 KB. If in doubt, say Y. 212 211 213 212 config SCSI_LOGGING 214 213 bool "SCSI logging facility" ··· 813 812 814 813 To compile this driver as a module, choose M here: the 815 814 module will be called g_NCR5380_mmio. 816 - 817 - config SCSI_GENERIC_NCR53C400 818 - bool "Enable NCR53c400 extensions" 819 - depends on SCSI_GENERIC_NCR5380 820 - help 821 - This enables certain optimizations for the NCR53c400 SCSI cards. 822 - You might as well try it out. Note that this driver will only probe 823 - for the Trantor T130B in its default configuration; you might have 824 - to pass a command line option to the kernel at boot time if it does 825 - not detect your card. See the file 826 - <file:Documentation/scsi/g_NCR5380.txt> for details. 827 815 828 816 config SCSI_IPS 829 817 tristate "IBM ServeRAID support"
+326 -335
drivers/scsi/NCR5380.c
··· 29 29 * Ronald van Cuijlenborg, Alan Cox and others. 30 30 */ 31 31 32 - /* 33 - * Further development / testing that should be done : 34 - * 1. Cleanup the NCR5380_transfer_dma function and DMA operation complete 35 - * code so that everything does the same thing that's done at the 36 - * end of a pseudo-DMA read operation. 37 - * 38 - * 2. Fix REAL_DMA (interrupt driven, polled works fine) - 39 - * basically, transfer size needs to be reduced by one 40 - * and the last byte read as is done with PSEUDO_DMA. 41 - * 42 - * 4. Test SCSI-II tagged queueing (I have no devices which support 43 - * tagged queueing) 44 - */ 32 + /* Ported to Atari by Roman Hodek and others. */ 45 33 46 - #ifndef notyet 47 - #undef REAL_DMA 48 - #endif 49 - 50 - #ifdef BOARD_REQUIRES_NO_DELAY 51 - #define io_recovery_delay(x) 52 - #else 53 - #define io_recovery_delay(x) udelay(x) 54 - #endif 34 + /* Adapted for the Sun 3 by Sam Creasey. */ 55 35 56 36 /* 57 37 * Design ··· 106 126 * DIFFERENTIAL - if defined, NCR53c81 chips will use external differential 107 127 * transceivers. 108 128 * 109 - * DONT_USE_INTR - if defined, never use interrupts, even if we probe or 110 - * override-configure an IRQ. 111 - * 112 129 * PSEUDO_DMA - if defined, PSEUDO DMA is used during the data transfer phases. 113 130 * 114 131 * REAL_DMA - if defined, REAL DMA is used during the data transfer phases. 115 - * 116 - * REAL_DMA_POLL - if defined, REAL DMA is used but the driver doesn't 117 - * rely on phase mismatch and EOP interrupts to determine end 118 - * of phase. 119 132 * 120 133 * These macros MUST be defined : 121 134 * ··· 120 147 * specific implementation of the NCR5380 121 148 * 122 149 * Either real DMA *or* pseudo DMA may be implemented 123 - * REAL functions : 124 - * NCR5380_REAL_DMA should be defined if real DMA is to be used. 125 - * Note that the DMA setup functions should return the number of bytes 126 - * that they were able to program the controller for. 
127 - * 128 - * Also note that generic i386/PC versions of these macros are 129 - * available as NCR5380_i386_dma_write_setup, 130 - * NCR5380_i386_dma_read_setup, and NCR5380_i386_dma_residual. 131 150 * 132 151 * NCR5380_dma_write_setup(instance, src, count) - initialize 133 152 * NCR5380_dma_read_setup(instance, dst, count) - initialize 134 153 * NCR5380_dma_residual(instance); - residual count 135 - * 136 - * PSEUDO functions : 137 - * NCR5380_pwrite(instance, src, count) 138 - * NCR5380_pread(instance, dst, count); 139 154 * 140 155 * The generic driver is initialized by calling NCR5380_init(instance), 141 156 * after setting the appropriate host specific fields and ID. If the 142 157 * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance, 143 158 * possible) function may be used. 144 159 */ 160 + 161 + #ifndef NCR5380_io_delay 162 + #define NCR5380_io_delay(x) 163 + #endif 164 + 165 + #ifndef NCR5380_acquire_dma_irq 166 + #define NCR5380_acquire_dma_irq(x) (1) 167 + #endif 168 + 169 + #ifndef NCR5380_release_dma_irq 170 + #define NCR5380_release_dma_irq(x) 171 + #endif 145 172 146 173 static int do_abort(struct Scsi_Host *); 147 174 static void do_reset(struct Scsi_Host *); ··· 253 280 {0, NULL} 254 281 }, 255 282 basrs[] = { 283 + {BASR_END_DMA_TRANSFER, "END OF DMA"}, 284 + {BASR_DRQ, "DRQ"}, 285 + {BASR_PARITY_ERROR, "PARITY ERROR"}, 286 + {BASR_IRQ, "IRQ"}, 287 + {BASR_PHASE_MATCH, "PHASE MATCH"}, 288 + {BASR_BUSY_ERROR, "BUSY ERROR"}, 256 289 {BASR_ATN, "ATN"}, 257 290 {BASR_ACK, "ACK"}, 258 291 {0, NULL} 259 292 }, 260 293 icrs[] = { 261 294 {ICR_ASSERT_RST, "ASSERT RST"}, 295 + {ICR_ARBITRATION_PROGRESS, "ARB. 
IN PROGRESS"}, 296 + {ICR_ARBITRATION_LOST, "LOST ARB."}, 262 297 {ICR_ASSERT_ACK, "ASSERT ACK"}, 263 298 {ICR_ASSERT_BSY, "ASSERT BSY"}, 264 299 {ICR_ASSERT_SEL, "ASSERT SEL"}, ··· 275 294 {0, NULL} 276 295 }, 277 296 mrs[] = { 278 - {MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"}, 279 - {MR_TARGET, "MODE TARGET"}, 280 - {MR_ENABLE_PAR_CHECK, "MODE PARITY CHECK"}, 281 - {MR_ENABLE_PAR_INTR, "MODE PARITY INTR"}, 282 - {MR_ENABLE_EOP_INTR, "MODE EOP INTR"}, 283 - {MR_MONITOR_BSY, "MODE MONITOR BSY"}, 284 - {MR_DMA_MODE, "MODE DMA"}, 285 - {MR_ARBITRATE, "MODE ARBITRATION"}, 297 + {MR_BLOCK_DMA_MODE, "BLOCK DMA MODE"}, 298 + {MR_TARGET, "TARGET"}, 299 + {MR_ENABLE_PAR_CHECK, "PARITY CHECK"}, 300 + {MR_ENABLE_PAR_INTR, "PARITY INTR"}, 301 + {MR_ENABLE_EOP_INTR, "EOP INTR"}, 302 + {MR_MONITOR_BSY, "MONITOR BSY"}, 303 + {MR_DMA_MODE, "DMA MODE"}, 304 + {MR_ARBITRATE, "ARBITRATE"}, 286 305 {0, NULL} 287 306 }; 288 307 ··· 303 322 icr = NCR5380_read(INITIATOR_COMMAND_REG); 304 323 basr = NCR5380_read(BUS_AND_STATUS_REG); 305 324 306 - printk("STATUS_REG: %02x ", status); 325 + printk(KERN_DEBUG "SR = 0x%02x : ", status); 307 326 for (i = 0; signals[i].mask; ++i) 308 327 if (status & signals[i].mask) 309 - printk(",%s", signals[i].name); 310 - printk("\nBASR: %02x ", basr); 328 + printk(KERN_CONT "%s, ", signals[i].name); 329 + printk(KERN_CONT "\nBASR = 0x%02x : ", basr); 311 330 for (i = 0; basrs[i].mask; ++i) 312 331 if (basr & basrs[i].mask) 313 - printk(",%s", basrs[i].name); 314 - printk("\nICR: %02x ", icr); 332 + printk(KERN_CONT "%s, ", basrs[i].name); 333 + printk(KERN_CONT "\nICR = 0x%02x : ", icr); 315 334 for (i = 0; icrs[i].mask; ++i) 316 335 if (icr & icrs[i].mask) 317 - printk(",%s", icrs[i].name); 318 - printk("\nMODE: %02x ", mr); 336 + printk(KERN_CONT "%s, ", icrs[i].name); 337 + printk(KERN_CONT "\nMR = 0x%02x : ", mr); 319 338 for (i = 0; mrs[i].mask; ++i) 320 339 if (mr & mrs[i].mask) 321 - printk(",%s", mrs[i].name); 322 - printk("\n"); 340 + printk(KERN_CONT 
"%s, ", mrs[i].name); 341 + printk(KERN_CONT "\n"); 323 342 } 324 343 325 344 static struct { ··· 458 477 instance->base, instance->irq, 459 478 instance->can_queue, instance->cmd_per_lun, 460 479 instance->sg_tablesize, instance->this_id, 461 - hostdata->flags & FLAG_NO_DMA_FIXUP ? "NO_DMA_FIXUP " : "", 480 + hostdata->flags & FLAG_DMA_FIXUP ? "DMA_FIXUP " : "", 462 481 hostdata->flags & FLAG_NO_PSEUDO_DMA ? "NO_PSEUDO_DMA " : "", 463 482 hostdata->flags & FLAG_TOSHIBA_DELAY ? "TOSHIBA_DELAY " : "", 464 - #ifdef AUTOPROBE_IRQ 465 - "AUTOPROBE_IRQ " 466 - #endif 467 483 #ifdef DIFFERENTIAL 468 484 "DIFFERENTIAL " 469 - #endif 470 - #ifdef REAL_DMA 471 - "REAL_DMA " 472 - #endif 473 - #ifdef REAL_DMA_POLL 474 - "REAL_DMA_POLL " 475 485 #endif 476 486 #ifdef PARITY 477 487 "PARITY " 478 488 #endif 479 - #ifdef PSEUDO_DMA 480 - "PSEUDO_DMA " 481 - #endif 482 489 ""); 483 490 } 484 - 485 - #ifdef PSEUDO_DMA 486 - static int __maybe_unused NCR5380_write_info(struct Scsi_Host *instance, 487 - char *buffer, int length) 488 - { 489 - struct NCR5380_hostdata *hostdata = shost_priv(instance); 490 - 491 - hostdata->spin_max_r = 0; 492 - hostdata->spin_max_w = 0; 493 - return 0; 494 - } 495 - 496 - static int __maybe_unused NCR5380_show_info(struct seq_file *m, 497 - struct Scsi_Host *instance) 498 - { 499 - struct NCR5380_hostdata *hostdata = shost_priv(instance); 500 - 501 - seq_printf(m, "Highwater I/O busy spin counts: write %d, read %d\n", 502 - hostdata->spin_max_w, hostdata->spin_max_r); 503 - return 0; 504 - } 505 - #endif 506 491 507 492 /** 508 493 * NCR5380_init - initialise an NCR5380 ··· 490 543 int i; 491 544 unsigned long deadline; 492 545 546 + instance->max_lun = 7; 547 + 493 548 hostdata->host = instance; 494 549 hostdata->id_mask = 1 << instance->this_id; 495 550 hostdata->id_higher_mask = 0; ··· 500 551 hostdata->id_higher_mask |= i; 501 552 for (i = 0; i < 8; ++i) 502 553 hostdata->busy[i] = 0; 503 - #ifdef REAL_DMA 504 - hostdata->dmalen = 0; 505 - #endif 
554 + hostdata->dma_len = 0; 555 + 506 556 spin_lock_init(&hostdata->lock); 507 557 hostdata->connected = NULL; 508 558 hostdata->sensing = NULL; ··· 667 719 668 720 cmd->result = 0; 669 721 722 + if (!NCR5380_acquire_dma_irq(instance)) 723 + return SCSI_MLQUEUE_HOST_BUSY; 724 + 670 725 spin_lock_irqsave(&hostdata->lock, flags); 671 726 672 727 /* ··· 692 741 /* Kick off command processing */ 693 742 queue_work(hostdata->work_q, &hostdata->main_task); 694 743 return 0; 744 + } 745 + 746 + static inline void maybe_release_dma_irq(struct Scsi_Host *instance) 747 + { 748 + struct NCR5380_hostdata *hostdata = shost_priv(instance); 749 + 750 + /* Caller does the locking needed to set & test these data atomically */ 751 + if (list_empty(&hostdata->disconnected) && 752 + list_empty(&hostdata->unissued) && 753 + list_empty(&hostdata->autosense) && 754 + !hostdata->connected && 755 + !hostdata->selecting) 756 + NCR5380_release_dma_irq(instance); 695 757 } 696 758 697 759 /** ··· 808 844 809 845 if (!NCR5380_select(instance, cmd)) { 810 846 dsprintk(NDEBUG_MAIN, instance, "main: select complete\n"); 847 + maybe_release_dma_irq(instance); 811 848 } else { 812 849 dsprintk(NDEBUG_MAIN | NDEBUG_QUEUES, instance, 813 850 "main: select failed, returning %p to queue\n", cmd); 814 851 requeue_cmd(instance, cmd); 815 852 } 816 853 } 817 - if (hostdata->connected 818 - #ifdef REAL_DMA 819 - && !hostdata->dmalen 820 - #endif 821 - ) { 854 + if (hostdata->connected && !hostdata->dma_len) { 822 855 dsprintk(NDEBUG_MAIN, instance, "main: performing information transfer\n"); 823 856 NCR5380_information_transfer(instance); 824 857 done = 0; ··· 826 865 } while (!done); 827 866 } 828 867 829 - #ifndef DONT_USE_INTR 868 + /* 869 + * NCR5380_dma_complete - finish DMA transfer 870 + * @instance: the scsi host instance 871 + * 872 + * Called by the interrupt handler when DMA finishes or a phase 873 + * mismatch occurs (which would end the DMA transfer). 
874 + */ 875 + 876 + static void NCR5380_dma_complete(struct Scsi_Host *instance) 877 + { 878 + struct NCR5380_hostdata *hostdata = shost_priv(instance); 879 + int transferred; 880 + unsigned char **data; 881 + int *count; 882 + int saved_data = 0, overrun = 0; 883 + unsigned char p; 884 + 885 + if (hostdata->read_overruns) { 886 + p = hostdata->connected->SCp.phase; 887 + if (p & SR_IO) { 888 + udelay(10); 889 + if ((NCR5380_read(BUS_AND_STATUS_REG) & 890 + (BASR_PHASE_MATCH | BASR_ACK)) == 891 + (BASR_PHASE_MATCH | BASR_ACK)) { 892 + saved_data = NCR5380_read(INPUT_DATA_REG); 893 + overrun = 1; 894 + dsprintk(NDEBUG_DMA, instance, "read overrun handled\n"); 895 + } 896 + } 897 + } 898 + 899 + #ifdef CONFIG_SUN3 900 + if ((sun3scsi_dma_finish(rq_data_dir(hostdata->connected->request)))) { 901 + pr_err("scsi%d: overrun in UDC counter -- not prepared to deal with this!\n", 902 + instance->host_no); 903 + BUG(); 904 + } 905 + 906 + if ((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | BASR_ACK)) == 907 + (BASR_PHASE_MATCH | BASR_ACK)) { 908 + pr_err("scsi%d: BASR %02x\n", instance->host_no, 909 + NCR5380_read(BUS_AND_STATUS_REG)); 910 + pr_err("scsi%d: bus stuck in data phase -- probably a single byte overrun!\n", 911 + instance->host_no); 912 + BUG(); 913 + } 914 + #endif 915 + 916 + NCR5380_write(MODE_REG, MR_BASE); 917 + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 918 + NCR5380_read(RESET_PARITY_INTERRUPT_REG); 919 + 920 + transferred = hostdata->dma_len - NCR5380_dma_residual(instance); 921 + hostdata->dma_len = 0; 922 + 923 + data = (unsigned char **)&hostdata->connected->SCp.ptr; 924 + count = &hostdata->connected->SCp.this_residual; 925 + *data += transferred; 926 + *count -= transferred; 927 + 928 + if (hostdata->read_overruns) { 929 + int cnt, toPIO; 930 + 931 + if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) { 932 + cnt = toPIO = hostdata->read_overruns; 933 + if (overrun) { 934 + dsprintk(NDEBUG_DMA, instance, 935 + "Got an input 
overrun, using saved byte\n"); 936 + *(*data)++ = saved_data; 937 + (*count)--; 938 + cnt--; 939 + toPIO--; 940 + } 941 + if (toPIO > 0) { 942 + dsprintk(NDEBUG_DMA, instance, 943 + "Doing %d byte PIO to 0x%p\n", cnt, *data); 944 + NCR5380_transfer_pio(instance, &p, &cnt, data); 945 + *count -= toPIO - cnt; 946 + } 947 + } 948 + } 949 + } 830 950 831 951 /** 832 952 * NCR5380_intr - generic NCR5380 irq handler ··· 943 901 * the Busy Monitor interrupt is enabled together with DMA Mode. 944 902 */ 945 903 946 - static irqreturn_t NCR5380_intr(int irq, void *dev_id) 904 + static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id) 947 905 { 948 906 struct Scsi_Host *instance = dev_id; 949 907 struct NCR5380_hostdata *hostdata = shost_priv(instance); ··· 961 919 dsprintk(NDEBUG_INTR, instance, "IRQ %d, BASR 0x%02x, SR 0x%02x, MR 0x%02x\n", 962 920 irq, basr, sr, mr); 963 921 964 - #if defined(REAL_DMA) 965 922 if ((mr & MR_DMA_MODE) || (mr & MR_MONITOR_BSY)) { 966 923 /* Probably End of DMA, Phase Mismatch or Loss of BSY. 967 924 * We ack IRQ after clearing Mode Register. Workarounds ··· 969 928 970 929 dsprintk(NDEBUG_INTR, instance, "interrupt in DMA mode\n"); 971 930 972 - int transferred; 973 - 974 - if (!hostdata->connected) 975 - panic("scsi%d : DMA interrupt with no connected cmd\n", 976 - instance->hostno); 977 - 978 - transferred = hostdata->dmalen - NCR5380_dma_residual(instance); 979 - hostdata->connected->SCp.this_residual -= transferred; 980 - hostdata->connected->SCp.ptr += transferred; 981 - hostdata->dmalen = 0; 982 - 983 - /* FIXME: we need to poll briefly then defer a workqueue task ! 
*/ 984 - NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, BASR_ACK, 0, 2 * HZ); 985 - 986 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 987 - NCR5380_write(MODE_REG, MR_BASE); 988 - NCR5380_read(RESET_PARITY_INTERRUPT_REG); 989 - } else 990 - #endif /* REAL_DMA */ 991 - if ((NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_mask) && 931 + if (hostdata->connected) { 932 + NCR5380_dma_complete(instance); 933 + queue_work(hostdata->work_q, &hostdata->main_task); 934 + } else { 935 + NCR5380_write(MODE_REG, MR_BASE); 936 + NCR5380_read(RESET_PARITY_INTERRUPT_REG); 937 + } 938 + } else if ((NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_mask) && 992 939 (sr & (SR_SEL | SR_IO | SR_BSY | SR_RST)) == (SR_SEL | SR_IO)) { 993 940 /* Probably reselected */ 994 941 NCR5380_write(SELECT_ENABLE_REG, 0); ··· 995 966 NCR5380_read(RESET_PARITY_INTERRUPT_REG); 996 967 997 968 dsprintk(NDEBUG_INTR, instance, "unknown interrupt\n"); 969 + #ifdef SUN3_SCSI_VME 970 + dregs->csr |= CSR_DMA_ENABLE; 971 + #endif 998 972 } 999 973 handled = 1; 1000 974 } else { 1001 975 shost_printk(KERN_NOTICE, instance, "interrupt without IRQ bit\n"); 976 + #ifdef SUN3_SCSI_VME 977 + dregs->csr |= CSR_DMA_ENABLE; 978 + #endif 1002 979 } 1003 980 1004 981 spin_unlock_irqrestore(&hostdata->lock, flags); 1005 982 1006 983 return IRQ_RETVAL(handled); 1007 984 } 1008 - 1009 - #endif 1010 985 1011 986 /* 1012 987 * Function : int NCR5380_select(struct Scsi_Host *instance, ··· 1250 1217 * was true but before BSY was false during selection, the information 1251 1218 * transfer phase should be a MESSAGE OUT phase so that we can send the 1252 1219 * IDENTIFY message. 1253 - * 1254 - * If SCSI-II tagged queuing is enabled, we also send a SIMPLE_QUEUE_TAG 1255 - * message (2 bytes) with a tag ID that we increment with every command 1256 - * until it wraps back to 0. 
1257 - * 1258 - * XXX - it turns out that there are some broken SCSI-II devices, 1259 - * which claim to support tagged queuing but fail when more than 1260 - * some number of commands are issued at once. 1261 1220 */ 1262 1221 1263 1222 /* Wait for start of REQ/ACK handshake */ ··· 1272 1247 tmp[0] = IDENTIFY(((instance->irq == NO_IRQ) ? 0 : 1), cmd->device->lun); 1273 1248 1274 1249 len = 1; 1275 - cmd->tag = 0; 1276 - 1277 - /* Send message(s) */ 1278 1250 data = tmp; 1279 1251 phase = PHASE_MSGOUT; 1280 1252 NCR5380_transfer_pio(instance, &phase, &len, &data); ··· 1280 1258 1281 1259 hostdata->connected = cmd; 1282 1260 hostdata->busy[cmd->device->id] |= 1 << cmd->device->lun; 1261 + 1262 + #ifdef SUN3_SCSI_VME 1263 + dregs->csr |= CSR_INTR; 1264 + #endif 1283 1265 1284 1266 initialize_SCp(cmd); 1285 1267 ··· 1521 1495 return -1; 1522 1496 } 1523 1497 1524 - #if defined(REAL_DMA) || defined(PSEUDO_DMA) || defined (REAL_DMA_POLL) 1525 1498 /* 1526 1499 * Function : int NCR5380_transfer_dma (struct Scsi_Host *instance, 1527 1500 * unsigned char *phase, int *count, unsigned char **data) ··· 1545 1520 unsigned char **data) 1546 1521 { 1547 1522 struct NCR5380_hostdata *hostdata = shost_priv(instance); 1548 - register int c = *count; 1549 - register unsigned char p = *phase; 1550 - register unsigned char *d = *data; 1523 + int c = *count; 1524 + unsigned char p = *phase; 1525 + unsigned char *d = *data; 1551 1526 unsigned char tmp; 1552 - int foo; 1553 - #if defined(REAL_DMA_POLL) 1554 - int cnt, toPIO; 1555 - unsigned char saved_data = 0, overrun = 0, residue; 1556 - #endif 1527 + int result = 0; 1557 1528 1558 1529 if ((tmp = (NCR5380_read(STATUS_REG) & PHASE_MASK)) != p) { 1559 1530 *phase = tmp; 1560 1531 return -1; 1561 1532 } 1562 - #if defined(REAL_DMA) || defined(REAL_DMA_POLL) 1533 + 1534 + hostdata->connected->SCp.phase = p; 1535 + 1563 1536 if (p & SR_IO) { 1564 - if (!(hostdata->flags & FLAG_NO_DMA_FIXUPS)) 1565 - c -= 2; 1537 + if 
(hostdata->read_overruns) 1538 + c -= hostdata->read_overruns; 1539 + else if (hostdata->flags & FLAG_DMA_FIXUP) 1540 + --c; 1566 1541 } 1567 - hostdata->dma_len = (p & SR_IO) ? NCR5380_dma_read_setup(instance, d, c) : NCR5380_dma_write_setup(instance, d, c); 1568 1542 1569 1543 dsprintk(NDEBUG_DMA, instance, "initializing DMA %s: length %d, address %p\n", 1570 - (p & SR_IO) ? "receive" : "send", c, *data); 1544 + (p & SR_IO) ? "receive" : "send", c, d); 1545 + 1546 + #ifdef CONFIG_SUN3 1547 + /* send start chain */ 1548 + sun3scsi_dma_start(c, *data); 1571 1549 #endif 1572 1550 1573 1551 NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p)); 1574 - 1575 - #ifdef REAL_DMA 1576 1552 NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY | 1577 1553 MR_ENABLE_EOP_INTR); 1578 - #elif defined(REAL_DMA_POLL) 1579 - NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY); 1580 - #else 1581 - /* 1582 - * Note : on my sample board, watch-dog timeouts occurred when interrupts 1583 - * were not disabled for the duration of a single DMA transfer, from 1584 - * before the setting of DMA mode to after transfer of the last byte. 1585 - */ 1586 1554 1587 - if (hostdata->flags & FLAG_NO_DMA_FIXUP) 1588 - NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY | 1589 - MR_ENABLE_EOP_INTR); 1590 - else 1591 - NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY); 1592 - #endif /* def REAL_DMA */ 1593 - 1594 - dprintk(NDEBUG_DMA, "scsi%d : mode reg = 0x%X\n", instance->host_no, NCR5380_read(MODE_REG)); 1555 + if (!(hostdata->flags & FLAG_LATE_DMA_SETUP)) { 1556 + /* On the Medusa, it is a must to initialize the DMA before 1557 + * starting the NCR. This is also the cleaner way for the TT. 1558 + */ 1559 + if (p & SR_IO) 1560 + result = NCR5380_dma_recv_setup(instance, d, c); 1561 + else 1562 + result = NCR5380_dma_send_setup(instance, d, c); 1563 + } 1595 1564 1596 1565 /* 1597 1566 * On the PAS16 at least I/O recovery delays are not needed here. 
··· 1593 1574 */ 1594 1575 1595 1576 if (p & SR_IO) { 1596 - io_recovery_delay(1); 1577 + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 1578 + NCR5380_io_delay(1); 1597 1579 NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0); 1598 1580 } else { 1599 - io_recovery_delay(1); 1581 + NCR5380_io_delay(1); 1600 1582 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA); 1601 - io_recovery_delay(1); 1583 + NCR5380_io_delay(1); 1602 1584 NCR5380_write(START_DMA_SEND_REG, 0); 1603 - io_recovery_delay(1); 1585 + NCR5380_io_delay(1); 1604 1586 } 1605 1587 1606 - #if defined(REAL_DMA_POLL) 1607 - do { 1608 - tmp = NCR5380_read(BUS_AND_STATUS_REG); 1609 - } while ((tmp & BASR_PHASE_MATCH) && !(tmp & (BASR_BUSY_ERROR | BASR_END_DMA_TRANSFER))); 1588 + #ifdef CONFIG_SUN3 1589 + #ifdef SUN3_SCSI_VME 1590 + dregs->csr |= CSR_DMA_ENABLE; 1591 + #endif 1592 + sun3_dma_active = 1; 1593 + #endif 1594 + 1595 + if (hostdata->flags & FLAG_LATE_DMA_SETUP) { 1596 + /* On the Falcon, the DMA setup must be done after the last 1597 + * NCR access, else the DMA setup gets trashed! 1598 + */ 1599 + if (p & SR_IO) 1600 + result = NCR5380_dma_recv_setup(instance, d, c); 1601 + else 1602 + result = NCR5380_dma_send_setup(instance, d, c); 1603 + } 1604 + 1605 + /* On failure, NCR5380_dma_xxxx_setup() returns a negative int. */ 1606 + if (result < 0) 1607 + return result; 1608 + 1609 + /* For real DMA, result is the byte count. DMA interrupt is expected. */ 1610 + if (result > 0) { 1611 + hostdata->dma_len = result; 1612 + return 0; 1613 + } 1614 + 1615 + /* The result is zero iff pseudo DMA send/receive was completed. */ 1616 + hostdata->dma_len = c; 1610 1617 1611 1618 /* 1612 - * At this point, either we've completed DMA, or we have a phase mismatch, 1613 - * or we've unexpectedly lost BUSY (which is a real error). 1619 + * A note regarding the DMA errata workarounds for early NMOS silicon. 
1614 1620 * 1615 1621 * For DMA sends, we want to wait until the last byte has been 1616 1622 * transferred out over the bus before we turn off DMA mode. Alas, there ··· 1662 1618 * properly, or the target switches to MESSAGE IN phase to signal a 1663 1619 * disconnection (either operation bringing the DMA to a clean halt). 1664 1620 * However, in order to handle scatter-receive, we must work around the 1665 - * problem. The chosen fix is to DMA N-2 bytes, then check for the 1621 + * problem. The chosen fix is to DMA fewer bytes, then check for the 1666 1622 * condition before taking the NCR5380 out of DMA mode. One or two extra 1667 1623 * bytes are transferred via PIO as necessary to fill out the original 1668 1624 * request. 1669 1625 */ 1670 1626 1671 - if (p & SR_IO) { 1672 - if (!(hostdata->flags & FLAG_NO_DMA_FIXUPS)) { 1673 - udelay(10); 1674 - if ((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | BASR_ACK)) == 1675 - (BASR_PHASE_MATCH | BASR_ACK)) { 1676 - saved_data = NCR5380_read(INPUT_DATA_REGISTER); 1677 - overrun = 1; 1678 - } 1679 - } 1680 - } else { 1681 - int limit = 100; 1682 - while (((tmp = NCR5380_read(BUS_AND_STATUS_REG)) & BASR_ACK) || (NCR5380_read(STATUS_REG) & SR_REQ)) { 1683 - if (!(tmp & BASR_PHASE_MATCH)) 1684 - break; 1685 - if (--limit < 0) 1686 - break; 1687 - } 1688 - } 1689 - 1690 - dsprintk(NDEBUG_DMA, "polled DMA transfer complete, basr 0x%02x, sr 0x%02x\n", 1691 - tmp, NCR5380_read(STATUS_REG)); 1692 - 1693 - NCR5380_write(MODE_REG, MR_BASE); 1694 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 1695 - 1696 - residue = NCR5380_dma_residual(instance); 1697 - c -= residue; 1698 - *count -= c; 1699 - *data += c; 1700 - *phase = NCR5380_read(STATUS_REG) & PHASE_MASK; 1701 - 1702 - if (!(hostdata->flags & FLAG_NO_DMA_FIXUPS) && 1703 - *phase == p && (p & SR_IO) && residue == 0) { 1704 - if (overrun) { 1705 - dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n"); 1706 - **data = saved_data; 1707 - *data += 1; 1708 - 
*count -= 1; 1709 - cnt = toPIO = 1; 1710 - } else { 1711 - printk("No overrun??\n"); 1712 - cnt = toPIO = 2; 1713 - } 1714 - dprintk(NDEBUG_DMA, "Doing %d-byte PIO to 0x%X\n", cnt, *data); 1715 - NCR5380_transfer_pio(instance, phase, &cnt, data); 1716 - *count -= toPIO - cnt; 1717 - } 1718 - 1719 - dprintk(NDEBUG_DMA, "Return with data ptr = 0x%X, count %d, last 0x%X, next 0x%X\n", *data, *count, *(*data + *count - 1), *(*data + *count)); 1720 - return 0; 1721 - 1722 - #elif defined(REAL_DMA) 1723 - return 0; 1724 - #else /* defined(REAL_DMA_POLL) */ 1725 - if (p & SR_IO) { 1726 - foo = NCR5380_pread(instance, d, 1727 - hostdata->flags & FLAG_NO_DMA_FIXUP ? c : c - 1); 1728 - if (!foo && !(hostdata->flags & FLAG_NO_DMA_FIXUP)) { 1627 + if (hostdata->flags & FLAG_DMA_FIXUP) { 1628 + if (p & SR_IO) { 1729 1629 /* 1730 - * We can't disable DMA mode after successfully transferring 1731 - * what we plan to be the last byte, since that would open up 1732 - * a race condition where if the target asserted REQ before 1733 - * we got the DMA mode reset, the NCR5380 would have latched 1734 - * an additional byte into the INPUT DATA register and we'd 1735 - * have dropped it. 1736 - * 1737 - * The workaround was to transfer one fewer bytes than we 1630 + * The workaround was to transfer fewer bytes than we 1738 1631 * intended to with the pseudo-DMA read function, wait for 1739 1632 * the chip to latch the last byte, read it, and then disable 1740 1633 * pseudo-DMA mode. 
··· 1687 1706 1688 1707 if (NCR5380_poll_politely(instance, BUS_AND_STATUS_REG, 1689 1708 BASR_DRQ, BASR_DRQ, HZ) < 0) { 1690 - foo = -1; 1709 + result = -1; 1691 1710 shost_printk(KERN_ERR, instance, "PDMA read: DRQ timeout\n"); 1692 1711 } 1693 1712 if (NCR5380_poll_politely(instance, STATUS_REG, 1694 1713 SR_REQ, 0, HZ) < 0) { 1695 - foo = -1; 1714 + result = -1; 1696 1715 shost_printk(KERN_ERR, instance, "PDMA read: !REQ timeout\n"); 1697 1716 } 1698 - d[c - 1] = NCR5380_read(INPUT_DATA_REG); 1699 - } 1700 - } else { 1701 - foo = NCR5380_pwrite(instance, d, c); 1702 - if (!foo && !(hostdata->flags & FLAG_NO_DMA_FIXUP)) { 1717 + d[*count - 1] = NCR5380_read(INPUT_DATA_REG); 1718 + } else { 1703 1719 /* 1704 1720 * Wait for the last byte to be sent. If REQ is being asserted for 1705 1721 * the byte we're interested, we'll ACK it and it will go false. ··· 1704 1726 if (NCR5380_poll_politely2(instance, 1705 1727 BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ, 1706 1728 BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, HZ) < 0) { 1707 - foo = -1; 1729 + result = -1; 1708 1730 shost_printk(KERN_ERR, instance, "PDMA write: DRQ and phase timeout\n"); 1709 1731 } 1710 1732 } 1711 1733 } 1712 - NCR5380_write(MODE_REG, MR_BASE); 1713 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 1714 - NCR5380_read(RESET_PARITY_INTERRUPT_REG); 1715 - *data = d + c; 1716 - *count = 0; 1717 - *phase = NCR5380_read(STATUS_REG) & PHASE_MASK; 1718 - return foo; 1719 - #endif /* def REAL_DMA */ 1734 + 1735 + NCR5380_dma_complete(instance); 1736 + return result; 1720 1737 } 1721 - #endif /* defined(REAL_DMA) | defined(PSEUDO_DMA) */ 1722 1738 1723 1739 /* 1724 1740 * Function : NCR5380_information_transfer (struct Scsi_Host *instance) ··· 1742 1770 unsigned char phase, tmp, extended_msg[10], old_phase = 0xff; 1743 1771 struct scsi_cmnd *cmd; 1744 1772 1773 + #ifdef SUN3_SCSI_VME 1774 + dregs->csr |= CSR_INTR; 1775 + #endif 1776 + 1745 1777 while ((cmd = hostdata->connected)) { 1746 1778 struct NCR5380_cmd 
*ncmd = scsi_cmd_priv(cmd); 1747 1779 ··· 1757 1781 old_phase = phase; 1758 1782 NCR5380_dprint_phase(NDEBUG_INFORMATION, instance); 1759 1783 } 1784 + #ifdef CONFIG_SUN3 1785 + if (phase == PHASE_CMDOUT) { 1786 + void *d; 1787 + unsigned long count; 1788 + 1789 + if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) { 1790 + count = cmd->SCp.buffer->length; 1791 + d = sg_virt(cmd->SCp.buffer); 1792 + } else { 1793 + count = cmd->SCp.this_residual; 1794 + d = cmd->SCp.ptr; 1795 + } 1796 + 1797 + if (sun3_dma_setup_done != cmd && 1798 + sun3scsi_dma_xfer_len(count, cmd) > 0) { 1799 + sun3scsi_dma_setup(instance, d, count, 1800 + rq_data_dir(cmd->request)); 1801 + sun3_dma_setup_done = cmd; 1802 + } 1803 + #ifdef SUN3_SCSI_VME 1804 + dregs->csr |= CSR_INTR; 1805 + #endif 1806 + } 1807 + #endif /* CONFIG_SUN3 */ 1808 + 1760 1809 if (sink && (phase != PHASE_MSGOUT)) { 1761 1810 NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); 1762 1811 ··· 1832 1831 * in an unconditional loop. 1833 1832 */ 1834 1833 1835 - #if defined(PSEUDO_DMA) || defined(REAL_DMA_POLL) 1836 1834 transfersize = 0; 1837 - if (!cmd->device->borken && 1838 - !(hostdata->flags & FLAG_NO_PSEUDO_DMA)) 1835 + if (!cmd->device->borken) 1839 1836 transfersize = NCR5380_dma_xfer_len(instance, cmd, phase); 1840 1837 1841 - if (transfersize) { 1838 + if (transfersize > 0) { 1842 1839 len = transfersize; 1843 1840 if (NCR5380_transfer_dma(instance, &phase, 1844 1841 &len, (unsigned char **)&cmd->SCp.ptr)) { ··· 1852 1853 do_abort(instance); 1853 1854 cmd->result = DID_ERROR << 16; 1854 1855 /* XXX - need to source or sink data here, as appropriate */ 1855 - } else 1856 - cmd->SCp.this_residual -= transfersize - len; 1857 - } else 1858 - #endif /* defined(PSEUDO_DMA) || defined(REAL_DMA_POLL) */ 1859 - { 1856 + } 1857 + } else { 1860 1858 /* Break up transfer into 3 ms chunks, 1861 1859 * presuming 6 accesses per handshake. 
1862 1860 */ ··· 1864 1868 (unsigned char **)&cmd->SCp.ptr); 1865 1869 cmd->SCp.this_residual -= transfersize - len; 1866 1870 } 1871 + #ifdef CONFIG_SUN3 1872 + if (sun3_dma_setup_done == cmd) 1873 + sun3_dma_setup_done = NULL; 1874 + #endif 1867 1875 return; 1868 1876 case PHASE_MSGIN: 1869 1877 len = 1; ··· 1912 1912 1913 1913 /* Enable reselect interrupts */ 1914 1914 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); 1915 + 1916 + maybe_release_dma_irq(instance); 1915 1917 return; 1916 1918 case MESSAGE_REJECT: 1917 1919 /* Accept message by clearing ACK */ ··· 1946 1944 1947 1945 /* Enable reselect interrupts */ 1948 1946 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); 1947 + #ifdef SUN3_SCSI_VME 1948 + dregs->csr |= CSR_DMA_ENABLE; 1949 + #endif 1949 1950 return; 1950 1951 /* 1951 1952 * The SCSI data pointer is *IMPLICITLY* saved on a disconnect ··· 2052 2047 hostdata->connected = NULL; 2053 2048 cmd->result = DID_ERROR << 16; 2054 2049 complete_cmd(instance, cmd); 2050 + maybe_release_dma_irq(instance); 2055 2051 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); 2056 2052 return; 2057 2053 } ··· 2100 2094 { 2101 2095 struct NCR5380_hostdata *hostdata = shost_priv(instance); 2102 2096 unsigned char target_mask; 2103 - unsigned char lun, phase; 2104 - int len; 2097 + unsigned char lun; 2105 2098 unsigned char msg[3]; 2106 - unsigned char *data; 2107 2099 struct NCR5380_cmd *ncmd; 2108 2100 struct scsi_cmnd *tmp; 2109 2101 ··· 2143 2139 return; 2144 2140 } 2145 2141 2146 - len = 1; 2147 - data = msg; 2148 - phase = PHASE_MSGIN; 2149 - NCR5380_transfer_pio(instance, &phase, &len, &data); 2142 + #ifdef CONFIG_SUN3 2143 + /* acknowledge toggle to MSGIN */ 2144 + NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(PHASE_MSGIN)); 2150 2145 2151 - if (len) { 2152 - do_abort(instance); 2153 - return; 2146 + /* peek at the byte without really hitting the bus */ 2147 + msg[0] = NCR5380_read(CURRENT_SCSI_DATA_REG); 2148 + #else 2149 + { 2150 + int len = 1; 2151 
+ unsigned char *data = msg; 2152 + unsigned char phase = PHASE_MSGIN; 2153 + 2154 + NCR5380_transfer_pio(instance, &phase, &len, &data); 2155 + 2156 + if (len) { 2157 + do_abort(instance); 2158 + return; 2159 + } 2154 2160 } 2161 + #endif /* CONFIG_SUN3 */ 2155 2162 2156 2163 if (!(msg[0] & 0x80)) { 2157 2164 shost_printk(KERN_ERR, instance, "expecting IDENTIFY message, got "); ··· 2210 2195 return; 2211 2196 } 2212 2197 2198 + #ifdef CONFIG_SUN3 2199 + { 2200 + void *d; 2201 + unsigned long count; 2202 + 2203 + if (!tmp->SCp.this_residual && tmp->SCp.buffers_residual) { 2204 + count = tmp->SCp.buffer->length; 2205 + d = sg_virt(tmp->SCp.buffer); 2206 + } else { 2207 + count = tmp->SCp.this_residual; 2208 + d = tmp->SCp.ptr; 2209 + } 2210 + 2211 + if (sun3_dma_setup_done != tmp && 2212 + sun3scsi_dma_xfer_len(count, tmp) > 0) { 2213 + sun3scsi_dma_setup(instance, d, count, 2214 + rq_data_dir(tmp->request)); 2215 + sun3_dma_setup_done = tmp; 2216 + } 2217 + } 2218 + 2219 + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); 2220 + #endif /* CONFIG_SUN3 */ 2221 + 2213 2222 /* Accept message by clearing ACK */ 2214 2223 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 2215 2224 2216 2225 hostdata->connected = tmp; 2217 - dsprintk(NDEBUG_RESELECTION, instance, "nexus established, target %d, lun %llu, tag %d\n", 2218 - scmd_id(tmp), tmp->device->lun, tmp->tag); 2226 + dsprintk(NDEBUG_RESELECTION, instance, "nexus established, target %d, lun %llu\n", 2227 + scmd_id(tmp), tmp->device->lun); 2219 2228 } 2220 - 2221 - /* 2222 - * Function : void NCR5380_dma_complete (struct Scsi_Host *instance) 2223 - * 2224 - * Purpose : called by interrupt handler when DMA finishes or a phase 2225 - * mismatch occurs (which would finish the DMA transfer). 2226 - * 2227 - * Inputs : instance - this instance of the NCR5380. 2228 - * 2229 - * Returns : pointer to the scsi_cmnd structure for which the I_T_L 2230 - * nexus has been reestablished, on failure NULL is returned. 
2231 - */ 2232 - 2233 - #ifdef REAL_DMA 2234 - static void NCR5380_dma_complete(NCR5380_instance * instance) { 2235 - struct NCR5380_hostdata *hostdata = shost_priv(instance); 2236 - int transferred; 2237 - 2238 - /* 2239 - * XXX this might not be right. 2240 - * 2241 - * Wait for final byte to transfer, ie wait for ACK to go false. 2242 - * 2243 - * We should use the Last Byte Sent bit, unfortunately this is 2244 - * not available on the 5380/5381 (only the various CMOS chips) 2245 - * 2246 - * FIXME: timeout, and need to handle long timeout/irq case 2247 - */ 2248 - 2249 - NCR5380_poll_politely(instance, BUS_AND_STATUS_REG, BASR_ACK, 0, 5*HZ); 2250 - 2251 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 2252 - 2253 - /* 2254 - * The only places we should see a phase mismatch and have to send 2255 - * data from the same set of pointers will be the data transfer 2256 - * phases. So, residual, requested length are only important here. 2257 - */ 2258 - 2259 - if (!(hostdata->connected->SCp.phase & SR_CD)) { 2260 - transferred = instance->dmalen - NCR5380_dma_residual(); 2261 - hostdata->connected->SCp.this_residual -= transferred; 2262 - hostdata->connected->SCp.ptr += transferred; 2263 - } 2264 - } 2265 - #endif /* def REAL_DMA */ 2266 2229 2267 2230 /** 2268 2231 * list_find_cmd - test for presence of a command in a linked list ··· 2353 2360 if (hostdata->connected == cmd) { 2354 2361 dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd); 2355 2362 hostdata->connected = NULL; 2356 - #ifdef REAL_DMA 2357 2363 hostdata->dma_len = 0; 2358 - #endif 2359 2364 if (do_abort(instance)) { 2360 2365 set_host_byte(cmd, DID_ERROR); 2361 2366 complete_cmd(instance, cmd); ··· 2379 2388 dsprintk(NDEBUG_ABORT, instance, "abort: successfully aborted %p\n", cmd); 2380 2389 2381 2390 queue_work(hostdata->work_q, &hostdata->main_task); 2391 + maybe_release_dma_irq(instance); 2382 2392 spin_unlock_irqrestore(&hostdata->lock, flags); 2383 2393 2384 2394 return result; 
··· 2437 2445 struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd); 2438 2446 2439 2447 set_host_byte(cmd, DID_RESET); 2440 - cmd->scsi_done(cmd); 2448 + complete_cmd(instance, cmd); 2441 2449 } 2442 2450 INIT_LIST_HEAD(&hostdata->disconnected); 2443 2451 ··· 2457 2465 2458 2466 for (i = 0; i < 8; ++i) 2459 2467 hostdata->busy[i] = 0; 2460 - #ifdef REAL_DMA 2461 2468 hostdata->dma_len = 0; 2462 - #endif 2463 2469 2464 2470 queue_work(hostdata->work_q, &hostdata->main_task); 2471 + maybe_release_dma_irq(instance); 2465 2472 spin_unlock_irqrestore(&hostdata->lock, flags); 2466 2473 2467 2474 return SUCCESS;
+3 -140
drivers/scsi/NCR5380.h
··· 199 199 200 200 #define PHASE_SR_TO_TCR(phase) ((phase) >> 2) 201 201 202 - /* 203 - * "Special" value for the (unsigned char) command tag, to indicate 204 - * I_T_L nexus instead of I_T_L_Q. 205 - */ 206 - 207 - #define TAG_NONE 0xff 208 - 209 202 /* 210 203 * These are "special" values for the irq and dma_channel fields of the 211 204 * Scsi_Host structure ··· 213 220 #define NO_IRQ 0 214 221 #endif 215 222 216 - #define FLAG_NO_DMA_FIXUP 1 /* No DMA errata workarounds */ 223 + #define FLAG_DMA_FIXUP 1 /* Use DMA errata workarounds */ 217 224 #define FLAG_NO_PSEUDO_DMA 8 /* Inhibit DMA */ 218 225 #define FLAG_LATE_DMA_SETUP 32 /* Setup NCR before DMA H/W */ 219 - #define FLAG_TAGGED_QUEUING 64 /* as X3T9.2 spelled it */ 220 226 #define FLAG_TOSHIBA_DELAY 128 /* Allow for borken CD-ROMs */ 221 - 222 - #ifdef SUPPORT_TAGS 223 - struct tag_alloc { 224 - DECLARE_BITMAP(allocated, MAX_TAGS); 225 - int nr_allocated; 226 - int queue_size; 227 - }; 228 - #endif 229 227 230 228 struct NCR5380_hostdata { 231 229 NCR5380_implementation_fields; /* implementation specific */ 232 230 struct Scsi_Host *host; /* Host backpointer */ 233 231 unsigned char id_mask, id_higher_mask; /* 1 << id, all bits greater */ 234 232 unsigned char busy[8]; /* index = target, bit = lun */ 235 - #if defined(REAL_DMA) || defined(REAL_DMA_POLL) 236 233 int dma_len; /* requested length of DMA */ 237 - #endif 238 234 unsigned char last_message; /* last message OUT */ 239 235 struct scsi_cmnd *connected; /* currently connected cmnd */ 240 236 struct scsi_cmnd *selecting; /* cmnd to be connected */ ··· 238 256 int read_overruns; /* number of bytes to cut from a 239 257 * transfer to handle chip overruns */ 240 258 struct work_struct main_task; 241 - #ifdef SUPPORT_TAGS 242 - struct tag_alloc TagAlloc[8][8]; /* 8 targets and 8 LUNs */ 243 - #endif 244 - #ifdef PSEUDO_DMA 245 - unsigned spin_max_r; 246 - unsigned spin_max_w; 247 - #endif 248 259 struct workqueue_struct *work_q; 249 260 unsigned long 
accesses_per_ms; /* chip register accesses per ms */ 250 261 }; ··· 280 305 #define NCR5380_dprint_phase(flg, arg) do {} while (0) 281 306 #endif 282 307 283 - #if defined(AUTOPROBE_IRQ) 284 308 static int NCR5380_probe_irq(struct Scsi_Host *instance, int possible); 285 - #endif 286 309 static int NCR5380_init(struct Scsi_Host *instance, int flags); 287 310 static int NCR5380_maybe_reset_bus(struct Scsi_Host *); 288 311 static void NCR5380_exit(struct Scsi_Host *instance); 289 312 static void NCR5380_information_transfer(struct Scsi_Host *instance); 290 - #ifndef DONT_USE_INTR 291 313 static irqreturn_t NCR5380_intr(int irq, void *dev_id); 292 - #endif 293 314 static void NCR5380_main(struct work_struct *work); 294 315 static const char *NCR5380_info(struct Scsi_Host *instance); 295 316 static void NCR5380_reselect(struct Scsi_Host *instance); 296 317 static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *); 297 - #if defined(PSEUDO_DMA) || defined(REAL_DMA) || defined(REAL_DMA_POLL) 298 318 static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data); 299 - #endif 300 319 static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data); 320 + static int NCR5380_poll_politely(struct Scsi_Host *, int, int, int, int); 321 + static int NCR5380_poll_politely2(struct Scsi_Host *, int, int, int, int, int, int, int); 301 322 302 - #if (defined(REAL_DMA) || defined(REAL_DMA_POLL)) 303 - 304 - #if defined(i386) || defined(__alpha__) 305 - 306 - /** 307 - * NCR5380_pc_dma_setup - setup ISA DMA 308 - * @instance: adapter to set up 309 - * @ptr: block to transfer (virtual address) 310 - * @count: number of bytes to transfer 311 - * @mode: DMA controller mode to use 312 - * 313 - * Program the DMA controller ready to perform an ISA DMA transfer 314 - * on this chip. 315 - * 316 - * Locks: takes and releases the ISA DMA lock. 
317 - */ 318 - 319 - static __inline__ int NCR5380_pc_dma_setup(struct Scsi_Host *instance, unsigned char *ptr, unsigned int count, unsigned char mode) 320 - { 321 - unsigned limit; 322 - unsigned long bus_addr = virt_to_bus(ptr); 323 - unsigned long flags; 324 - 325 - if (instance->dma_channel <= 3) { 326 - if (count > 65536) 327 - count = 65536; 328 - limit = 65536 - (bus_addr & 0xFFFF); 329 - } else { 330 - if (count > 65536 * 2) 331 - count = 65536 * 2; 332 - limit = 65536 * 2 - (bus_addr & 0x1FFFF); 333 - } 334 - 335 - if (count > limit) 336 - count = limit; 337 - 338 - if ((count & 1) || (bus_addr & 1)) 339 - panic("scsi%d : attempted unaligned DMA transfer\n", instance->host_no); 340 - 341 - flags=claim_dma_lock(); 342 - disable_dma(instance->dma_channel); 343 - clear_dma_ff(instance->dma_channel); 344 - set_dma_addr(instance->dma_channel, bus_addr); 345 - set_dma_count(instance->dma_channel, count); 346 - set_dma_mode(instance->dma_channel, mode); 347 - enable_dma(instance->dma_channel); 348 - release_dma_lock(flags); 349 - 350 - return count; 351 - } 352 - 353 - /** 354 - * NCR5380_pc_dma_write_setup - setup ISA DMA write 355 - * @instance: adapter to set up 356 - * @ptr: block to transfer (virtual address) 357 - * @count: number of bytes to transfer 358 - * 359 - * Program the DMA controller ready to perform an ISA DMA write to the 360 - * SCSI controller. 361 - * 362 - * Locks: called routines take and release the ISA DMA lock. 
363 - */ 364 - 365 - static __inline__ int NCR5380_pc_dma_write_setup(struct Scsi_Host *instance, unsigned char *src, unsigned int count) 366 - { 367 - return NCR5380_pc_dma_setup(instance, src, count, DMA_MODE_WRITE); 368 - } 369 - 370 - /** 371 - * NCR5380_pc_dma_read_setup - setup ISA DMA read 372 - * @instance: adapter to set up 373 - * @ptr: block to transfer (virtual address) 374 - * @count: number of bytes to transfer 375 - * 376 - * Program the DMA controller ready to perform an ISA DMA read from the 377 - * SCSI controller. 378 - * 379 - * Locks: called routines take and release the ISA DMA lock. 380 - */ 381 - 382 - static __inline__ int NCR5380_pc_dma_read_setup(struct Scsi_Host *instance, unsigned char *src, unsigned int count) 383 - { 384 - return NCR5380_pc_dma_setup(instance, src, count, DMA_MODE_READ); 385 - } 386 - 387 - /** 388 - * NCR5380_pc_dma_residual - return bytes left 389 - * @instance: adapter 390 - * 391 - * Reports the number of bytes left over after the DMA was terminated. 392 - * 393 - * Locks: takes and releases the ISA DMA lock. 394 - */ 395 - 396 - static __inline__ int NCR5380_pc_dma_residual(struct Scsi_Host *instance) 397 - { 398 - unsigned long flags; 399 - int tmp; 400 - 401 - flags = claim_dma_lock(); 402 - clear_dma_ff(instance->dma_channel); 403 - tmp = get_dma_residue(instance->dma_channel); 404 - release_dma_lock(flags); 405 - 406 - return tmp; 407 - } 408 - #endif /* defined(i386) || defined(__alpha__) */ 409 - #endif /* defined(REAL_DMA) */ 410 323 #endif /* __KERNEL__ */ 411 324 #endif /* NCR5380_H */
-22
drivers/scsi/aacraid/aachba.c
··· 555 555 dev = (struct aac_dev *)scsicmd->device->host->hostdata; 556 556 557 557 cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); 558 - if (!cmd_fibcontext) 559 - return -ENOMEM; 560 558 561 559 aac_fib_init(cmd_fibcontext); 562 560 dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext); ··· 1035 1037 dev = (struct aac_dev *)scsicmd->device->host->hostdata; 1036 1038 1037 1039 cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); 1038 - if (!cmd_fibcontext) 1039 - return -ENOMEM; 1040 1040 1041 1041 aac_fib_init(cmd_fibcontext); 1042 1042 dinfo = (struct aac_get_serial *) fib_data(cmd_fibcontext); ··· 1946 1950 * Alocate and initialize a Fib 1947 1951 */ 1948 1952 cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); 1949 - if (!cmd_fibcontext) { 1950 - printk(KERN_WARNING "aac_read: fib allocation failed\n"); 1951 - return -1; 1952 - } 1953 1953 1954 1954 status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count); 1955 1955 ··· 2040 2048 * Allocate and initialize a Fib then setup a BlockWrite command 2041 2049 */ 2042 2050 cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); 2043 - if (!cmd_fibcontext) { 2044 - /* FIB temporarily unavailable,not catastrophic failure */ 2045 - 2046 - /* scsicmd->result = DID_ERROR << 16; 2047 - * scsicmd->scsi_done(scsicmd); 2048 - * return 0; 2049 - */ 2050 - printk(KERN_WARNING "aac_write: fib allocation failed\n"); 2051 - return -1; 2052 - } 2053 2051 2054 2052 status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua); 2055 2053 ··· 2265 2283 * Allocate and initialize a Fib 2266 2284 */ 2267 2285 cmd_fibcontext = aac_fib_alloc_tag(aac, scsicmd); 2268 - if (!cmd_fibcontext) 2269 - return SCSI_MLQUEUE_HOST_BUSY; 2270 2286 2271 2287 aac_fib_init(cmd_fibcontext); 2272 2288 ··· 3164 3184 * Allocate and initialize a Fib then setup a BlockWrite command 3165 3185 */ 3166 3186 cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); 3167 - if (!cmd_fibcontext) 3168 - return -1; 3169 3187 3170 3188 status = 
aac_adapter_scsi(cmd_fibcontext, scsicmd); 3171 3189
+7 -2
drivers/scsi/aacraid/aacraid.h
··· 29 29 #define AAC_INT_MODE_MSI (1<<1) 30 30 #define AAC_INT_MODE_AIF (1<<2) 31 31 #define AAC_INT_MODE_SYNC (1<<3) 32 + #define AAC_INT_MODE_MSIX (1<<16) 32 33 33 34 #define AAC_INT_ENABLE_TYPE1_INTX 0xfffffffb 34 35 #define AAC_INT_ENABLE_TYPE1_MSIX 0xfffffffa ··· 63 62 #define PMC_GLOBAL_INT_BIT0 0x00000001 64 63 65 64 #ifndef AAC_DRIVER_BUILD 66 - # define AAC_DRIVER_BUILD 41052 65 + # define AAC_DRIVER_BUILD 41066 67 66 # define AAC_DRIVER_BRANCH "-ms" 68 67 #endif 69 68 #define MAXIMUM_NUM_CONTAINERS 32 ··· 721 720 }; 722 721 723 722 724 - #define Sa_MINIPORT_REVISION 1 723 + #define SA_INIT_NUM_MSIXVECTORS 1 725 724 726 725 #define sa_readw(AEP, CSR) readl(&((AEP)->regs.sa->CSR)) 727 726 #define sa_readl(AEP, CSR) readl(&((AEP)->regs.sa->CSR)) ··· 2065 2064 #define AifHighPriority 3 /* Highest Priority Event */ 2066 2065 #define AifEnAddJBOD 30 /* JBOD created */ 2067 2066 #define AifEnDeleteJBOD 31 /* JBOD deleted */ 2067 + 2068 + #define AifBuManagerEvent 42 /* Bu management*/ 2069 + #define AifBuCacheDataLoss 10 2070 + #define AifBuCacheDataRecover 11 2068 2071 2069 2072 #define AifCmdJobProgress 2 /* Progress report */ 2070 2073 #define AifJobCtrZero 101 /* Array Zero progress */
+27 -16
drivers/scsi/aacraid/comminit.c
··· 37 37 #include <linux/spinlock.h> 38 38 #include <linux/slab.h> 39 39 #include <linux/blkdev.h> 40 + #include <linux/delay.h> 40 41 #include <linux/completion.h> 41 42 #include <linux/mm.h> 42 43 #include <scsi/scsi_host.h> ··· 47 46 struct aac_common aac_config = { 48 47 .irq_mod = 1 49 48 }; 49 + 50 + static inline int aac_is_msix_mode(struct aac_dev *dev) 51 + { 52 + u32 status; 53 + 54 + status = src_readl(dev, MUnit.OMR); 55 + return (status & AAC_INT_MODE_MSIX); 56 + } 57 + 58 + static inline void aac_change_to_intx(struct aac_dev *dev) 59 + { 60 + aac_src_access_devreg(dev, AAC_DISABLE_MSIX); 61 + aac_src_access_devreg(dev, AAC_ENABLE_INTX); 62 + } 50 63 51 64 static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long commsize, unsigned long commalign) 52 65 { ··· 106 91 init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION); 107 92 if (dev->max_fib_size != sizeof(struct hw_fib)) 108 93 init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4); 109 - init->Sa_MSIXVectors = cpu_to_le32(Sa_MINIPORT_REVISION); 94 + init->Sa_MSIXVectors = cpu_to_le32(SA_INIT_NUM_MSIXVECTORS); 110 95 init->fsrev = cpu_to_le32(dev->fsrev); 111 96 112 97 /* ··· 393 378 msi_count = i; 394 379 } else { 395 380 dev->msi_enabled = 0; 396 - printk(KERN_ERR "%s%d: MSIX not supported!! Will try MSI 0x%x.\n", 397 - dev->name, dev->id, i); 398 - } 399 - } 400 - 401 - if (!dev->msi_enabled) { 402 - msi_count = 1; 403 - i = pci_enable_msi(dev->pdev); 404 - 405 - if (!i) { 406 - dev->msi_enabled = 1; 407 - dev->msi = 1; 408 - } else { 409 - printk(KERN_ERR "%s%d: MSI not supported!! Will try INTx 0x%x.\n", 410 - dev->name, dev->id, i); 381 + dev_err(&dev->pdev->dev, 382 + "MSIX not supported!! 
Will try INTX 0x%x.\n", i); 411 383 } 412 384 } 413 385 ··· 428 426 / sizeof(struct sgentry); 429 427 dev->comm_interface = AAC_COMM_PRODUCER; 430 428 dev->raw_io_interface = dev->raw_io_64 = 0; 429 + 430 + 431 + /* 432 + * Enable INTX mode, if not done already Enabled 433 + */ 434 + if (aac_is_msix_mode(dev)) { 435 + aac_change_to_intx(dev); 436 + dev_info(&dev->pdev->dev, "Changed firmware to INTX mode"); 437 + } 431 438 432 439 if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, 433 440 0, 0, 0, 0, 0, 0,
+35 -4
drivers/scsi/aacraid/commsup.c
··· 637 637 } 638 638 return -EFAULT; 639 639 } 640 - /* We used to udelay() here but that absorbed 641 - * a CPU when a timeout occured. Not very 642 - * useful. */ 643 - cpu_relax(); 640 + /* 641 + * Allow other processes / CPUS to use core 642 + */ 643 + schedule(); 644 644 } 645 645 } else if (down_interruptible(&fibptr->event_wait)) { 646 646 /* Do nothing ... satisfy ··· 901 901 memset(cp, 0, 256); 902 902 } 903 903 904 + static inline int aac_aif_data(struct aac_aifcmd *aifcmd, uint32_t index) 905 + { 906 + return le32_to_cpu(((__le32 *)aifcmd->data)[index]); 907 + } 908 + 909 + 910 + static void aac_handle_aif_bu(struct aac_dev *dev, struct aac_aifcmd *aifcmd) 911 + { 912 + switch (aac_aif_data(aifcmd, 1)) { 913 + case AifBuCacheDataLoss: 914 + if (aac_aif_data(aifcmd, 2)) 915 + dev_info(&dev->pdev->dev, "Backup unit had cache data loss - [%d]\n", 916 + aac_aif_data(aifcmd, 2)); 917 + else 918 + dev_info(&dev->pdev->dev, "Backup Unit had cache data loss\n"); 919 + break; 920 + case AifBuCacheDataRecover: 921 + if (aac_aif_data(aifcmd, 2)) 922 + dev_info(&dev->pdev->dev, "DDR cache data recovered successfully - [%d]\n", 923 + aac_aif_data(aifcmd, 2)); 924 + else 925 + dev_info(&dev->pdev->dev, "DDR cache data recovered successfully\n"); 926 + break; 927 + } 928 + } 904 929 905 930 /** 906 931 * aac_handle_aif - Handle a message from the firmware ··· 1179 1154 ADD : DELETE; 1180 1155 break; 1181 1156 } 1157 + case AifBuManagerEvent: 1158 + aac_handle_aif_bu(dev, aifcmd); 1182 1159 break; 1183 1160 } 1184 1161 ··· 2023 1996 if (difference <= 0) 2024 1997 difference = 1; 2025 1998 set_current_state(TASK_INTERRUPTIBLE); 1999 + 2000 + if (kthread_should_stop()) 2001 + break; 2002 + 2026 2003 schedule_timeout(difference); 2027 2004 2028 2005 if (kthread_should_stop())
+4 -3
drivers/scsi/aacraid/dpcsup.c
··· 392 392 if (likely(fib->callback && fib->callback_data)) { 393 393 fib->flags &= FIB_CONTEXT_FLAG_FASTRESP; 394 394 fib->callback(fib->callback_data, fib); 395 - } else { 396 - aac_fib_complete(fib); 397 - } 395 + } else 396 + dev_info(&dev->pdev->dev, 397 + "Invalid callback_fib[%d] (*%p)(%p)\n", 398 + index, fib->callback, fib->callback_data); 398 399 } else { 399 400 unsigned long flagv; 400 401 dprintk((KERN_INFO "event_wait up\n"));
+3 -1
drivers/scsi/aacraid/linit.c
··· 1299 1299 else 1300 1300 shost->this_id = shost->max_id; 1301 1301 1302 + aac_intr_normal(aac, 0, 2, 0, NULL); 1303 + 1302 1304 /* 1303 1305 * dmb - we may need to move the setting of these parms somewhere else once 1304 1306 * we get a fib that can report the actual numbers ··· 1433 1431 /* After EEH recovery or suspend resume, max_msix count 1434 1432 * may change, therfore updating in init as well. 1435 1433 */ 1436 - aac_adapter_start(dev); 1437 1434 dev->init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix); 1435 + aac_adapter_start(dev); 1438 1436 } 1439 1437 return 0; 1440 1438
+2 -1
drivers/scsi/aacraid/src.c
··· 135 135 136 136 if (mode & AAC_INT_MODE_AIF) { 137 137 /* handle AIF */ 138 - aac_intr_normal(dev, 0, 2, 0, NULL); 138 + if (dev->aif_thread && dev->fsa_dev) 139 + aac_intr_normal(dev, 0, 2, 0, NULL); 139 140 if (dev->msi_enabled) 140 141 aac_src_access_devreg(dev, AAC_CLEAR_AIF_BIT); 141 142 mode = 0;
+16 -9
drivers/scsi/arm/cumana_1.c
··· 13 13 14 14 #include <scsi/scsi_host.h> 15 15 16 - #define PSEUDO_DMA 17 - 18 16 #define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata) 19 17 #define NCR5380_read(reg) cumanascsi_read(instance, reg) 20 18 #define NCR5380_write(reg, value) cumanascsi_write(instance, reg, value) 21 19 22 20 #define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize) 21 + #define NCR5380_dma_recv_setup cumanascsi_pread 22 + #define NCR5380_dma_send_setup cumanascsi_pwrite 23 + #define NCR5380_dma_residual(instance) (0) 23 24 24 25 #define NCR5380_intr cumanascsi_intr 25 26 #define NCR5380_queue_command cumanascsi_queue_command ··· 42 41 #define L(v) (((v)<<16)|((v) & 0x0000ffff)) 43 42 #define H(v) (((v)>>16)|((v) & 0xffff0000)) 44 43 45 - static inline int 46 - NCR5380_pwrite(struct Scsi_Host *host, unsigned char *addr, int len) 44 + static inline int cumanascsi_pwrite(struct Scsi_Host *host, 45 + unsigned char *addr, int len) 47 46 { 48 47 unsigned long *laddr; 49 48 void __iomem *dma = priv(host)->dma + 0x2000; ··· 102 101 } 103 102 end: 104 103 writeb(priv(host)->ctrl | 0x40, priv(host)->base + CTRL); 105 - return len; 104 + 105 + if (len) 106 + return -1; 107 + return 0; 106 108 } 107 109 108 - static inline int 109 - NCR5380_pread(struct Scsi_Host *host, unsigned char *addr, int len) 110 + static inline int cumanascsi_pread(struct Scsi_Host *host, 111 + unsigned char *addr, int len) 110 112 { 111 113 unsigned long *laddr; 112 114 void __iomem *dma = priv(host)->dma + 0x2000; ··· 167 163 } 168 164 end: 169 165 writeb(priv(host)->ctrl | 0x40, priv(host)->base + CTRL); 170 - return len; 166 + 167 + if (len) 168 + return -1; 169 + return 0; 171 170 } 172 171 173 172 static unsigned char cumanascsi_read(struct Scsi_Host *host, unsigned int reg) ··· 246 239 247 240 host->irq = ec->irq; 248 241 249 - ret = NCR5380_init(host, 0); 242 + ret = NCR5380_init(host, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP); 250 243 if (ret) 251 244 goto out_unmap; 252 245
+1 -1
drivers/scsi/arm/cumana_2.c
··· 365 365 .eh_abort_handler = fas216_eh_abort, 366 366 .can_queue = 1, 367 367 .this_id = 7, 368 - .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS, 368 + .sg_tablesize = SG_MAX_SEGMENTS, 369 369 .dma_boundary = IOMD_DMA_BOUNDARY, 370 370 .use_clustering = DISABLE_CLUSTERING, 371 371 .proc_name = "cumanascsi2",
+1 -1
drivers/scsi/arm/eesox.c
··· 484 484 .eh_abort_handler = fas216_eh_abort, 485 485 .can_queue = 1, 486 486 .this_id = 7, 487 - .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS, 487 + .sg_tablesize = SG_MAX_SEGMENTS, 488 488 .dma_boundary = IOMD_DMA_BOUNDARY, 489 489 .use_clustering = DISABLE_CLUSTERING, 490 490 .proc_name = "eesox",
+11 -11
drivers/scsi/arm/oak.c
··· 14 14 15 15 #include <scsi/scsi_host.h> 16 16 17 - /*#define PSEUDO_DMA*/ 18 - #define DONT_USE_INTR 19 - 20 17 #define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata) 21 18 22 19 #define NCR5380_read(reg) \ ··· 21 24 #define NCR5380_write(reg, value) \ 22 25 writeb(value, priv(instance)->base + ((reg) << 2)) 23 26 24 - #define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize) 27 + #define NCR5380_dma_xfer_len(instance, cmd, phase) (0) 28 + #define NCR5380_dma_recv_setup oakscsi_pread 29 + #define NCR5380_dma_send_setup oakscsi_pwrite 30 + #define NCR5380_dma_residual(instance) (0) 25 31 26 32 #define NCR5380_queue_command oakscsi_queue_command 27 33 #define NCR5380_info oakscsi_info ··· 40 40 #define STAT ((128 + 16) << 2) 41 41 #define DATA ((128 + 8) << 2) 42 42 43 - static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *addr, 44 - int len) 43 + static inline int oakscsi_pwrite(struct Scsi_Host *instance, 44 + unsigned char *addr, int len) 45 45 { 46 46 void __iomem *base = priv(instance)->base; 47 47 48 48 printk("writing %p len %d\n",addr, len); 49 - if(!len) return -1; 50 49 51 50 while(1) 52 51 { 53 52 int status; 54 53 while (((status = readw(base + STAT)) & 0x100)==0); 55 54 } 55 + return 0; 56 56 } 57 57 58 - static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *addr, 59 - int len) 58 + static inline int oakscsi_pread(struct Scsi_Host *instance, 59 + unsigned char *addr, int len) 60 60 { 61 61 void __iomem *base = priv(instance)->base; 62 62 printk("reading %p len %d\n", addr, len); ··· 73 73 if(status & 0x200 || !timeout) 74 74 { 75 75 printk("status = %08X\n", status); 76 - return 1; 76 + return -1; 77 77 } 78 78 } 79 79 ··· 143 143 host->irq = NO_IRQ; 144 144 host->n_io_port = 255; 145 145 146 - ret = NCR5380_init(host, 0); 146 + ret = NCR5380_init(host, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP); 147 147 if (ret) 148 148 goto out_unmap; 149 149
+1 -1
drivers/scsi/arm/powertec.c
··· 291 291 292 292 .can_queue = 8, 293 293 .this_id = 7, 294 - .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS, 294 + .sg_tablesize = SG_MAX_SEGMENTS, 295 295 .dma_boundary = IOMD_DMA_BOUNDARY, 296 296 .cmd_per_lun = 2, 297 297 .use_clustering = ENABLE_CLUSTERING,
-2676
drivers/scsi/atari_NCR5380.c
··· 1 - /* 2 - * NCR 5380 generic driver routines. These should make it *trivial* 3 - * to implement 5380 SCSI drivers under Linux with a non-trantor 4 - * architecture. 5 - * 6 - * Note that these routines also work with NR53c400 family chips. 7 - * 8 - * Copyright 1993, Drew Eckhardt 9 - * Visionary Computing 10 - * (Unix and Linux consulting and custom programming) 11 - * drew@colorado.edu 12 - * +1 (303) 666-5836 13 - * 14 - * For more information, please consult 15 - * 16 - * NCR 5380 Family 17 - * SCSI Protocol Controller 18 - * Databook 19 - * 20 - * NCR Microelectronics 21 - * 1635 Aeroplaza Drive 22 - * Colorado Springs, CO 80916 23 - * 1+ (719) 578-3400 24 - * 1+ (800) 334-5454 25 - */ 26 - 27 - /* Ported to Atari by Roman Hodek and others. */ 28 - 29 - /* Adapted for the sun3 by Sam Creasey. */ 30 - 31 - /* 32 - * Design 33 - * 34 - * This is a generic 5380 driver. To use it on a different platform, 35 - * one simply writes appropriate system specific macros (ie, data 36 - * transfer - some PC's will use the I/O bus, 68K's must use 37 - * memory mapped) and drops this file in their 'C' wrapper. 38 - * 39 - * As far as command queueing, two queues are maintained for 40 - * each 5380 in the system - commands that haven't been issued yet, 41 - * and commands that are currently executing. This means that an 42 - * unlimited number of commands may be queued, letting 43 - * more commands propagate from the higher driver levels giving higher 44 - * throughput. Note that both I_T_L and I_T_L_Q nexuses are supported, 45 - * allowing multiple commands to propagate all the way to a SCSI-II device 46 - * while a command is already executing. 47 - * 48 - * 49 - * Issues specific to the NCR5380 : 50 - * 51 - * When used in a PIO or pseudo-dma mode, the NCR5380 is a braindead 52 - * piece of hardware that requires you to sit in a loop polling for 53 - * the REQ signal as long as you are connected. 
Some devices are 54 - * brain dead (ie, many TEXEL CD ROM drives) and won't disconnect 55 - * while doing long seek operations. [...] These 56 - * broken devices are the exception rather than the rule and I'd rather 57 - * spend my time optimizing for the normal case. 58 - * 59 - * Architecture : 60 - * 61 - * At the heart of the design is a coroutine, NCR5380_main, 62 - * which is started from a workqueue for each NCR5380 host in the 63 - * system. It attempts to establish I_T_L or I_T_L_Q nexuses by 64 - * removing the commands from the issue queue and calling 65 - * NCR5380_select() if a nexus is not established. 66 - * 67 - * Once a nexus is established, the NCR5380_information_transfer() 68 - * phase goes through the various phases as instructed by the target. 69 - * if the target goes into MSG IN and sends a DISCONNECT message, 70 - * the command structure is placed into the per instance disconnected 71 - * queue, and NCR5380_main tries to find more work. If the target is 72 - * idle for too long, the system will try to sleep. 73 - * 74 - * If a command has disconnected, eventually an interrupt will trigger, 75 - * calling NCR5380_intr() which will in turn call NCR5380_reselect 76 - * to reestablish a nexus. This will run main if necessary. 77 - * 78 - * On command termination, the done function will be called as 79 - * appropriate. 80 - * 81 - * SCSI pointers are maintained in the SCp field of SCSI command 82 - * structures, being initialized after the command is connected 83 - * in NCR5380_select, and set as appropriate in NCR5380_information_transfer. 84 - * Note that in violation of the standard, an implicit SAVE POINTERS operation 85 - * is done, since some BROKEN disks fail to issue an explicit SAVE POINTERS. 86 - */ 87 - 88 - /* 89 - * Using this file : 90 - * This file a skeleton Linux SCSI driver for the NCR 5380 series 91 - * of chips. To use it, you write an architecture specific functions 92 - * and macros and include this file in your driver. 
93 - * 94 - * These macros control options : 95 - * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically 96 - * for commands that return with a CHECK CONDITION status. 97 - * 98 - * DIFFERENTIAL - if defined, NCR53c81 chips will use external differential 99 - * transceivers. 100 - * 101 - * REAL_DMA - if defined, REAL DMA is used during the data transfer phases. 102 - * 103 - * SUPPORT_TAGS - if defined, SCSI-2 tagged queuing is used where possible 104 - * 105 - * These macros MUST be defined : 106 - * 107 - * NCR5380_read(register) - read from the specified register 108 - * 109 - * NCR5380_write(register, value) - write to the specific register 110 - * 111 - * NCR5380_implementation_fields - additional fields needed for this 112 - * specific implementation of the NCR5380 113 - * 114 - * Either real DMA *or* pseudo DMA may be implemented 115 - * REAL functions : 116 - * NCR5380_REAL_DMA should be defined if real DMA is to be used. 117 - * Note that the DMA setup functions should return the number of bytes 118 - * that they were able to program the controller for. 119 - * 120 - * Also note that generic i386/PC versions of these macros are 121 - * available as NCR5380_i386_dma_write_setup, 122 - * NCR5380_i386_dma_read_setup, and NCR5380_i386_dma_residual. 123 - * 124 - * NCR5380_dma_write_setup(instance, src, count) - initialize 125 - * NCR5380_dma_read_setup(instance, dst, count) - initialize 126 - * NCR5380_dma_residual(instance); - residual count 127 - * 128 - * PSEUDO functions : 129 - * NCR5380_pwrite(instance, src, count) 130 - * NCR5380_pread(instance, dst, count); 131 - * 132 - * The generic driver is initialized by calling NCR5380_init(instance), 133 - * after setting the appropriate host specific fields and ID. If the 134 - * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance, 135 - * possible) function may be used. 
136 - */ 137 - 138 - static int do_abort(struct Scsi_Host *); 139 - static void do_reset(struct Scsi_Host *); 140 - 141 - #ifdef SUPPORT_TAGS 142 - 143 - /* 144 - * Functions for handling tagged queuing 145 - * ===================================== 146 - * 147 - * ++roman (01/96): Now I've implemented SCSI-2 tagged queuing. Some notes: 148 - * 149 - * Using consecutive numbers for the tags is no good idea in my eyes. There 150 - * could be wrong re-usings if the counter (8 bit!) wraps and some early 151 - * command has been preempted for a long time. My solution: a bitfield for 152 - * remembering used tags. 153 - * 154 - * There's also the problem that each target has a certain queue size, but we 155 - * cannot know it in advance :-( We just see a QUEUE_FULL status being 156 - * returned. So, in this case, the driver internal queue size assumption is 157 - * reduced to the number of active tags if QUEUE_FULL is returned by the 158 - * target. 159 - * 160 - * We're also not allowed running tagged commands as long as an untagged 161 - * command is active. And REQUEST SENSE commands after a contingent allegiance 162 - * condition _must_ be untagged. To keep track whether an untagged command has 163 - * been issued, the host->busy array is still employed, as it is without 164 - * support for tagged queuing. 165 - * 166 - * One could suspect that there are possible race conditions between 167 - * is_lun_busy(), cmd_get_tag() and cmd_free_tag(). But I think this isn't the 168 - * case: is_lun_busy() and cmd_get_tag() are both called from NCR5380_main(), 169 - * which already guaranteed to be running at most once. It is also the only 170 - * place where tags/LUNs are allocated. So no other allocation can slip 171 - * between that pair, there could only happen a reselection, which can free a 172 - * tag, but that doesn't hurt. Only the sequence in cmd_free_tag() becomes 173 - * important: the tag bit must be cleared before 'nr_allocated' is decreased. 
174 - */ 175 - 176 - static void __init init_tags(struct NCR5380_hostdata *hostdata) 177 - { 178 - int target, lun; 179 - struct tag_alloc *ta; 180 - 181 - if (!(hostdata->flags & FLAG_TAGGED_QUEUING)) 182 - return; 183 - 184 - for (target = 0; target < 8; ++target) { 185 - for (lun = 0; lun < 8; ++lun) { 186 - ta = &hostdata->TagAlloc[target][lun]; 187 - bitmap_zero(ta->allocated, MAX_TAGS); 188 - ta->nr_allocated = 0; 189 - /* At the beginning, assume the maximum queue size we could 190 - * support (MAX_TAGS). This value will be decreased if the target 191 - * returns QUEUE_FULL status. 192 - */ 193 - ta->queue_size = MAX_TAGS; 194 - } 195 - } 196 - } 197 - 198 - 199 - /* Check if we can issue a command to this LUN: First see if the LUN is marked 200 - * busy by an untagged command. If the command should use tagged queuing, also 201 - * check that there is a free tag and the target's queue won't overflow. This 202 - * function should be called with interrupts disabled to avoid race 203 - * conditions. 204 - */ 205 - 206 - static int is_lun_busy(struct scsi_cmnd *cmd, int should_be_tagged) 207 - { 208 - u8 lun = cmd->device->lun; 209 - struct Scsi_Host *instance = cmd->device->host; 210 - struct NCR5380_hostdata *hostdata = shost_priv(instance); 211 - 212 - if (hostdata->busy[cmd->device->id] & (1 << lun)) 213 - return 1; 214 - if (!should_be_tagged || 215 - !(hostdata->flags & FLAG_TAGGED_QUEUING) || 216 - !cmd->device->tagged_supported) 217 - return 0; 218 - if (hostdata->TagAlloc[scmd_id(cmd)][lun].nr_allocated >= 219 - hostdata->TagAlloc[scmd_id(cmd)][lun].queue_size) { 220 - dsprintk(NDEBUG_TAGS, instance, "target %d lun %d: no free tags\n", 221 - scmd_id(cmd), lun); 222 - return 1; 223 - } 224 - return 0; 225 - } 226 - 227 - 228 - /* Allocate a tag for a command (there are no checks anymore, check_lun_busy() 229 - * must be called before!), or reserve the LUN in 'busy' if the command is 230 - * untagged. 
231 - */ 232 - 233 - static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged) 234 - { 235 - u8 lun = cmd->device->lun; 236 - struct Scsi_Host *instance = cmd->device->host; 237 - struct NCR5380_hostdata *hostdata = shost_priv(instance); 238 - 239 - /* If we or the target don't support tagged queuing, allocate the LUN for 240 - * an untagged command. 241 - */ 242 - if (!should_be_tagged || 243 - !(hostdata->flags & FLAG_TAGGED_QUEUING) || 244 - !cmd->device->tagged_supported) { 245 - cmd->tag = TAG_NONE; 246 - hostdata->busy[cmd->device->id] |= (1 << lun); 247 - dsprintk(NDEBUG_TAGS, instance, "target %d lun %d now allocated by untagged command\n", 248 - scmd_id(cmd), lun); 249 - } else { 250 - struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][lun]; 251 - 252 - cmd->tag = find_first_zero_bit(ta->allocated, MAX_TAGS); 253 - set_bit(cmd->tag, ta->allocated); 254 - ta->nr_allocated++; 255 - dsprintk(NDEBUG_TAGS, instance, "using tag %d for target %d lun %d (%d tags allocated)\n", 256 - cmd->tag, scmd_id(cmd), lun, ta->nr_allocated); 257 - } 258 - } 259 - 260 - 261 - /* Mark the tag of command 'cmd' as free, or in case of an untagged command, 262 - * unlock the LUN. 
263 - */ 264 - 265 - static void cmd_free_tag(struct scsi_cmnd *cmd) 266 - { 267 - u8 lun = cmd->device->lun; 268 - struct Scsi_Host *instance = cmd->device->host; 269 - struct NCR5380_hostdata *hostdata = shost_priv(instance); 270 - 271 - if (cmd->tag == TAG_NONE) { 272 - hostdata->busy[cmd->device->id] &= ~(1 << lun); 273 - dsprintk(NDEBUG_TAGS, instance, "target %d lun %d untagged cmd freed\n", 274 - scmd_id(cmd), lun); 275 - } else if (cmd->tag >= MAX_TAGS) { 276 - shost_printk(KERN_NOTICE, instance, 277 - "trying to free bad tag %d!\n", cmd->tag); 278 - } else { 279 - struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][lun]; 280 - clear_bit(cmd->tag, ta->allocated); 281 - ta->nr_allocated--; 282 - dsprintk(NDEBUG_TAGS, instance, "freed tag %d for target %d lun %d\n", 283 - cmd->tag, scmd_id(cmd), lun); 284 - } 285 - } 286 - 287 - 288 - static void free_all_tags(struct NCR5380_hostdata *hostdata) 289 - { 290 - int target, lun; 291 - struct tag_alloc *ta; 292 - 293 - if (!(hostdata->flags & FLAG_TAGGED_QUEUING)) 294 - return; 295 - 296 - for (target = 0; target < 8; ++target) { 297 - for (lun = 0; lun < 8; ++lun) { 298 - ta = &hostdata->TagAlloc[target][lun]; 299 - bitmap_zero(ta->allocated, MAX_TAGS); 300 - ta->nr_allocated = 0; 301 - } 302 - } 303 - } 304 - 305 - #endif /* SUPPORT_TAGS */ 306 - 307 - /** 308 - * merge_contiguous_buffers - coalesce scatter-gather list entries 309 - * @cmd: command requesting IO 310 - * 311 - * Try to merge several scatter-gather buffers into one DMA transfer. 312 - * This is possible if the scatter buffers lie on physically 313 - * contiguous addresses. The first scatter-gather buffer's data are 314 - * assumed to be already transferred into cmd->SCp.this_residual. 315 - * Every buffer merged avoids an interrupt and a DMA setup operation. 
316 - */ 317 - 318 - static void merge_contiguous_buffers(struct scsi_cmnd *cmd) 319 - { 320 - #if !defined(CONFIG_SUN3) 321 - unsigned long endaddr; 322 - #if (NDEBUG & NDEBUG_MERGING) 323 - unsigned long oldlen = cmd->SCp.this_residual; 324 - int cnt = 1; 325 - #endif 326 - 327 - for (endaddr = virt_to_phys(cmd->SCp.ptr + cmd->SCp.this_residual - 1) + 1; 328 - cmd->SCp.buffers_residual && 329 - virt_to_phys(sg_virt(&cmd->SCp.buffer[1])) == endaddr;) { 330 - dprintk(NDEBUG_MERGING, "VTOP(%p) == %08lx -> merging\n", 331 - page_address(sg_page(&cmd->SCp.buffer[1])), endaddr); 332 - #if (NDEBUG & NDEBUG_MERGING) 333 - ++cnt; 334 - #endif 335 - ++cmd->SCp.buffer; 336 - --cmd->SCp.buffers_residual; 337 - cmd->SCp.this_residual += cmd->SCp.buffer->length; 338 - endaddr += cmd->SCp.buffer->length; 339 - } 340 - #if (NDEBUG & NDEBUG_MERGING) 341 - if (oldlen != cmd->SCp.this_residual) 342 - dprintk(NDEBUG_MERGING, "merged %d buffers from %p, new length %08x\n", 343 - cnt, cmd->SCp.ptr, cmd->SCp.this_residual); 344 - #endif 345 - #endif /* !defined(CONFIG_SUN3) */ 346 - } 347 - 348 - /** 349 - * initialize_SCp - init the scsi pointer field 350 - * @cmd: command block to set up 351 - * 352 - * Set up the internal fields in the SCSI command. 353 - */ 354 - 355 - static inline void initialize_SCp(struct scsi_cmnd *cmd) 356 - { 357 - /* 358 - * Initialize the Scsi Pointer field so that all of the commands in the 359 - * various queues are valid. 
360 - */ 361 - 362 - if (scsi_bufflen(cmd)) { 363 - cmd->SCp.buffer = scsi_sglist(cmd); 364 - cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1; 365 - cmd->SCp.ptr = sg_virt(cmd->SCp.buffer); 366 - cmd->SCp.this_residual = cmd->SCp.buffer->length; 367 - 368 - merge_contiguous_buffers(cmd); 369 - } else { 370 - cmd->SCp.buffer = NULL; 371 - cmd->SCp.buffers_residual = 0; 372 - cmd->SCp.ptr = NULL; 373 - cmd->SCp.this_residual = 0; 374 - } 375 - 376 - cmd->SCp.Status = 0; 377 - cmd->SCp.Message = 0; 378 - } 379 - 380 - /** 381 - * NCR5380_poll_politely2 - wait for two chip register values 382 - * @instance: controller to poll 383 - * @reg1: 5380 register to poll 384 - * @bit1: Bitmask to check 385 - * @val1: Expected value 386 - * @reg2: Second 5380 register to poll 387 - * @bit2: Second bitmask to check 388 - * @val2: Second expected value 389 - * @wait: Time-out in jiffies 390 - * 391 - * Polls the chip in a reasonably efficient manner waiting for an 392 - * event to occur. After a short quick poll we begin to yield the CPU 393 - * (if possible). In irq contexts the time-out is arbitrarily limited. 394 - * Callers may hold locks as long as they are held in irq mode. 395 - * 396 - * Returns 0 if either or both event(s) occurred otherwise -ETIMEDOUT. 
397 - */ 398 - 399 - static int NCR5380_poll_politely2(struct Scsi_Host *instance, 400 - int reg1, int bit1, int val1, 401 - int reg2, int bit2, int val2, int wait) 402 - { 403 - struct NCR5380_hostdata *hostdata = shost_priv(instance); 404 - unsigned long deadline = jiffies + wait; 405 - unsigned long n; 406 - 407 - /* Busy-wait for up to 10 ms */ 408 - n = min(10000U, jiffies_to_usecs(wait)); 409 - n *= hostdata->accesses_per_ms; 410 - n /= 2000; 411 - do { 412 - if ((NCR5380_read(reg1) & bit1) == val1) 413 - return 0; 414 - if ((NCR5380_read(reg2) & bit2) == val2) 415 - return 0; 416 - cpu_relax(); 417 - } while (n--); 418 - 419 - if (irqs_disabled() || in_interrupt()) 420 - return -ETIMEDOUT; 421 - 422 - /* Repeatedly sleep for 1 ms until deadline */ 423 - while (time_is_after_jiffies(deadline)) { 424 - schedule_timeout_uninterruptible(1); 425 - if ((NCR5380_read(reg1) & bit1) == val1) 426 - return 0; 427 - if ((NCR5380_read(reg2) & bit2) == val2) 428 - return 0; 429 - } 430 - 431 - return -ETIMEDOUT; 432 - } 433 - 434 - static inline int NCR5380_poll_politely(struct Scsi_Host *instance, 435 - int reg, int bit, int val, int wait) 436 - { 437 - return NCR5380_poll_politely2(instance, reg, bit, val, 438 - reg, bit, val, wait); 439 - } 440 - 441 - #if NDEBUG 442 - static struct { 443 - unsigned char mask; 444 - const char *name; 445 - } signals[] = { 446 - {SR_DBP, "PARITY"}, 447 - {SR_RST, "RST"}, 448 - {SR_BSY, "BSY"}, 449 - {SR_REQ, "REQ"}, 450 - {SR_MSG, "MSG"}, 451 - {SR_CD, "CD"}, 452 - {SR_IO, "IO"}, 453 - {SR_SEL, "SEL"}, 454 - {0, NULL} 455 - }, 456 - basrs[] = { 457 - {BASR_ATN, "ATN"}, 458 - {BASR_ACK, "ACK"}, 459 - {0, NULL} 460 - }, 461 - icrs[] = { 462 - {ICR_ASSERT_RST, "ASSERT RST"}, 463 - {ICR_ASSERT_ACK, "ASSERT ACK"}, 464 - {ICR_ASSERT_BSY, "ASSERT BSY"}, 465 - {ICR_ASSERT_SEL, "ASSERT SEL"}, 466 - {ICR_ASSERT_ATN, "ASSERT ATN"}, 467 - {ICR_ASSERT_DATA, "ASSERT DATA"}, 468 - {0, NULL} 469 - }, 470 - mrs[] = { 471 - {MR_BLOCK_DMA_MODE, "MODE 
BLOCK DMA"}, 472 - {MR_TARGET, "MODE TARGET"}, 473 - {MR_ENABLE_PAR_CHECK, "MODE PARITY CHECK"}, 474 - {MR_ENABLE_PAR_INTR, "MODE PARITY INTR"}, 475 - {MR_ENABLE_EOP_INTR, "MODE EOP INTR"}, 476 - {MR_MONITOR_BSY, "MODE MONITOR BSY"}, 477 - {MR_DMA_MODE, "MODE DMA"}, 478 - {MR_ARBITRATE, "MODE ARBITRATION"}, 479 - {0, NULL} 480 - }; 481 - 482 - /** 483 - * NCR5380_print - print scsi bus signals 484 - * @instance: adapter state to dump 485 - * 486 - * Print the SCSI bus signals for debugging purposes 487 - */ 488 - 489 - static void NCR5380_print(struct Scsi_Host *instance) 490 - { 491 - unsigned char status, data, basr, mr, icr, i; 492 - 493 - data = NCR5380_read(CURRENT_SCSI_DATA_REG); 494 - status = NCR5380_read(STATUS_REG); 495 - mr = NCR5380_read(MODE_REG); 496 - icr = NCR5380_read(INITIATOR_COMMAND_REG); 497 - basr = NCR5380_read(BUS_AND_STATUS_REG); 498 - 499 - printk("STATUS_REG: %02x ", status); 500 - for (i = 0; signals[i].mask; ++i) 501 - if (status & signals[i].mask) 502 - printk(",%s", signals[i].name); 503 - printk("\nBASR: %02x ", basr); 504 - for (i = 0; basrs[i].mask; ++i) 505 - if (basr & basrs[i].mask) 506 - printk(",%s", basrs[i].name); 507 - printk("\nICR: %02x ", icr); 508 - for (i = 0; icrs[i].mask; ++i) 509 - if (icr & icrs[i].mask) 510 - printk(",%s", icrs[i].name); 511 - printk("\nMODE: %02x ", mr); 512 - for (i = 0; mrs[i].mask; ++i) 513 - if (mr & mrs[i].mask) 514 - printk(",%s", mrs[i].name); 515 - printk("\n"); 516 - } 517 - 518 - static struct { 519 - unsigned char value; 520 - const char *name; 521 - } phases[] = { 522 - {PHASE_DATAOUT, "DATAOUT"}, 523 - {PHASE_DATAIN, "DATAIN"}, 524 - {PHASE_CMDOUT, "CMDOUT"}, 525 - {PHASE_STATIN, "STATIN"}, 526 - {PHASE_MSGOUT, "MSGOUT"}, 527 - {PHASE_MSGIN, "MSGIN"}, 528 - {PHASE_UNKNOWN, "UNKNOWN"} 529 - }; 530 - 531 - /** 532 - * NCR5380_print_phase - show SCSI phase 533 - * @instance: adapter to dump 534 - * 535 - * Print the current SCSI phase for debugging purposes 536 - */ 537 - 538 - static 
void NCR5380_print_phase(struct Scsi_Host *instance) 539 - { 540 - unsigned char status; 541 - int i; 542 - 543 - status = NCR5380_read(STATUS_REG); 544 - if (!(status & SR_REQ)) 545 - shost_printk(KERN_DEBUG, instance, "REQ not asserted, phase unknown.\n"); 546 - else { 547 - for (i = 0; (phases[i].value != PHASE_UNKNOWN) && 548 - (phases[i].value != (status & PHASE_MASK)); ++i) 549 - ; 550 - shost_printk(KERN_DEBUG, instance, "phase %s\n", phases[i].name); 551 - } 552 - } 553 - #endif 554 - 555 - /** 556 - * NCR58380_info - report driver and host information 557 - * @instance: relevant scsi host instance 558 - * 559 - * For use as the host template info() handler. 560 - */ 561 - 562 - static const char *NCR5380_info(struct Scsi_Host *instance) 563 - { 564 - struct NCR5380_hostdata *hostdata = shost_priv(instance); 565 - 566 - return hostdata->info; 567 - } 568 - 569 - static void prepare_info(struct Scsi_Host *instance) 570 - { 571 - struct NCR5380_hostdata *hostdata = shost_priv(instance); 572 - 573 - snprintf(hostdata->info, sizeof(hostdata->info), 574 - "%s, io_port 0x%lx, n_io_port %d, " 575 - "base 0x%lx, irq %d, " 576 - "can_queue %d, cmd_per_lun %d, " 577 - "sg_tablesize %d, this_id %d, " 578 - "flags { %s%s}, " 579 - "options { %s} ", 580 - instance->hostt->name, instance->io_port, instance->n_io_port, 581 - instance->base, instance->irq, 582 - instance->can_queue, instance->cmd_per_lun, 583 - instance->sg_tablesize, instance->this_id, 584 - hostdata->flags & FLAG_TAGGED_QUEUING ? "TAGGED_QUEUING " : "", 585 - hostdata->flags & FLAG_TOSHIBA_DELAY ? 
"TOSHIBA_DELAY " : "", 586 - #ifdef DIFFERENTIAL 587 - "DIFFERENTIAL " 588 - #endif 589 - #ifdef REAL_DMA 590 - "REAL_DMA " 591 - #endif 592 - #ifdef PARITY 593 - "PARITY " 594 - #endif 595 - #ifdef SUPPORT_TAGS 596 - "SUPPORT_TAGS " 597 - #endif 598 - ""); 599 - } 600 - 601 - /** 602 - * NCR5380_init - initialise an NCR5380 603 - * @instance: adapter to configure 604 - * @flags: control flags 605 - * 606 - * Initializes *instance and corresponding 5380 chip, 607 - * with flags OR'd into the initial flags value. 608 - * 609 - * Notes : I assume that the host, hostno, and id bits have been 610 - * set correctly. I don't care about the irq and other fields. 611 - * 612 - * Returns 0 for success 613 - */ 614 - 615 - static int __init NCR5380_init(struct Scsi_Host *instance, int flags) 616 - { 617 - struct NCR5380_hostdata *hostdata = shost_priv(instance); 618 - int i; 619 - unsigned long deadline; 620 - 621 - hostdata->host = instance; 622 - hostdata->id_mask = 1 << instance->this_id; 623 - hostdata->id_higher_mask = 0; 624 - for (i = hostdata->id_mask; i <= 0x80; i <<= 1) 625 - if (i > hostdata->id_mask) 626 - hostdata->id_higher_mask |= i; 627 - for (i = 0; i < 8; ++i) 628 - hostdata->busy[i] = 0; 629 - #ifdef SUPPORT_TAGS 630 - init_tags(hostdata); 631 - #endif 632 - #if defined (REAL_DMA) 633 - hostdata->dma_len = 0; 634 - #endif 635 - spin_lock_init(&hostdata->lock); 636 - hostdata->connected = NULL; 637 - hostdata->sensing = NULL; 638 - INIT_LIST_HEAD(&hostdata->autosense); 639 - INIT_LIST_HEAD(&hostdata->unissued); 640 - INIT_LIST_HEAD(&hostdata->disconnected); 641 - 642 - hostdata->flags = flags; 643 - 644 - INIT_WORK(&hostdata->main_task, NCR5380_main); 645 - hostdata->work_q = alloc_workqueue("ncr5380_%d", 646 - WQ_UNBOUND | WQ_MEM_RECLAIM, 647 - 1, instance->host_no); 648 - if (!hostdata->work_q) 649 - return -ENOMEM; 650 - 651 - prepare_info(instance); 652 - 653 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 654 - NCR5380_write(MODE_REG, MR_BASE); 655 - 
NCR5380_write(TARGET_COMMAND_REG, 0); 656 - NCR5380_write(SELECT_ENABLE_REG, 0); 657 - 658 - /* Calibrate register polling loop */ 659 - i = 0; 660 - deadline = jiffies + 1; 661 - do { 662 - cpu_relax(); 663 - } while (time_is_after_jiffies(deadline)); 664 - deadline += msecs_to_jiffies(256); 665 - do { 666 - NCR5380_read(STATUS_REG); 667 - ++i; 668 - cpu_relax(); 669 - } while (time_is_after_jiffies(deadline)); 670 - hostdata->accesses_per_ms = i / 256; 671 - 672 - return 0; 673 - } 674 - 675 - /** 676 - * NCR5380_maybe_reset_bus - Detect and correct bus wedge problems. 677 - * @instance: adapter to check 678 - * 679 - * If the system crashed, it may have crashed with a connected target and 680 - * the SCSI bus busy. Check for BUS FREE phase. If not, try to abort the 681 - * currently established nexus, which we know nothing about. Failing that 682 - * do a bus reset. 683 - * 684 - * Note that a bus reset will cause the chip to assert IRQ. 685 - * 686 - * Returns 0 if successful, otherwise -ENXIO. 687 - */ 688 - 689 - static int NCR5380_maybe_reset_bus(struct Scsi_Host *instance) 690 - { 691 - struct NCR5380_hostdata *hostdata = shost_priv(instance); 692 - int pass; 693 - 694 - for (pass = 1; (NCR5380_read(STATUS_REG) & SR_BSY) && pass <= 6; ++pass) { 695 - switch (pass) { 696 - case 1: 697 - case 3: 698 - case 5: 699 - shost_printk(KERN_ERR, instance, "SCSI bus busy, waiting up to five seconds\n"); 700 - NCR5380_poll_politely(instance, 701 - STATUS_REG, SR_BSY, 0, 5 * HZ); 702 - break; 703 - case 2: 704 - shost_printk(KERN_ERR, instance, "bus busy, attempting abort\n"); 705 - do_abort(instance); 706 - break; 707 - case 4: 708 - shost_printk(KERN_ERR, instance, "bus busy, attempting reset\n"); 709 - do_reset(instance); 710 - /* Wait after a reset; the SCSI standard calls for 711 - * 250ms, we wait 500ms to be on the safe side. 712 - * But some Toshiba CD-ROMs need ten times that. 
713 - */ 714 - if (hostdata->flags & FLAG_TOSHIBA_DELAY) 715 - msleep(2500); 716 - else 717 - msleep(500); 718 - break; 719 - case 6: 720 - shost_printk(KERN_ERR, instance, "bus locked solid\n"); 721 - return -ENXIO; 722 - } 723 - } 724 - return 0; 725 - } 726 - 727 - /** 728 - * NCR5380_exit - remove an NCR5380 729 - * @instance: adapter to remove 730 - * 731 - * Assumes that no more work can be queued (e.g. by NCR5380_intr). 732 - */ 733 - 734 - static void NCR5380_exit(struct Scsi_Host *instance) 735 - { 736 - struct NCR5380_hostdata *hostdata = shost_priv(instance); 737 - 738 - cancel_work_sync(&hostdata->main_task); 739 - destroy_workqueue(hostdata->work_q); 740 - } 741 - 742 - /** 743 - * complete_cmd - finish processing a command and return it to the SCSI ML 744 - * @instance: the host instance 745 - * @cmd: command to complete 746 - */ 747 - 748 - static void complete_cmd(struct Scsi_Host *instance, 749 - struct scsi_cmnd *cmd) 750 - { 751 - struct NCR5380_hostdata *hostdata = shost_priv(instance); 752 - 753 - dsprintk(NDEBUG_QUEUES, instance, "complete_cmd: cmd %p\n", cmd); 754 - 755 - if (hostdata->sensing == cmd) { 756 - /* Autosense processing ends here */ 757 - if ((cmd->result & 0xff) != SAM_STAT_GOOD) { 758 - scsi_eh_restore_cmnd(cmd, &hostdata->ses); 759 - set_host_byte(cmd, DID_ERROR); 760 - } else 761 - scsi_eh_restore_cmnd(cmd, &hostdata->ses); 762 - hostdata->sensing = NULL; 763 - } 764 - 765 - #ifdef SUPPORT_TAGS 766 - cmd_free_tag(cmd); 767 - #else 768 - hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun); 769 - #endif 770 - cmd->scsi_done(cmd); 771 - } 772 - 773 - /** 774 - * NCR5380_queue_command - queue a command 775 - * @instance: the relevant SCSI adapter 776 - * @cmd: SCSI command 777 - * 778 - * cmd is added to the per-instance issue queue, with minor 779 - * twiddling done to the host specific fields of cmd. If the 780 - * main coroutine is not running, it is restarted. 
781 - */ 782 - 783 - static int NCR5380_queue_command(struct Scsi_Host *instance, 784 - struct scsi_cmnd *cmd) 785 - { 786 - struct NCR5380_hostdata *hostdata = shost_priv(instance); 787 - struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd); 788 - unsigned long flags; 789 - 790 - #if (NDEBUG & NDEBUG_NO_WRITE) 791 - switch (cmd->cmnd[0]) { 792 - case WRITE_6: 793 - case WRITE_10: 794 - shost_printk(KERN_DEBUG, instance, "WRITE attempted with NDEBUG_NO_WRITE set\n"); 795 - cmd->result = (DID_ERROR << 16); 796 - cmd->scsi_done(cmd); 797 - return 0; 798 - } 799 - #endif /* (NDEBUG & NDEBUG_NO_WRITE) */ 800 - 801 - cmd->result = 0; 802 - 803 - /* 804 - * ++roman: Just disabling the NCR interrupt isn't sufficient here, 805 - * because also a timer int can trigger an abort or reset, which would 806 - * alter queues and touch the lock. 807 - */ 808 - if (!NCR5380_acquire_dma_irq(instance)) 809 - return SCSI_MLQUEUE_HOST_BUSY; 810 - 811 - spin_lock_irqsave(&hostdata->lock, flags); 812 - 813 - /* 814 - * Insert the cmd into the issue queue. Note that REQUEST SENSE 815 - * commands are added to the head of the queue since any command will 816 - * clear the contingent allegiance condition that exists and the 817 - * sense data is only guaranteed to be valid while the condition exists. 818 - */ 819 - 820 - if (cmd->cmnd[0] == REQUEST_SENSE) 821 - list_add(&ncmd->list, &hostdata->unissued); 822 - else 823 - list_add_tail(&ncmd->list, &hostdata->unissued); 824 - 825 - spin_unlock_irqrestore(&hostdata->lock, flags); 826 - 827 - dsprintk(NDEBUG_QUEUES, instance, "command %p added to %s of queue\n", 828 - cmd, (cmd->cmnd[0] == REQUEST_SENSE) ? 
"head" : "tail"); 829 - 830 - /* Kick off command processing */ 831 - queue_work(hostdata->work_q, &hostdata->main_task); 832 - return 0; 833 - } 834 - 835 - static inline void maybe_release_dma_irq(struct Scsi_Host *instance) 836 - { 837 - struct NCR5380_hostdata *hostdata = shost_priv(instance); 838 - 839 - /* Caller does the locking needed to set & test these data atomically */ 840 - if (list_empty(&hostdata->disconnected) && 841 - list_empty(&hostdata->unissued) && 842 - list_empty(&hostdata->autosense) && 843 - !hostdata->connected && 844 - !hostdata->selecting) 845 - NCR5380_release_dma_irq(instance); 846 - } 847 - 848 - /** 849 - * dequeue_next_cmd - dequeue a command for processing 850 - * @instance: the scsi host instance 851 - * 852 - * Priority is given to commands on the autosense queue. These commands 853 - * need autosense because of a CHECK CONDITION result. 854 - * 855 - * Returns a command pointer if a command is found for a target that is 856 - * not already busy. Otherwise returns NULL. 
857 - */ 858 - 859 - static struct scsi_cmnd *dequeue_next_cmd(struct Scsi_Host *instance) 860 - { 861 - struct NCR5380_hostdata *hostdata = shost_priv(instance); 862 - struct NCR5380_cmd *ncmd; 863 - struct scsi_cmnd *cmd; 864 - 865 - if (hostdata->sensing || list_empty(&hostdata->autosense)) { 866 - list_for_each_entry(ncmd, &hostdata->unissued, list) { 867 - cmd = NCR5380_to_scmd(ncmd); 868 - dsprintk(NDEBUG_QUEUES, instance, "dequeue: cmd=%p target=%d busy=0x%02x lun=%llu\n", 869 - cmd, scmd_id(cmd), hostdata->busy[scmd_id(cmd)], cmd->device->lun); 870 - 871 - if ( 872 - #ifdef SUPPORT_TAGS 873 - !is_lun_busy(cmd, 1) 874 - #else 875 - !(hostdata->busy[scmd_id(cmd)] & (1 << cmd->device->lun)) 876 - #endif 877 - ) { 878 - list_del(&ncmd->list); 879 - dsprintk(NDEBUG_QUEUES, instance, 880 - "dequeue: removed %p from issue queue\n", cmd); 881 - return cmd; 882 - } 883 - } 884 - } else { 885 - /* Autosense processing begins here */ 886 - ncmd = list_first_entry(&hostdata->autosense, 887 - struct NCR5380_cmd, list); 888 - list_del(&ncmd->list); 889 - cmd = NCR5380_to_scmd(ncmd); 890 - dsprintk(NDEBUG_QUEUES, instance, 891 - "dequeue: removed %p from autosense queue\n", cmd); 892 - scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0); 893 - hostdata->sensing = cmd; 894 - return cmd; 895 - } 896 - return NULL; 897 - } 898 - 899 - static void requeue_cmd(struct Scsi_Host *instance, struct scsi_cmnd *cmd) 900 - { 901 - struct NCR5380_hostdata *hostdata = shost_priv(instance); 902 - struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd); 903 - 904 - if (hostdata->sensing == cmd) { 905 - scsi_eh_restore_cmnd(cmd, &hostdata->ses); 906 - list_add(&ncmd->list, &hostdata->autosense); 907 - hostdata->sensing = NULL; 908 - } else 909 - list_add(&ncmd->list, &hostdata->unissued); 910 - } 911 - 912 - /** 913 - * NCR5380_main - NCR state machines 914 - * 915 - * NCR5380_main is a coroutine that runs as long as more work can 916 - * be done on the NCR5380 host adapters in a system. 
Both 917 - * NCR5380_queue_command() and NCR5380_intr() will try to start it 918 - * in case it is not running. 919 - */ 920 - 921 - static void NCR5380_main(struct work_struct *work) 922 - { 923 - struct NCR5380_hostdata *hostdata = 924 - container_of(work, struct NCR5380_hostdata, main_task); 925 - struct Scsi_Host *instance = hostdata->host; 926 - int done; 927 - 928 - /* 929 - * ++roman: Just disabling the NCR interrupt isn't sufficient here, 930 - * because also a timer int can trigger an abort or reset, which can 931 - * alter queues and touch the Falcon lock. 932 - */ 933 - 934 - do { 935 - done = 1; 936 - 937 - spin_lock_irq(&hostdata->lock); 938 - while (!hostdata->connected && !hostdata->selecting) { 939 - struct scsi_cmnd *cmd = dequeue_next_cmd(instance); 940 - 941 - if (!cmd) 942 - break; 943 - 944 - dsprintk(NDEBUG_MAIN, instance, "main: dequeued %p\n", cmd); 945 - 946 - /* 947 - * Attempt to establish an I_T_L nexus here. 948 - * On success, instance->hostdata->connected is set. 949 - * On failure, we must add the command back to the 950 - * issue queue so we can keep trying. 951 - */ 952 - /* 953 - * REQUEST SENSE commands are issued without tagged 954 - * queueing, even on SCSI-II devices because the 955 - * contingent allegiance condition exists for the 956 - * entire unit. 957 - */ 958 - /* ++roman: ...and the standard also requires that 959 - * REQUEST SENSE command are untagged. 
960 - */ 961 - 962 - #ifdef SUPPORT_TAGS 963 - cmd_get_tag(cmd, cmd->cmnd[0] != REQUEST_SENSE); 964 - #endif 965 - if (!NCR5380_select(instance, cmd)) { 966 - dsprintk(NDEBUG_MAIN, instance, "main: select complete\n"); 967 - maybe_release_dma_irq(instance); 968 - } else { 969 - dsprintk(NDEBUG_MAIN | NDEBUG_QUEUES, instance, 970 - "main: select failed, returning %p to queue\n", cmd); 971 - requeue_cmd(instance, cmd); 972 - #ifdef SUPPORT_TAGS 973 - cmd_free_tag(cmd); 974 - #endif 975 - } 976 - } 977 - if (hostdata->connected 978 - #ifdef REAL_DMA 979 - && !hostdata->dma_len 980 - #endif 981 - ) { 982 - dsprintk(NDEBUG_MAIN, instance, "main: performing information transfer\n"); 983 - NCR5380_information_transfer(instance); 984 - done = 0; 985 - } 986 - spin_unlock_irq(&hostdata->lock); 987 - if (!done) 988 - cond_resched(); 989 - } while (!done); 990 - } 991 - 992 - 993 - #ifdef REAL_DMA 994 - /* 995 - * Function : void NCR5380_dma_complete (struct Scsi_Host *instance) 996 - * 997 - * Purpose : Called by interrupt handler when DMA finishes or a phase 998 - * mismatch occurs (which would finish the DMA transfer). 999 - * 1000 - * Inputs : instance - this instance of the NCR5380. 
1001 - */ 1002 - 1003 - static void NCR5380_dma_complete(struct Scsi_Host *instance) 1004 - { 1005 - struct NCR5380_hostdata *hostdata = shost_priv(instance); 1006 - int transferred; 1007 - unsigned char **data; 1008 - int *count; 1009 - int saved_data = 0, overrun = 0; 1010 - unsigned char p; 1011 - 1012 - if (hostdata->read_overruns) { 1013 - p = hostdata->connected->SCp.phase; 1014 - if (p & SR_IO) { 1015 - udelay(10); 1016 - if ((NCR5380_read(BUS_AND_STATUS_REG) & 1017 - (BASR_PHASE_MATCH|BASR_ACK)) == 1018 - (BASR_PHASE_MATCH|BASR_ACK)) { 1019 - saved_data = NCR5380_read(INPUT_DATA_REG); 1020 - overrun = 1; 1021 - dsprintk(NDEBUG_DMA, instance, "read overrun handled\n"); 1022 - } 1023 - } 1024 - } 1025 - 1026 - #if defined(CONFIG_SUN3) 1027 - if ((sun3scsi_dma_finish(rq_data_dir(hostdata->connected->request)))) { 1028 - pr_err("scsi%d: overrun in UDC counter -- not prepared to deal with this!\n", 1029 - instance->host_no); 1030 - BUG(); 1031 - } 1032 - 1033 - /* make sure we're not stuck in a data phase */ 1034 - if ((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | BASR_ACK)) == 1035 - (BASR_PHASE_MATCH | BASR_ACK)) { 1036 - pr_err("scsi%d: BASR %02x\n", instance->host_no, 1037 - NCR5380_read(BUS_AND_STATUS_REG)); 1038 - pr_err("scsi%d: bus stuck in data phase -- probably a single byte overrun!\n", 1039 - instance->host_no); 1040 - BUG(); 1041 - } 1042 - #endif 1043 - 1044 - NCR5380_write(MODE_REG, MR_BASE); 1045 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 1046 - NCR5380_read(RESET_PARITY_INTERRUPT_REG); 1047 - 1048 - transferred = hostdata->dma_len - NCR5380_dma_residual(instance); 1049 - hostdata->dma_len = 0; 1050 - 1051 - data = (unsigned char **)&hostdata->connected->SCp.ptr; 1052 - count = &hostdata->connected->SCp.this_residual; 1053 - *data += transferred; 1054 - *count -= transferred; 1055 - 1056 - if (hostdata->read_overruns) { 1057 - int cnt, toPIO; 1058 - 1059 - if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) { 1060 - 
cnt = toPIO = hostdata->read_overruns; 1061 - if (overrun) { 1062 - dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n"); 1063 - *(*data)++ = saved_data; 1064 - (*count)--; 1065 - cnt--; 1066 - toPIO--; 1067 - } 1068 - dprintk(NDEBUG_DMA, "Doing %d-byte PIO to 0x%08lx\n", cnt, (long)*data); 1069 - NCR5380_transfer_pio(instance, &p, &cnt, data); 1070 - *count -= toPIO - cnt; 1071 - } 1072 - } 1073 - } 1074 - #endif /* REAL_DMA */ 1075 - 1076 - 1077 - /** 1078 - * NCR5380_intr - generic NCR5380 irq handler 1079 - * @irq: interrupt number 1080 - * @dev_id: device info 1081 - * 1082 - * Handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses 1083 - * from the disconnected queue, and restarting NCR5380_main() 1084 - * as required. 1085 - * 1086 - * The chip can assert IRQ in any of six different conditions. The IRQ flag 1087 - * is then cleared by reading the Reset Parity/Interrupt Register (RPIR). 1088 - * Three of these six conditions are latched in the Bus and Status Register: 1089 - * - End of DMA (cleared by ending DMA Mode) 1090 - * - Parity error (cleared by reading RPIR) 1091 - * - Loss of BSY (cleared by reading RPIR) 1092 - * Two conditions have flag bits that are not latched: 1093 - * - Bus phase mismatch (non-maskable in DMA Mode, cleared by ending DMA Mode) 1094 - * - Bus reset (non-maskable) 1095 - * The remaining condition has no flag bit at all: 1096 - * - Selection/reselection 1097 - * 1098 - * Hence, establishing the cause(s) of any interrupt is partly guesswork. 1099 - * In "The DP8490 and DP5380 Comparison Guide", National Semiconductor 1100 - * claimed that "the design of the [DP8490] interrupt logic ensures 1101 - * interrupts will not be lost (they can be on the DP5380)." 1102 - * The L5380/53C80 datasheet from LOGIC Devices has more details. 1103 - * 1104 - * Checking for bus reset by reading RST is futile because of interrupt 1105 - * latency, but a bus reset will reset chip logic. 
Checking for parity error 1106 - * is unnecessary because that interrupt is never enabled. A Loss of BSY 1107 - * condition will clear DMA Mode. We can tell when this occurs because the 1108 - * the Busy Monitor interrupt is enabled together with DMA Mode. 1109 - */ 1110 - 1111 - static irqreturn_t NCR5380_intr(int irq, void *dev_id) 1112 - { 1113 - struct Scsi_Host *instance = dev_id; 1114 - struct NCR5380_hostdata *hostdata = shost_priv(instance); 1115 - int handled = 0; 1116 - unsigned char basr; 1117 - unsigned long flags; 1118 - 1119 - spin_lock_irqsave(&hostdata->lock, flags); 1120 - 1121 - basr = NCR5380_read(BUS_AND_STATUS_REG); 1122 - if (basr & BASR_IRQ) { 1123 - unsigned char mr = NCR5380_read(MODE_REG); 1124 - unsigned char sr = NCR5380_read(STATUS_REG); 1125 - 1126 - dsprintk(NDEBUG_INTR, instance, "IRQ %d, BASR 0x%02x, SR 0x%02x, MR 0x%02x\n", 1127 - irq, basr, sr, mr); 1128 - 1129 - #if defined(REAL_DMA) 1130 - if ((mr & MR_DMA_MODE) || (mr & MR_MONITOR_BSY)) { 1131 - /* Probably End of DMA, Phase Mismatch or Loss of BSY. 1132 - * We ack IRQ after clearing Mode Register. Workarounds 1133 - * for End of DMA errata need to happen in DMA Mode. 
1134 - */ 1135 - 1136 - dsprintk(NDEBUG_INTR, instance, "interrupt in DMA mode\n"); 1137 - 1138 - if (hostdata->connected) { 1139 - NCR5380_dma_complete(instance); 1140 - queue_work(hostdata->work_q, &hostdata->main_task); 1141 - } else { 1142 - NCR5380_write(MODE_REG, MR_BASE); 1143 - NCR5380_read(RESET_PARITY_INTERRUPT_REG); 1144 - } 1145 - } else 1146 - #endif /* REAL_DMA */ 1147 - if ((NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_mask) && 1148 - (sr & (SR_SEL | SR_IO | SR_BSY | SR_RST)) == (SR_SEL | SR_IO)) { 1149 - /* Probably reselected */ 1150 - NCR5380_write(SELECT_ENABLE_REG, 0); 1151 - NCR5380_read(RESET_PARITY_INTERRUPT_REG); 1152 - 1153 - dsprintk(NDEBUG_INTR, instance, "interrupt with SEL and IO\n"); 1154 - 1155 - if (!hostdata->connected) { 1156 - NCR5380_reselect(instance); 1157 - queue_work(hostdata->work_q, &hostdata->main_task); 1158 - } 1159 - if (!hostdata->connected) 1160 - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); 1161 - } else { 1162 - /* Probably Bus Reset */ 1163 - NCR5380_read(RESET_PARITY_INTERRUPT_REG); 1164 - 1165 - dsprintk(NDEBUG_INTR, instance, "unknown interrupt\n"); 1166 - #ifdef SUN3_SCSI_VME 1167 - dregs->csr |= CSR_DMA_ENABLE; 1168 - #endif 1169 - } 1170 - handled = 1; 1171 - } else { 1172 - shost_printk(KERN_NOTICE, instance, "interrupt without IRQ bit\n"); 1173 - #ifdef SUN3_SCSI_VME 1174 - dregs->csr |= CSR_DMA_ENABLE; 1175 - #endif 1176 - } 1177 - 1178 - spin_unlock_irqrestore(&hostdata->lock, flags); 1179 - 1180 - return IRQ_RETVAL(handled); 1181 - } 1182 - 1183 - /* 1184 - * Function : int NCR5380_select(struct Scsi_Host *instance, 1185 - * struct scsi_cmnd *cmd) 1186 - * 1187 - * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command, 1188 - * including ARBITRATION, SELECTION, and initial message out for 1189 - * IDENTIFY and queue messages. 1190 - * 1191 - * Inputs : instance - instantiation of the 5380 driver on which this 1192 - * target lives, cmd - SCSI command to execute. 
1193 - * 1194 - * Returns cmd if selection failed but should be retried, 1195 - * NULL if selection failed and should not be retried, or 1196 - * NULL if selection succeeded (hostdata->connected == cmd). 1197 - * 1198 - * Side effects : 1199 - * If bus busy, arbitration failed, etc, NCR5380_select() will exit 1200 - * with registers as they should have been on entry - ie 1201 - * SELECT_ENABLE will be set appropriately, the NCR5380 1202 - * will cease to drive any SCSI bus signals. 1203 - * 1204 - * If successful : I_T_L or I_T_L_Q nexus will be established, 1205 - * instance->connected will be set to cmd. 1206 - * SELECT interrupt will be disabled. 1207 - * 1208 - * If failed (no target) : cmd->scsi_done() will be called, and the 1209 - * cmd->result host byte set to DID_BAD_TARGET. 1210 - */ 1211 - 1212 - static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance, 1213 - struct scsi_cmnd *cmd) 1214 - { 1215 - struct NCR5380_hostdata *hostdata = shost_priv(instance); 1216 - unsigned char tmp[3], phase; 1217 - unsigned char *data; 1218 - int len; 1219 - int err; 1220 - 1221 - NCR5380_dprint(NDEBUG_ARBITRATION, instance); 1222 - dsprintk(NDEBUG_ARBITRATION, instance, "starting arbitration, id = %d\n", 1223 - instance->this_id); 1224 - 1225 - /* 1226 - * Arbitration and selection phases are slow and involve dropping the 1227 - * lock, so we have to watch out for EH. An exception handler may 1228 - * change 'selecting' to NULL. This function will then return NULL 1229 - * so that the caller will forget about 'cmd'. (During information 1230 - * transfer phases, EH may change 'connected' to NULL.) 1231 - */ 1232 - hostdata->selecting = cmd; 1233 - 1234 - /* 1235 - * Set the phase bits to 0, otherwise the NCR5380 won't drive the 1236 - * data bus during SELECTION. 1237 - */ 1238 - 1239 - NCR5380_write(TARGET_COMMAND_REG, 0); 1240 - 1241 - /* 1242 - * Start arbitration. 
1243 - */ 1244 - 1245 - NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask); 1246 - NCR5380_write(MODE_REG, MR_ARBITRATE); 1247 - 1248 - /* The chip now waits for BUS FREE phase. Then after the 800 ns 1249 - * Bus Free Delay, arbitration will begin. 1250 - */ 1251 - 1252 - spin_unlock_irq(&hostdata->lock); 1253 - err = NCR5380_poll_politely2(instance, MODE_REG, MR_ARBITRATE, 0, 1254 - INITIATOR_COMMAND_REG, ICR_ARBITRATION_PROGRESS, 1255 - ICR_ARBITRATION_PROGRESS, HZ); 1256 - spin_lock_irq(&hostdata->lock); 1257 - if (!(NCR5380_read(MODE_REG) & MR_ARBITRATE)) { 1258 - /* Reselection interrupt */ 1259 - goto out; 1260 - } 1261 - if (!hostdata->selecting) { 1262 - /* Command was aborted */ 1263 - NCR5380_write(MODE_REG, MR_BASE); 1264 - goto out; 1265 - } 1266 - if (err < 0) { 1267 - NCR5380_write(MODE_REG, MR_BASE); 1268 - shost_printk(KERN_ERR, instance, 1269 - "select: arbitration timeout\n"); 1270 - goto out; 1271 - } 1272 - spin_unlock_irq(&hostdata->lock); 1273 - 1274 - /* The SCSI-2 arbitration delay is 2.4 us */ 1275 - udelay(3); 1276 - 1277 - /* Check for lost arbitration */ 1278 - if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || 1279 - (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) || 1280 - (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) { 1281 - NCR5380_write(MODE_REG, MR_BASE); 1282 - dsprintk(NDEBUG_ARBITRATION, instance, "lost arbitration, deasserting MR_ARBITRATE\n"); 1283 - spin_lock_irq(&hostdata->lock); 1284 - goto out; 1285 - } 1286 - 1287 - /* After/during arbitration, BSY should be asserted. 1288 - * IBM DPES-31080 Version S31Q works now 1289 - * Tnx to Thomas_Roesch@m2.maus.de for finding this! 
(Roman) 1290 - */ 1291 - NCR5380_write(INITIATOR_COMMAND_REG, 1292 - ICR_BASE | ICR_ASSERT_SEL | ICR_ASSERT_BSY); 1293 - 1294 - /* 1295 - * Again, bus clear + bus settle time is 1.2us, however, this is 1296 - * a minimum so we'll udelay ceil(1.2) 1297 - */ 1298 - 1299 - if (hostdata->flags & FLAG_TOSHIBA_DELAY) 1300 - udelay(15); 1301 - else 1302 - udelay(2); 1303 - 1304 - spin_lock_irq(&hostdata->lock); 1305 - 1306 - /* NCR5380_reselect() clears MODE_REG after a reselection interrupt */ 1307 - if (!(NCR5380_read(MODE_REG) & MR_ARBITRATE)) 1308 - goto out; 1309 - 1310 - if (!hostdata->selecting) { 1311 - NCR5380_write(MODE_REG, MR_BASE); 1312 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 1313 - goto out; 1314 - } 1315 - 1316 - dsprintk(NDEBUG_ARBITRATION, instance, "won arbitration\n"); 1317 - 1318 - /* 1319 - * Now that we have won arbitration, start Selection process, asserting 1320 - * the host and target ID's on the SCSI bus. 1321 - */ 1322 - 1323 - NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask | (1 << scmd_id(cmd))); 1324 - 1325 - /* 1326 - * Raise ATN while SEL is true before BSY goes false from arbitration, 1327 - * since this is the only way to guarantee that we'll get a MESSAGE OUT 1328 - * phase immediately after selection. 1329 - */ 1330 - 1331 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY | 1332 - ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL); 1333 - NCR5380_write(MODE_REG, MR_BASE); 1334 - 1335 - /* 1336 - * Reselect interrupts must be turned off prior to the dropping of BSY, 1337 - * otherwise we will trigger an interrupt. 1338 - */ 1339 - NCR5380_write(SELECT_ENABLE_REG, 0); 1340 - 1341 - spin_unlock_irq(&hostdata->lock); 1342 - 1343 - /* 1344 - * The initiator shall then wait at least two deskew delays and release 1345 - * the BSY signal. 
1346 - */ 1347 - udelay(1); /* wingel -- wait two bus deskew delay >2*45ns */ 1348 - 1349 - /* Reset BSY */ 1350 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | 1351 - ICR_ASSERT_ATN | ICR_ASSERT_SEL); 1352 - 1353 - /* 1354 - * Something weird happens when we cease to drive BSY - looks 1355 - * like the board/chip is letting us do another read before the 1356 - * appropriate propagation delay has expired, and we're confusing 1357 - * a BSY signal from ourselves as the target's response to SELECTION. 1358 - * 1359 - * A small delay (the 'C++' frontend breaks the pipeline with an 1360 - * unnecessary jump, making it work on my 386-33/Trantor T128, the 1361 - * tighter 'C' code breaks and requires this) solves the problem - 1362 - * the 1 us delay is arbitrary, and only used because this delay will 1363 - * be the same on other platforms and since it works here, it should 1364 - * work there. 1365 - * 1366 - * wingel suggests that this could be due to failing to wait 1367 - * one deskew delay. 1368 - */ 1369 - 1370 - udelay(1); 1371 - 1372 - dsprintk(NDEBUG_SELECTION, instance, "selecting target %d\n", scmd_id(cmd)); 1373 - 1374 - /* 1375 - * The SCSI specification calls for a 250 ms timeout for the actual 1376 - * selection. 
1377 - */ 1378 - 1379 - err = NCR5380_poll_politely(instance, STATUS_REG, SR_BSY, SR_BSY, 1380 - msecs_to_jiffies(250)); 1381 - 1382 - if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) { 1383 - spin_lock_irq(&hostdata->lock); 1384 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 1385 - NCR5380_reselect(instance); 1386 - if (!hostdata->connected) 1387 - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); 1388 - shost_printk(KERN_ERR, instance, "reselection after won arbitration?\n"); 1389 - goto out; 1390 - } 1391 - 1392 - if (err < 0) { 1393 - spin_lock_irq(&hostdata->lock); 1394 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 1395 - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); 1396 - /* Can't touch cmd if it has been reclaimed by the scsi ML */ 1397 - if (hostdata->selecting) { 1398 - cmd->result = DID_BAD_TARGET << 16; 1399 - complete_cmd(instance, cmd); 1400 - dsprintk(NDEBUG_SELECTION, instance, "target did not respond within 250ms\n"); 1401 - cmd = NULL; 1402 - } 1403 - goto out; 1404 - } 1405 - 1406 - /* 1407 - * No less than two deskew delays after the initiator detects the 1408 - * BSY signal is true, it shall release the SEL signal and may 1409 - * change the DATA BUS. -wingel 1410 - */ 1411 - 1412 - udelay(1); 1413 - 1414 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); 1415 - 1416 - /* 1417 - * Since we followed the SCSI spec, and raised ATN while SEL 1418 - * was true but before BSY was false during selection, the information 1419 - * transfer phase should be a MESSAGE OUT phase so that we can send the 1420 - * IDENTIFY message. 1421 - * 1422 - * If SCSI-II tagged queuing is enabled, we also send a SIMPLE_QUEUE_TAG 1423 - * message (2 bytes) with a tag ID that we increment with every command 1424 - * until it wraps back to 0. 
1425 - * 1426 - * XXX - it turns out that there are some broken SCSI-II devices, 1427 - * which claim to support tagged queuing but fail when more than 1428 - * some number of commands are issued at once. 1429 - */ 1430 - 1431 - /* Wait for start of REQ/ACK handshake */ 1432 - 1433 - err = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ); 1434 - spin_lock_irq(&hostdata->lock); 1435 - if (err < 0) { 1436 - shost_printk(KERN_ERR, instance, "select: REQ timeout\n"); 1437 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 1438 - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); 1439 - goto out; 1440 - } 1441 - if (!hostdata->selecting) { 1442 - do_abort(instance); 1443 - goto out; 1444 - } 1445 - 1446 - dsprintk(NDEBUG_SELECTION, instance, "target %d selected, going into MESSAGE OUT phase.\n", 1447 - scmd_id(cmd)); 1448 - tmp[0] = IDENTIFY(1, cmd->device->lun); 1449 - 1450 - #ifdef SUPPORT_TAGS 1451 - if (cmd->tag != TAG_NONE) { 1452 - tmp[1] = hostdata->last_message = SIMPLE_QUEUE_TAG; 1453 - tmp[2] = cmd->tag; 1454 - len = 3; 1455 - } else 1456 - len = 1; 1457 - #else 1458 - len = 1; 1459 - cmd->tag = 0; 1460 - #endif /* SUPPORT_TAGS */ 1461 - 1462 - /* Send message(s) */ 1463 - data = tmp; 1464 - phase = PHASE_MSGOUT; 1465 - NCR5380_transfer_pio(instance, &phase, &len, &data); 1466 - dsprintk(NDEBUG_SELECTION, instance, "nexus established.\n"); 1467 - /* XXX need to handle errors here */ 1468 - 1469 - hostdata->connected = cmd; 1470 - #ifndef SUPPORT_TAGS 1471 - hostdata->busy[cmd->device->id] |= 1 << cmd->device->lun; 1472 - #endif 1473 - #ifdef SUN3_SCSI_VME 1474 - dregs->csr |= CSR_INTR; 1475 - #endif 1476 - 1477 - initialize_SCp(cmd); 1478 - 1479 - cmd = NULL; 1480 - 1481 - out: 1482 - if (!hostdata->selecting) 1483 - return NULL; 1484 - hostdata->selecting = NULL; 1485 - return cmd; 1486 - } 1487 - 1488 - /* 1489 - * Function : int NCR5380_transfer_pio (struct Scsi_Host *instance, 1490 - * unsigned char *phase, int *count, unsigned char **data) 
1491 - * 1492 - * Purpose : transfers data in given phase using polled I/O 1493 - * 1494 - * Inputs : instance - instance of driver, *phase - pointer to 1495 - * what phase is expected, *count - pointer to number of 1496 - * bytes to transfer, **data - pointer to data pointer. 1497 - * 1498 - * Returns : -1 when different phase is entered without transferring 1499 - * maximum number of bytes, 0 if all bytes are transferred or exit 1500 - * is in same phase. 1501 - * 1502 - * Also, *phase, *count, *data are modified in place. 1503 - * 1504 - * XXX Note : handling for bus free may be useful. 1505 - */ 1506 - 1507 - /* 1508 - * Note : this code is not as quick as it could be, however it 1509 - * IS 100% reliable, and for the actual data transfer where speed 1510 - * counts, we will always do a pseudo DMA or DMA transfer. 1511 - */ 1512 - 1513 - static int NCR5380_transfer_pio(struct Scsi_Host *instance, 1514 - unsigned char *phase, int *count, 1515 - unsigned char **data) 1516 - { 1517 - unsigned char p = *phase, tmp; 1518 - int c = *count; 1519 - unsigned char *d = *data; 1520 - 1521 - /* 1522 - * The NCR5380 chip will only drive the SCSI bus when the 1523 - * phase specified in the appropriate bits of the TARGET COMMAND 1524 - * REGISTER match the STATUS REGISTER 1525 - */ 1526 - 1527 - NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p)); 1528 - 1529 - do { 1530 - /* 1531 - * Wait for assertion of REQ, after which the phase bits will be 1532 - * valid 1533 - */ 1534 - 1535 - if (NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ) < 0) 1536 - break; 1537 - 1538 - dsprintk(NDEBUG_HANDSHAKE, instance, "REQ asserted\n"); 1539 - 1540 - /* Check for phase mismatch */ 1541 - if ((NCR5380_read(STATUS_REG) & PHASE_MASK) != p) { 1542 - dsprintk(NDEBUG_PIO, instance, "phase mismatch\n"); 1543 - NCR5380_dprint_phase(NDEBUG_PIO, instance); 1544 - break; 1545 - } 1546 - 1547 - /* Do actual transfer from SCSI bus to / from memory */ 1548 - if (!(p & SR_IO)) 1549 - 
NCR5380_write(OUTPUT_DATA_REG, *d); 1550 - else 1551 - *d = NCR5380_read(CURRENT_SCSI_DATA_REG); 1552 - 1553 - ++d; 1554 - 1555 - /* 1556 - * The SCSI standard suggests that in MSGOUT phase, the initiator 1557 - * should drop ATN on the last byte of the message phase 1558 - * after REQ has been asserted for the handshake but before 1559 - * the initiator raises ACK. 1560 - */ 1561 - 1562 - if (!(p & SR_IO)) { 1563 - if (!((p & SR_MSG) && c > 1)) { 1564 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA); 1565 - NCR5380_dprint(NDEBUG_PIO, instance); 1566 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | 1567 - ICR_ASSERT_DATA | ICR_ASSERT_ACK); 1568 - } else { 1569 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | 1570 - ICR_ASSERT_DATA | ICR_ASSERT_ATN); 1571 - NCR5380_dprint(NDEBUG_PIO, instance); 1572 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | 1573 - ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK); 1574 - } 1575 - } else { 1576 - NCR5380_dprint(NDEBUG_PIO, instance); 1577 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); 1578 - } 1579 - 1580 - if (NCR5380_poll_politely(instance, 1581 - STATUS_REG, SR_REQ, 0, 5 * HZ) < 0) 1582 - break; 1583 - 1584 - dsprintk(NDEBUG_HANDSHAKE, instance, "REQ negated, handshake complete\n"); 1585 - 1586 - /* 1587 - * We have several special cases to consider during REQ/ACK handshaking : 1588 - * 1. We were in MSGOUT phase, and we are on the last byte of the 1589 - * message. ATN must be dropped as ACK is dropped. 1590 - * 1591 - * 2. We are in a MSGIN phase, and we are on the last byte of the 1592 - * message. We must exit with ACK asserted, so that the calling 1593 - * code may raise ATN before dropping ACK to reject the message. 1594 - * 1595 - * 3. ACK and ATN are clear and the target may proceed as normal. 
1596 - */ 1597 - if (!(p == PHASE_MSGIN && c == 1)) { 1598 - if (p == PHASE_MSGOUT && c > 1) 1599 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); 1600 - else 1601 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 1602 - } 1603 - } while (--c); 1604 - 1605 - dsprintk(NDEBUG_PIO, instance, "residual %d\n", c); 1606 - 1607 - *count = c; 1608 - *data = d; 1609 - tmp = NCR5380_read(STATUS_REG); 1610 - /* The phase read from the bus is valid if either REQ is (already) 1611 - * asserted or if ACK hasn't been released yet. The latter applies if 1612 - * we're in MSG IN, DATA IN or STATUS and all bytes have been received. 1613 - */ 1614 - if ((tmp & SR_REQ) || ((tmp & SR_IO) && c == 0)) 1615 - *phase = tmp & PHASE_MASK; 1616 - else 1617 - *phase = PHASE_UNKNOWN; 1618 - 1619 - if (!c || (*phase == p)) 1620 - return 0; 1621 - else 1622 - return -1; 1623 - } 1624 - 1625 - /** 1626 - * do_reset - issue a reset command 1627 - * @instance: adapter to reset 1628 - * 1629 - * Issue a reset sequence to the NCR5380 and try and get the bus 1630 - * back into sane shape. 1631 - * 1632 - * This clears the reset interrupt flag because there may be no handler for 1633 - * it. When the driver is initialized, the NCR5380_intr() handler has not yet 1634 - * been installed. And when in EH we may have released the ST DMA interrupt. 1635 - */ 1636 - 1637 - static void do_reset(struct Scsi_Host *instance) 1638 - { 1639 - unsigned long flags; 1640 - 1641 - local_irq_save(flags); 1642 - NCR5380_write(TARGET_COMMAND_REG, 1643 - PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG) & PHASE_MASK)); 1644 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST); 1645 - udelay(50); 1646 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 1647 - (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); 1648 - local_irq_restore(flags); 1649 - } 1650 - 1651 - /** 1652 - * do_abort - abort the currently established nexus by going to 1653 - * MESSAGE OUT phase and sending an ABORT message. 
1654 - * @instance: relevant scsi host instance 1655 - * 1656 - * Returns 0 on success, -1 on failure. 1657 - */ 1658 - 1659 - static int do_abort(struct Scsi_Host *instance) 1660 - { 1661 - unsigned char *msgptr, phase, tmp; 1662 - int len; 1663 - int rc; 1664 - 1665 - /* Request message out phase */ 1666 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); 1667 - 1668 - /* 1669 - * Wait for the target to indicate a valid phase by asserting 1670 - * REQ. Once this happens, we'll have either a MSGOUT phase 1671 - * and can immediately send the ABORT message, or we'll have some 1672 - * other phase and will have to source/sink data. 1673 - * 1674 - * We really don't care what value was on the bus or what value 1675 - * the target sees, so we just handshake. 1676 - */ 1677 - 1678 - rc = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, 10 * HZ); 1679 - if (rc < 0) 1680 - goto timeout; 1681 - 1682 - tmp = NCR5380_read(STATUS_REG) & PHASE_MASK; 1683 - 1684 - NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); 1685 - 1686 - if (tmp != PHASE_MSGOUT) { 1687 - NCR5380_write(INITIATOR_COMMAND_REG, 1688 - ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK); 1689 - rc = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, 0, 3 * HZ); 1690 - if (rc < 0) 1691 - goto timeout; 1692 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); 1693 - } 1694 - 1695 - tmp = ABORT; 1696 - msgptr = &tmp; 1697 - len = 1; 1698 - phase = PHASE_MSGOUT; 1699 - NCR5380_transfer_pio(instance, &phase, &len, &msgptr); 1700 - 1701 - /* 1702 - * If we got here, and the command completed successfully, 1703 - * we're about to go into bus free state. 1704 - */ 1705 - 1706 - return len ? 
-1 : 0; 1707 - 1708 - timeout: 1709 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 1710 - return -1; 1711 - } 1712 - 1713 - #if defined(REAL_DMA) 1714 - /* 1715 - * Function : int NCR5380_transfer_dma (struct Scsi_Host *instance, 1716 - * unsigned char *phase, int *count, unsigned char **data) 1717 - * 1718 - * Purpose : transfers data in given phase using either real 1719 - * or pseudo DMA. 1720 - * 1721 - * Inputs : instance - instance of driver, *phase - pointer to 1722 - * what phase is expected, *count - pointer to number of 1723 - * bytes to transfer, **data - pointer to data pointer. 1724 - * 1725 - * Returns : -1 when different phase is entered without transferring 1726 - * maximum number of bytes, 0 if all bytes or transferred or exit 1727 - * is in same phase. 1728 - * 1729 - * Also, *phase, *count, *data are modified in place. 1730 - */ 1731 - 1732 - 1733 - static int NCR5380_transfer_dma(struct Scsi_Host *instance, 1734 - unsigned char *phase, int *count, 1735 - unsigned char **data) 1736 - { 1737 - struct NCR5380_hostdata *hostdata = shost_priv(instance); 1738 - register int c = *count; 1739 - register unsigned char p = *phase; 1740 - 1741 - #if defined(CONFIG_SUN3) 1742 - /* sanity check */ 1743 - if (!sun3_dma_setup_done) { 1744 - pr_err("scsi%d: transfer_dma without setup!\n", 1745 - instance->host_no); 1746 - BUG(); 1747 - } 1748 - hostdata->dma_len = c; 1749 - 1750 - dsprintk(NDEBUG_DMA, instance, "initializing DMA %s: length %d, address %p\n", 1751 - (p & SR_IO) ? 
"receive" : "send", c, *data); 1752 - 1753 - /* netbsd turns off ints here, why not be safe and do it too */ 1754 - 1755 - /* send start chain */ 1756 - sun3scsi_dma_start(c, *data); 1757 - 1758 - NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p)); 1759 - NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY | 1760 - MR_ENABLE_EOP_INTR); 1761 - if (p & SR_IO) { 1762 - NCR5380_write(INITIATOR_COMMAND_REG, 0); 1763 - NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0); 1764 - } else { 1765 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_DATA); 1766 - NCR5380_write(START_DMA_SEND_REG, 0); 1767 - } 1768 - 1769 - #ifdef SUN3_SCSI_VME 1770 - dregs->csr |= CSR_DMA_ENABLE; 1771 - #endif 1772 - 1773 - sun3_dma_active = 1; 1774 - 1775 - #else /* !defined(CONFIG_SUN3) */ 1776 - register unsigned char *d = *data; 1777 - unsigned char tmp; 1778 - 1779 - if ((tmp = (NCR5380_read(STATUS_REG) & PHASE_MASK)) != p) { 1780 - *phase = tmp; 1781 - return -1; 1782 - } 1783 - 1784 - if (hostdata->read_overruns && (p & SR_IO)) 1785 - c -= hostdata->read_overruns; 1786 - 1787 - dsprintk(NDEBUG_DMA, instance, "initializing DMA %s: length %d, address %p\n", 1788 - (p & SR_IO) ? "receive" : "send", c, d); 1789 - 1790 - NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p)); 1791 - NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY | 1792 - MR_ENABLE_EOP_INTR); 1793 - 1794 - if (!(hostdata->flags & FLAG_LATE_DMA_SETUP)) { 1795 - /* On the Medusa, it is a must to initialize the DMA before 1796 - * starting the NCR. This is also the cleaner way for the TT. 1797 - */ 1798 - hostdata->dma_len = (p & SR_IO) ? 
1799 - NCR5380_dma_read_setup(instance, d, c) : 1800 - NCR5380_dma_write_setup(instance, d, c); 1801 - } 1802 - 1803 - if (p & SR_IO) 1804 - NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0); 1805 - else { 1806 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA); 1807 - NCR5380_write(START_DMA_SEND_REG, 0); 1808 - } 1809 - 1810 - if (hostdata->flags & FLAG_LATE_DMA_SETUP) { 1811 - /* On the Falcon, the DMA setup must be done after the last */ 1812 - /* NCR access, else the DMA setup gets trashed! 1813 - */ 1814 - hostdata->dma_len = (p & SR_IO) ? 1815 - NCR5380_dma_read_setup(instance, d, c) : 1816 - NCR5380_dma_write_setup(instance, d, c); 1817 - } 1818 - #endif /* !defined(CONFIG_SUN3) */ 1819 - 1820 - return 0; 1821 - } 1822 - #endif /* defined(REAL_DMA) */ 1823 - 1824 - /* 1825 - * Function : NCR5380_information_transfer (struct Scsi_Host *instance) 1826 - * 1827 - * Purpose : run through the various SCSI phases and do as the target 1828 - * directs us to. Operates on the currently connected command, 1829 - * instance->connected. 1830 - * 1831 - * Inputs : instance, instance for which we are doing commands 1832 - * 1833 - * Side effects : SCSI things happen, the disconnected queue will be 1834 - * modified if a command disconnects, *instance->connected will 1835 - * change. 1836 - * 1837 - * XXX Note : we need to watch for bus free or a reset condition here 1838 - * to recover from an unexpected bus free condition. 
1839 - */ 1840 - 1841 - static void NCR5380_information_transfer(struct Scsi_Host *instance) 1842 - { 1843 - struct NCR5380_hostdata *hostdata = shost_priv(instance); 1844 - unsigned char msgout = NOP; 1845 - int sink = 0; 1846 - int len; 1847 - int transfersize; 1848 - unsigned char *data; 1849 - unsigned char phase, tmp, extended_msg[10], old_phase = 0xff; 1850 - struct scsi_cmnd *cmd; 1851 - 1852 - #ifdef SUN3_SCSI_VME 1853 - dregs->csr |= CSR_INTR; 1854 - #endif 1855 - 1856 - while ((cmd = hostdata->connected)) { 1857 - struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd); 1858 - 1859 - tmp = NCR5380_read(STATUS_REG); 1860 - /* We only have a valid SCSI phase when REQ is asserted */ 1861 - if (tmp & SR_REQ) { 1862 - phase = (tmp & PHASE_MASK); 1863 - if (phase != old_phase) { 1864 - old_phase = phase; 1865 - NCR5380_dprint_phase(NDEBUG_INFORMATION, instance); 1866 - } 1867 - #if defined(CONFIG_SUN3) 1868 - if (phase == PHASE_CMDOUT) { 1869 - #if defined(REAL_DMA) 1870 - void *d; 1871 - unsigned long count; 1872 - 1873 - if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) { 1874 - count = cmd->SCp.buffer->length; 1875 - d = sg_virt(cmd->SCp.buffer); 1876 - } else { 1877 - count = cmd->SCp.this_residual; 1878 - d = cmd->SCp.ptr; 1879 - } 1880 - /* this command setup for dma yet? 
*/ 1881 - if ((count >= DMA_MIN_SIZE) && (sun3_dma_setup_done != cmd)) { 1882 - if (cmd->request->cmd_type == REQ_TYPE_FS) { 1883 - sun3scsi_dma_setup(instance, d, count, 1884 - rq_data_dir(cmd->request)); 1885 - sun3_dma_setup_done = cmd; 1886 - } 1887 - } 1888 - #endif 1889 - #ifdef SUN3_SCSI_VME 1890 - dregs->csr |= CSR_INTR; 1891 - #endif 1892 - } 1893 - #endif /* CONFIG_SUN3 */ 1894 - 1895 - if (sink && (phase != PHASE_MSGOUT)) { 1896 - NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); 1897 - 1898 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN | 1899 - ICR_ASSERT_ACK); 1900 - while (NCR5380_read(STATUS_REG) & SR_REQ) 1901 - ; 1902 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | 1903 - ICR_ASSERT_ATN); 1904 - sink = 0; 1905 - continue; 1906 - } 1907 - 1908 - switch (phase) { 1909 - case PHASE_DATAOUT: 1910 - #if (NDEBUG & NDEBUG_NO_DATAOUT) 1911 - shost_printk(KERN_DEBUG, instance, "NDEBUG_NO_DATAOUT set, attempted DATAOUT aborted\n"); 1912 - sink = 1; 1913 - do_abort(instance); 1914 - cmd->result = DID_ERROR << 16; 1915 - complete_cmd(instance, cmd); 1916 - hostdata->connected = NULL; 1917 - return; 1918 - #endif 1919 - case PHASE_DATAIN: 1920 - /* 1921 - * If there is no room left in the current buffer in the 1922 - * scatter-gather list, move onto the next one. 1923 - */ 1924 - 1925 - if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) { 1926 - ++cmd->SCp.buffer; 1927 - --cmd->SCp.buffers_residual; 1928 - cmd->SCp.this_residual = cmd->SCp.buffer->length; 1929 - cmd->SCp.ptr = sg_virt(cmd->SCp.buffer); 1930 - merge_contiguous_buffers(cmd); 1931 - dsprintk(NDEBUG_INFORMATION, instance, "%d bytes and %d buffers left\n", 1932 - cmd->SCp.this_residual, 1933 - cmd->SCp.buffers_residual); 1934 - } 1935 - 1936 - /* 1937 - * The preferred transfer method is going to be 1938 - * PSEUDO-DMA for systems that are strictly PIO, 1939 - * since we can let the hardware do the handshaking. 
1940 - * 1941 - * For this to work, we need to know the transfersize 1942 - * ahead of time, since the pseudo-DMA code will sit 1943 - * in an unconditional loop. 1944 - */ 1945 - 1946 - /* ++roman: I suggest, this should be 1947 - * #if def(REAL_DMA) 1948 - * instead of leaving REAL_DMA out. 1949 - */ 1950 - 1951 - #if defined(REAL_DMA) 1952 - #if !defined(CONFIG_SUN3) 1953 - transfersize = 0; 1954 - if (!cmd->device->borken) 1955 - #endif 1956 - transfersize = NCR5380_dma_xfer_len(instance, cmd, phase); 1957 - 1958 - if (transfersize >= DMA_MIN_SIZE) { 1959 - len = transfersize; 1960 - cmd->SCp.phase = phase; 1961 - if (NCR5380_transfer_dma(instance, &phase, 1962 - &len, (unsigned char **)&cmd->SCp.ptr)) { 1963 - /* 1964 - * If the watchdog timer fires, all future 1965 - * accesses to this device will use the 1966 - * polled-IO. 1967 - */ 1968 - scmd_printk(KERN_INFO, cmd, 1969 - "switching to slow handshake\n"); 1970 - cmd->device->borken = 1; 1971 - sink = 1; 1972 - do_abort(instance); 1973 - cmd->result = DID_ERROR << 16; 1974 - /* XXX - need to source or sink data here, as appropriate */ 1975 - } else { 1976 - #ifdef REAL_DMA 1977 - /* ++roman: When using real DMA, 1978 - * information_transfer() should return after 1979 - * starting DMA since it has nothing more to 1980 - * do. 1981 - */ 1982 - return; 1983 - #else 1984 - cmd->SCp.this_residual -= transfersize - len; 1985 - #endif 1986 - } 1987 - } else 1988 - #endif /* defined(REAL_DMA) */ 1989 - { 1990 - /* Break up transfer into 3 ms chunks, 1991 - * presuming 6 accesses per handshake. 
1992 - */ 1993 - transfersize = min((unsigned long)cmd->SCp.this_residual, 1994 - hostdata->accesses_per_ms / 2); 1995 - len = transfersize; 1996 - NCR5380_transfer_pio(instance, &phase, &len, 1997 - (unsigned char **)&cmd->SCp.ptr); 1998 - cmd->SCp.this_residual -= transfersize - len; 1999 - } 2000 - #if defined(CONFIG_SUN3) && defined(REAL_DMA) 2001 - /* if we had intended to dma that command clear it */ 2002 - if (sun3_dma_setup_done == cmd) 2003 - sun3_dma_setup_done = NULL; 2004 - #endif 2005 - return; 2006 - case PHASE_MSGIN: 2007 - len = 1; 2008 - data = &tmp; 2009 - NCR5380_transfer_pio(instance, &phase, &len, &data); 2010 - cmd->SCp.Message = tmp; 2011 - 2012 - switch (tmp) { 2013 - case ABORT: 2014 - case COMMAND_COMPLETE: 2015 - /* Accept message by clearing ACK */ 2016 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 2017 - dsprintk(NDEBUG_QUEUES, instance, 2018 - "COMMAND COMPLETE %p target %d lun %llu\n", 2019 - cmd, scmd_id(cmd), cmd->device->lun); 2020 - 2021 - hostdata->connected = NULL; 2022 - #ifdef SUPPORT_TAGS 2023 - cmd_free_tag(cmd); 2024 - if (status_byte(cmd->SCp.Status) == QUEUE_FULL) { 2025 - u8 lun = cmd->device->lun; 2026 - struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][lun]; 2027 - 2028 - dsprintk(NDEBUG_TAGS, instance, 2029 - "QUEUE_FULL %p target %d lun %d nr_allocated %d\n", 2030 - cmd, scmd_id(cmd), lun, ta->nr_allocated); 2031 - if (ta->queue_size > ta->nr_allocated) 2032 - ta->queue_size = ta->nr_allocated; 2033 - } 2034 - #endif 2035 - 2036 - cmd->result &= ~0xffff; 2037 - cmd->result |= cmd->SCp.Status; 2038 - cmd->result |= cmd->SCp.Message << 8; 2039 - 2040 - if (cmd->cmnd[0] == REQUEST_SENSE) 2041 - complete_cmd(instance, cmd); 2042 - else { 2043 - if (cmd->SCp.Status == SAM_STAT_CHECK_CONDITION || 2044 - cmd->SCp.Status == SAM_STAT_COMMAND_TERMINATED) { 2045 - dsprintk(NDEBUG_QUEUES, instance, "autosense: adding cmd %p to tail of autosense queue\n", 2046 - cmd); 2047 - list_add_tail(&ncmd->list, 2048 - 
&hostdata->autosense); 2049 - } else 2050 - complete_cmd(instance, cmd); 2051 - } 2052 - 2053 - /* 2054 - * Restore phase bits to 0 so an interrupted selection, 2055 - * arbitration can resume. 2056 - */ 2057 - NCR5380_write(TARGET_COMMAND_REG, 0); 2058 - 2059 - /* Enable reselect interrupts */ 2060 - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); 2061 - 2062 - maybe_release_dma_irq(instance); 2063 - return; 2064 - case MESSAGE_REJECT: 2065 - /* Accept message by clearing ACK */ 2066 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 2067 - switch (hostdata->last_message) { 2068 - case HEAD_OF_QUEUE_TAG: 2069 - case ORDERED_QUEUE_TAG: 2070 - case SIMPLE_QUEUE_TAG: 2071 - /* The target obviously doesn't support tagged 2072 - * queuing, even though it announced this ability in 2073 - * its INQUIRY data ?!? (maybe only this LUN?) Ok, 2074 - * clear 'tagged_supported' and lock the LUN, since 2075 - * the command is treated as untagged further on. 2076 - */ 2077 - cmd->device->tagged_supported = 0; 2078 - hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); 2079 - cmd->tag = TAG_NONE; 2080 - dsprintk(NDEBUG_TAGS, instance, "target %d lun %llu rejected QUEUE_TAG message; tagged queuing disabled\n", 2081 - scmd_id(cmd), cmd->device->lun); 2082 - break; 2083 - } 2084 - break; 2085 - case DISCONNECT: 2086 - /* Accept message by clearing ACK */ 2087 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 2088 - hostdata->connected = NULL; 2089 - list_add(&ncmd->list, &hostdata->disconnected); 2090 - dsprintk(NDEBUG_INFORMATION | NDEBUG_QUEUES, 2091 - instance, "connected command %p for target %d lun %llu moved to disconnected queue\n", 2092 - cmd, scmd_id(cmd), cmd->device->lun); 2093 - 2094 - /* 2095 - * Restore phase bits to 0 so an interrupted selection, 2096 - * arbitration can resume. 
2097 - */ 2098 - NCR5380_write(TARGET_COMMAND_REG, 0); 2099 - 2100 - /* Enable reselect interrupts */ 2101 - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); 2102 - #ifdef SUN3_SCSI_VME 2103 - dregs->csr |= CSR_DMA_ENABLE; 2104 - #endif 2105 - return; 2106 - /* 2107 - * The SCSI data pointer is *IMPLICITLY* saved on a disconnect 2108 - * operation, in violation of the SCSI spec so we can safely 2109 - * ignore SAVE/RESTORE pointers calls. 2110 - * 2111 - * Unfortunately, some disks violate the SCSI spec and 2112 - * don't issue the required SAVE_POINTERS message before 2113 - * disconnecting, and we have to break spec to remain 2114 - * compatible. 2115 - */ 2116 - case SAVE_POINTERS: 2117 - case RESTORE_POINTERS: 2118 - /* Accept message by clearing ACK */ 2119 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 2120 - break; 2121 - case EXTENDED_MESSAGE: 2122 - /* 2123 - * Start the message buffer with the EXTENDED_MESSAGE 2124 - * byte, since spi_print_msg() wants the whole thing. 2125 - */ 2126 - extended_msg[0] = EXTENDED_MESSAGE; 2127 - /* Accept first byte by clearing ACK */ 2128 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 2129 - 2130 - spin_unlock_irq(&hostdata->lock); 2131 - 2132 - dsprintk(NDEBUG_EXTENDED, instance, "receiving extended message\n"); 2133 - 2134 - len = 2; 2135 - data = extended_msg + 1; 2136 - phase = PHASE_MSGIN; 2137 - NCR5380_transfer_pio(instance, &phase, &len, &data); 2138 - dsprintk(NDEBUG_EXTENDED, instance, "length %d, code 0x%02x\n", 2139 - (int)extended_msg[1], 2140 - (int)extended_msg[2]); 2141 - 2142 - if (!len && extended_msg[1] > 0 && 2143 - extended_msg[1] <= sizeof(extended_msg) - 2) { 2144 - /* Accept third byte by clearing ACK */ 2145 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 2146 - len = extended_msg[1] - 1; 2147 - data = extended_msg + 3; 2148 - phase = PHASE_MSGIN; 2149 - 2150 - NCR5380_transfer_pio(instance, &phase, &len, &data); 2151 - dsprintk(NDEBUG_EXTENDED, instance, "message received, residual 
%d\n", 2152 - len); 2153 - 2154 - switch (extended_msg[2]) { 2155 - case EXTENDED_SDTR: 2156 - case EXTENDED_WDTR: 2157 - case EXTENDED_MODIFY_DATA_POINTER: 2158 - case EXTENDED_EXTENDED_IDENTIFY: 2159 - tmp = 0; 2160 - } 2161 - } else if (len) { 2162 - shost_printk(KERN_ERR, instance, "error receiving extended message\n"); 2163 - tmp = 0; 2164 - } else { 2165 - shost_printk(KERN_NOTICE, instance, "extended message code %02x length %d is too long\n", 2166 - extended_msg[2], extended_msg[1]); 2167 - tmp = 0; 2168 - } 2169 - 2170 - spin_lock_irq(&hostdata->lock); 2171 - if (!hostdata->connected) 2172 - return; 2173 - 2174 - /* Fall through to reject message */ 2175 - 2176 - /* 2177 - * If we get something weird that we aren't expecting, 2178 - * reject it. 2179 - */ 2180 - default: 2181 - if (!tmp) { 2182 - shost_printk(KERN_ERR, instance, "rejecting message "); 2183 - spi_print_msg(extended_msg); 2184 - printk("\n"); 2185 - } else if (tmp != EXTENDED_MESSAGE) 2186 - scmd_printk(KERN_INFO, cmd, 2187 - "rejecting unknown message %02x\n", 2188 - tmp); 2189 - else 2190 - scmd_printk(KERN_INFO, cmd, 2191 - "rejecting unknown extended message code %02x, length %d\n", 2192 - extended_msg[1], extended_msg[0]); 2193 - 2194 - msgout = MESSAGE_REJECT; 2195 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); 2196 - break; 2197 - } /* switch (tmp) */ 2198 - break; 2199 - case PHASE_MSGOUT: 2200 - len = 1; 2201 - data = &msgout; 2202 - hostdata->last_message = msgout; 2203 - NCR5380_transfer_pio(instance, &phase, &len, &data); 2204 - if (msgout == ABORT) { 2205 - hostdata->connected = NULL; 2206 - cmd->result = DID_ERROR << 16; 2207 - complete_cmd(instance, cmd); 2208 - maybe_release_dma_irq(instance); 2209 - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); 2210 - return; 2211 - } 2212 - msgout = NOP; 2213 - break; 2214 - case PHASE_CMDOUT: 2215 - len = cmd->cmd_len; 2216 - data = cmd->cmnd; 2217 - /* 2218 - * XXX for performance reasons, on machines with a 
2219 - * PSEUDO-DMA architecture we should probably 2220 - * use the dma transfer function. 2221 - */ 2222 - NCR5380_transfer_pio(instance, &phase, &len, &data); 2223 - break; 2224 - case PHASE_STATIN: 2225 - len = 1; 2226 - data = &tmp; 2227 - NCR5380_transfer_pio(instance, &phase, &len, &data); 2228 - cmd->SCp.Status = tmp; 2229 - break; 2230 - default: 2231 - shost_printk(KERN_ERR, instance, "unknown phase\n"); 2232 - NCR5380_dprint(NDEBUG_ANY, instance); 2233 - } /* switch(phase) */ 2234 - } else { 2235 - spin_unlock_irq(&hostdata->lock); 2236 - NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ); 2237 - spin_lock_irq(&hostdata->lock); 2238 - } 2239 - } 2240 - } 2241 - 2242 - /* 2243 - * Function : void NCR5380_reselect (struct Scsi_Host *instance) 2244 - * 2245 - * Purpose : does reselection, initializing the instance->connected 2246 - * field to point to the scsi_cmnd for which the I_T_L or I_T_L_Q 2247 - * nexus has been reestablished, 2248 - * 2249 - * Inputs : instance - this instance of the NCR5380. 2250 - */ 2251 - 2252 - 2253 - /* it might eventually prove necessary to do a dma setup on 2254 - reselection, but it doesn't seem to be needed now -- sam */ 2255 - 2256 - static void NCR5380_reselect(struct Scsi_Host *instance) 2257 - { 2258 - struct NCR5380_hostdata *hostdata = shost_priv(instance); 2259 - unsigned char target_mask; 2260 - unsigned char lun; 2261 - #ifdef SUPPORT_TAGS 2262 - unsigned char tag; 2263 - #endif 2264 - unsigned char msg[3]; 2265 - int __maybe_unused len; 2266 - unsigned char __maybe_unused *data, __maybe_unused phase; 2267 - struct NCR5380_cmd *ncmd; 2268 - struct scsi_cmnd *tmp; 2269 - 2270 - /* 2271 - * Disable arbitration, etc. since the host adapter obviously 2272 - * lost, and tell an interrupted NCR5380_select() to restart. 
2273 - */ 2274 - 2275 - NCR5380_write(MODE_REG, MR_BASE); 2276 - 2277 - target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask); 2278 - 2279 - dsprintk(NDEBUG_RESELECTION, instance, "reselect\n"); 2280 - 2281 - /* 2282 - * At this point, we have detected that our SCSI ID is on the bus, 2283 - * SEL is true and BSY was false for at least one bus settle delay 2284 - * (400 ns). 2285 - * 2286 - * We must assert BSY ourselves, until the target drops the SEL 2287 - * signal. 2288 - */ 2289 - 2290 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY); 2291 - if (NCR5380_poll_politely(instance, 2292 - STATUS_REG, SR_SEL, 0, 2 * HZ) < 0) { 2293 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 2294 - return; 2295 - } 2296 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 2297 - 2298 - /* 2299 - * Wait for target to go into MSGIN. 2300 - */ 2301 - 2302 - if (NCR5380_poll_politely(instance, 2303 - STATUS_REG, SR_REQ, SR_REQ, 2 * HZ) < 0) { 2304 - do_abort(instance); 2305 - return; 2306 - } 2307 - 2308 - #if defined(CONFIG_SUN3) && defined(REAL_DMA) 2309 - /* acknowledge toggle to MSGIN */ 2310 - NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(PHASE_MSGIN)); 2311 - 2312 - /* peek at the byte without really hitting the bus */ 2313 - msg[0] = NCR5380_read(CURRENT_SCSI_DATA_REG); 2314 - #else 2315 - len = 1; 2316 - data = msg; 2317 - phase = PHASE_MSGIN; 2318 - NCR5380_transfer_pio(instance, &phase, &len, &data); 2319 - 2320 - if (len) { 2321 - do_abort(instance); 2322 - return; 2323 - } 2324 - #endif 2325 - 2326 - if (!(msg[0] & 0x80)) { 2327 - shost_printk(KERN_ERR, instance, "expecting IDENTIFY message, got "); 2328 - spi_print_msg(msg); 2329 - printk("\n"); 2330 - do_abort(instance); 2331 - return; 2332 - } 2333 - lun = msg[0] & 0x07; 2334 - 2335 - #if defined(SUPPORT_TAGS) && !defined(CONFIG_SUN3) 2336 - /* If the phase is still MSGIN, the target wants to send some more 2337 - * messages. 
In case it supports tagged queuing, this is probably a 2338 - * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus. 2339 - */ 2340 - tag = TAG_NONE; 2341 - if (phase == PHASE_MSGIN && (hostdata->flags & FLAG_TAGGED_QUEUING)) { 2342 - /* Accept previous IDENTIFY message by clearing ACK */ 2343 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 2344 - len = 2; 2345 - data = msg + 1; 2346 - if (!NCR5380_transfer_pio(instance, &phase, &len, &data) && 2347 - msg[1] == SIMPLE_QUEUE_TAG) 2348 - tag = msg[2]; 2349 - dsprintk(NDEBUG_TAGS, instance, "reselect: target mask %02x, lun %d sent tag %d\n", 2350 - target_mask, lun, tag); 2351 - } 2352 - #endif 2353 - 2354 - /* 2355 - * Find the command corresponding to the I_T_L or I_T_L_Q nexus we 2356 - * just reestablished, and remove it from the disconnected queue. 2357 - */ 2358 - 2359 - tmp = NULL; 2360 - list_for_each_entry(ncmd, &hostdata->disconnected, list) { 2361 - struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd); 2362 - 2363 - if (target_mask == (1 << scmd_id(cmd)) && 2364 - lun == (u8)cmd->device->lun 2365 - #ifdef SUPPORT_TAGS 2366 - && (tag == cmd->tag) 2367 - #endif 2368 - ) { 2369 - list_del(&ncmd->list); 2370 - tmp = cmd; 2371 - break; 2372 - } 2373 - } 2374 - 2375 - if (tmp) { 2376 - dsprintk(NDEBUG_RESELECTION | NDEBUG_QUEUES, instance, 2377 - "reselect: removed %p from disconnected queue\n", tmp); 2378 - } else { 2379 - 2380 - #ifdef SUPPORT_TAGS 2381 - shost_printk(KERN_ERR, instance, "target bitmask 0x%02x lun %d tag %d not in disconnected queue.\n", 2382 - target_mask, lun, tag); 2383 - #else 2384 - shost_printk(KERN_ERR, instance, "target bitmask 0x%02x lun %d not in disconnected queue.\n", 2385 - target_mask, lun); 2386 - #endif 2387 - /* 2388 - * Since we have an established nexus that we can't do anything 2389 - * with, we must abort it. 
2390 - */ 2391 - do_abort(instance); 2392 - return; 2393 - } 2394 - 2395 - #if defined(CONFIG_SUN3) && defined(REAL_DMA) 2396 - /* engage dma setup for the command we just saw */ 2397 - { 2398 - void *d; 2399 - unsigned long count; 2400 - 2401 - if (!tmp->SCp.this_residual && tmp->SCp.buffers_residual) { 2402 - count = tmp->SCp.buffer->length; 2403 - d = sg_virt(tmp->SCp.buffer); 2404 - } else { 2405 - count = tmp->SCp.this_residual; 2406 - d = tmp->SCp.ptr; 2407 - } 2408 - /* setup this command for dma if not already */ 2409 - if ((count >= DMA_MIN_SIZE) && (sun3_dma_setup_done != tmp)) { 2410 - sun3scsi_dma_setup(instance, d, count, 2411 - rq_data_dir(tmp->request)); 2412 - sun3_dma_setup_done = tmp; 2413 - } 2414 - } 2415 - 2416 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); 2417 - #endif 2418 - 2419 - /* Accept message by clearing ACK */ 2420 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 2421 - 2422 - #if defined(SUPPORT_TAGS) && defined(CONFIG_SUN3) 2423 - /* If the phase is still MSGIN, the target wants to send some more 2424 - * messages. In case it supports tagged queuing, this is probably a 2425 - * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus. 
2426 - */ 2427 - tag = TAG_NONE; 2428 - if (phase == PHASE_MSGIN && setup_use_tagged_queuing) { 2429 - /* Accept previous IDENTIFY message by clearing ACK */ 2430 - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 2431 - len = 2; 2432 - data = msg + 1; 2433 - if (!NCR5380_transfer_pio(instance, &phase, &len, &data) && 2434 - msg[1] == SIMPLE_QUEUE_TAG) 2435 - tag = msg[2]; 2436 - dsprintk(NDEBUG_TAGS, instance, "reselect: target mask %02x, lun %d sent tag %d\n" 2437 - target_mask, lun, tag); 2438 - } 2439 - #endif 2440 - 2441 - hostdata->connected = tmp; 2442 - dsprintk(NDEBUG_RESELECTION, instance, "nexus established, target %d, lun %llu, tag %d\n", 2443 - scmd_id(tmp), tmp->device->lun, tmp->tag); 2444 - } 2445 - 2446 - 2447 - /** 2448 - * list_find_cmd - test for presence of a command in a linked list 2449 - * @haystack: list of commands 2450 - * @needle: command to search for 2451 - */ 2452 - 2453 - static bool list_find_cmd(struct list_head *haystack, 2454 - struct scsi_cmnd *needle) 2455 - { 2456 - struct NCR5380_cmd *ncmd; 2457 - 2458 - list_for_each_entry(ncmd, haystack, list) 2459 - if (NCR5380_to_scmd(ncmd) == needle) 2460 - return true; 2461 - return false; 2462 - } 2463 - 2464 - /** 2465 - * list_remove_cmd - remove a command from linked list 2466 - * @haystack: list of commands 2467 - * @needle: command to remove 2468 - */ 2469 - 2470 - static bool list_del_cmd(struct list_head *haystack, 2471 - struct scsi_cmnd *needle) 2472 - { 2473 - if (list_find_cmd(haystack, needle)) { 2474 - struct NCR5380_cmd *ncmd = scsi_cmd_priv(needle); 2475 - 2476 - list_del(&ncmd->list); 2477 - return true; 2478 - } 2479 - return false; 2480 - } 2481 - 2482 - /** 2483 - * NCR5380_abort - scsi host eh_abort_handler() method 2484 - * @cmd: the command to be aborted 2485 - * 2486 - * Try to abort a given command by removing it from queues and/or sending 2487 - * the target an abort message. This may not succeed in causing a target 2488 - * to abort the command. 
Nonetheless, the low-level driver must forget about 2489 - * the command because the mid-layer reclaims it and it may be re-issued. 2490 - * 2491 - * The normal path taken by a command is as follows. For EH we trace this 2492 - * same path to locate and abort the command. 2493 - * 2494 - * unissued -> selecting -> [unissued -> selecting ->]... connected -> 2495 - * [disconnected -> connected ->]... 2496 - * [autosense -> connected ->] done 2497 - * 2498 - * If cmd was not found at all then presumably it has already been completed, 2499 - * in which case return SUCCESS to try to avoid further EH measures. 2500 - * 2501 - * If the command has not completed yet, we must not fail to find it. 2502 - * We have no option but to forget the aborted command (even if it still 2503 - * lacks sense data). The mid-layer may re-issue a command that is in error 2504 - * recovery (see scsi_send_eh_cmnd), but the logic and data structures in 2505 - * this driver are such that a command can appear on one queue only. 2506 - * 2507 - * The lock protects driver data structures, but EH handlers also use it 2508 - * to serialize their own execution and prevent their own re-entry. 
2509 - */ 2510 - 2511 - static int NCR5380_abort(struct scsi_cmnd *cmd) 2512 - { 2513 - struct Scsi_Host *instance = cmd->device->host; 2514 - struct NCR5380_hostdata *hostdata = shost_priv(instance); 2515 - unsigned long flags; 2516 - int result = SUCCESS; 2517 - 2518 - spin_lock_irqsave(&hostdata->lock, flags); 2519 - 2520 - #if (NDEBUG & NDEBUG_ANY) 2521 - scmd_printk(KERN_INFO, cmd, __func__); 2522 - #endif 2523 - NCR5380_dprint(NDEBUG_ANY, instance); 2524 - NCR5380_dprint_phase(NDEBUG_ANY, instance); 2525 - 2526 - if (list_del_cmd(&hostdata->unissued, cmd)) { 2527 - dsprintk(NDEBUG_ABORT, instance, 2528 - "abort: removed %p from issue queue\n", cmd); 2529 - cmd->result = DID_ABORT << 16; 2530 - cmd->scsi_done(cmd); /* No tag or busy flag to worry about */ 2531 - goto out; 2532 - } 2533 - 2534 - if (hostdata->selecting == cmd) { 2535 - dsprintk(NDEBUG_ABORT, instance, 2536 - "abort: cmd %p == selecting\n", cmd); 2537 - hostdata->selecting = NULL; 2538 - cmd->result = DID_ABORT << 16; 2539 - complete_cmd(instance, cmd); 2540 - goto out; 2541 - } 2542 - 2543 - if (list_del_cmd(&hostdata->disconnected, cmd)) { 2544 - dsprintk(NDEBUG_ABORT, instance, 2545 - "abort: removed %p from disconnected list\n", cmd); 2546 - /* Can't call NCR5380_select() and send ABORT because that 2547 - * means releasing the lock. Need a bus reset. 
2548 - */ 2549 - set_host_byte(cmd, DID_ERROR); 2550 - complete_cmd(instance, cmd); 2551 - result = FAILED; 2552 - goto out; 2553 - } 2554 - 2555 - if (hostdata->connected == cmd) { 2556 - dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd); 2557 - hostdata->connected = NULL; 2558 - #ifdef REAL_DMA 2559 - hostdata->dma_len = 0; 2560 - #endif 2561 - if (do_abort(instance)) { 2562 - set_host_byte(cmd, DID_ERROR); 2563 - complete_cmd(instance, cmd); 2564 - result = FAILED; 2565 - goto out; 2566 - } 2567 - set_host_byte(cmd, DID_ABORT); 2568 - complete_cmd(instance, cmd); 2569 - goto out; 2570 - } 2571 - 2572 - if (list_del_cmd(&hostdata->autosense, cmd)) { 2573 - dsprintk(NDEBUG_ABORT, instance, 2574 - "abort: removed %p from sense queue\n", cmd); 2575 - set_host_byte(cmd, DID_ERROR); 2576 - complete_cmd(instance, cmd); 2577 - } 2578 - 2579 - out: 2580 - if (result == FAILED) 2581 - dsprintk(NDEBUG_ABORT, instance, "abort: failed to abort %p\n", cmd); 2582 - else 2583 - dsprintk(NDEBUG_ABORT, instance, "abort: successfully aborted %p\n", cmd); 2584 - 2585 - queue_work(hostdata->work_q, &hostdata->main_task); 2586 - maybe_release_dma_irq(instance); 2587 - spin_unlock_irqrestore(&hostdata->lock, flags); 2588 - 2589 - return result; 2590 - } 2591 - 2592 - 2593 - /** 2594 - * NCR5380_bus_reset - reset the SCSI bus 2595 - * @cmd: SCSI command undergoing EH 2596 - * 2597 - * Returns SUCCESS 2598 - */ 2599 - 2600 - static int NCR5380_bus_reset(struct scsi_cmnd *cmd) 2601 - { 2602 - struct Scsi_Host *instance = cmd->device->host; 2603 - struct NCR5380_hostdata *hostdata = shost_priv(instance); 2604 - int i; 2605 - unsigned long flags; 2606 - struct NCR5380_cmd *ncmd; 2607 - 2608 - spin_lock_irqsave(&hostdata->lock, flags); 2609 - 2610 - #if (NDEBUG & NDEBUG_ANY) 2611 - scmd_printk(KERN_INFO, cmd, __func__); 2612 - #endif 2613 - NCR5380_dprint(NDEBUG_ANY, instance); 2614 - NCR5380_dprint_phase(NDEBUG_ANY, instance); 2615 - 2616 - do_reset(instance); 2617 - 
2618 - /* reset NCR registers */ 2619 - NCR5380_write(MODE_REG, MR_BASE); 2620 - NCR5380_write(TARGET_COMMAND_REG, 0); 2621 - NCR5380_write(SELECT_ENABLE_REG, 0); 2622 - 2623 - /* After the reset, there are no more connected or disconnected commands 2624 - * and no busy units; so clear the low-level status here to avoid 2625 - * conflicts when the mid-level code tries to wake up the affected 2626 - * commands! 2627 - */ 2628 - 2629 - if (list_del_cmd(&hostdata->unissued, cmd)) { 2630 - cmd->result = DID_RESET << 16; 2631 - cmd->scsi_done(cmd); 2632 - } 2633 - 2634 - if (hostdata->selecting) { 2635 - hostdata->selecting->result = DID_RESET << 16; 2636 - complete_cmd(instance, hostdata->selecting); 2637 - hostdata->selecting = NULL; 2638 - } 2639 - 2640 - list_for_each_entry(ncmd, &hostdata->disconnected, list) { 2641 - struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd); 2642 - 2643 - set_host_byte(cmd, DID_RESET); 2644 - cmd->scsi_done(cmd); 2645 - } 2646 - INIT_LIST_HEAD(&hostdata->disconnected); 2647 - 2648 - list_for_each_entry(ncmd, &hostdata->autosense, list) { 2649 - struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd); 2650 - 2651 - set_host_byte(cmd, DID_RESET); 2652 - cmd->scsi_done(cmd); 2653 - } 2654 - INIT_LIST_HEAD(&hostdata->autosense); 2655 - 2656 - if (hostdata->connected) { 2657 - set_host_byte(hostdata->connected, DID_RESET); 2658 - complete_cmd(instance, hostdata->connected); 2659 - hostdata->connected = NULL; 2660 - } 2661 - 2662 - #ifdef SUPPORT_TAGS 2663 - free_all_tags(hostdata); 2664 - #endif 2665 - for (i = 0; i < 8; ++i) 2666 - hostdata->busy[i] = 0; 2667 - #ifdef REAL_DMA 2668 - hostdata->dma_len = 0; 2669 - #endif 2670 - 2671 - queue_work(hostdata->work_q, &hostdata->main_task); 2672 - maybe_release_dma_irq(instance); 2673 - spin_unlock_irqrestore(&hostdata->lock, flags); 2674 - 2675 - return SUCCESS; 2676 - }
+33 -111
drivers/scsi/atari_scsi.c
··· 14 14 * 15 15 */ 16 16 17 - 18 - /**************************************************************************/ 19 - /* */ 20 - /* Notes for Falcon SCSI: */ 21 - /* ---------------------- */ 22 - /* */ 23 - /* Since the Falcon SCSI uses the ST-DMA chip, that is shared among */ 24 - /* several device drivers, locking and unlocking the access to this */ 25 - /* chip is required. But locking is not possible from an interrupt, */ 26 - /* since it puts the process to sleep if the lock is not available. */ 27 - /* This prevents "late" locking of the DMA chip, i.e. locking it just */ 28 - /* before using it, since in case of disconnection-reconnection */ 29 - /* commands, the DMA is started from the reselection interrupt. */ 30 - /* */ 31 - /* Two possible schemes for ST-DMA-locking would be: */ 32 - /* 1) The lock is taken for each command separately and disconnecting */ 33 - /* is forbidden (i.e. can_queue = 1). */ 34 - /* 2) The DMA chip is locked when the first command comes in and */ 35 - /* released when the last command is finished and all queues are */ 36 - /* empty. */ 37 - /* The first alternative would result in bad performance, since the */ 38 - /* interleaving of commands would not be used. The second is unfair to */ 39 - /* other drivers using the ST-DMA, because the queues will seldom be */ 40 - /* totally empty if there is a lot of disk traffic. */ 41 - /* */ 42 - /* For this reasons I decided to employ a more elaborate scheme: */ 43 - /* - First, we give up the lock every time we can (for fairness), this */ 44 - /* means every time a command finishes and there are no other commands */ 45 - /* on the disconnected queue. */ 46 - /* - If there are others waiting to lock the DMA chip, we stop */ 47 - /* issuing commands, i.e. moving them onto the issue queue. */ 48 - /* Because of that, the disconnected queue will run empty in a */ 49 - /* while. Instead we go to sleep on a 'fairness_queue'. 
*/ 50 - /* - If the lock is released, all processes waiting on the fairness */ 51 - /* queue will be woken. The first of them tries to re-lock the DMA, */ 52 - /* the others wait for the first to finish this task. After that, */ 53 - /* they can all run on and do their commands... */ 54 - /* This sounds complicated (and it is it :-(), but it seems to be a */ 55 - /* good compromise between fairness and performance: As long as no one */ 56 - /* else wants to work with the ST-DMA chip, SCSI can go along as */ 57 - /* usual. If now someone else comes, this behaviour is changed to a */ 58 - /* "fairness mode": just already initiated commands are finished and */ 59 - /* then the lock is released. The other one waiting will probably win */ 60 - /* the race for locking the DMA, since it was waiting for longer. And */ 61 - /* after it has finished, SCSI can go ahead again. Finally: I hope I */ 62 - /* have not produced any deadlock possibilities! */ 63 - /* */ 64 - /**************************************************************************/ 65 - 17 + /* 18 + * Notes for Falcon SCSI DMA 19 + * 20 + * The 5380 device is one of several that all share the DMA chip. Hence 21 + * "locking" and "unlocking" access to this chip is required. 22 + * 23 + * Two possible schemes for ST DMA acquisition by atari_scsi are: 24 + * 1) The lock is taken for each command separately (i.e. can_queue == 1). 25 + * 2) The lock is taken when the first command arrives and released 26 + * when the last command is finished (i.e. can_queue > 1). 27 + * 28 + * The first alternative limits SCSI bus utilization, since interleaving 29 + * commands is not possible. The second gives better performance but is 30 + * unfair to other drivers needing to use the ST DMA chip. In order to 31 + * allow the IDE and floppy drivers equal access to the ST DMA chip 32 + * the default is can_queue == 1. 
33 + */ 66 34 67 35 #include <linux/module.h> 68 36 #include <linux/types.h> ··· 51 83 52 84 #include <scsi/scsi_host.h> 53 85 54 - /* Definitions for the core NCR5380 driver. */ 55 - 56 - #define REAL_DMA 57 - #define SUPPORT_TAGS 58 - #define MAX_TAGS 32 59 86 #define DMA_MIN_SIZE 32 87 + 88 + /* Definitions for the core NCR5380 driver. */ 60 89 61 90 #define NCR5380_implementation_fields /* none */ 62 91 ··· 64 99 #define NCR5380_abort atari_scsi_abort 65 100 #define NCR5380_info atari_scsi_info 66 101 67 - #define NCR5380_dma_read_setup(instance, data, count) \ 102 + #define NCR5380_dma_recv_setup(instance, data, count) \ 68 103 atari_scsi_dma_setup(instance, data, count, 0) 69 - #define NCR5380_dma_write_setup(instance, data, count) \ 104 + #define NCR5380_dma_send_setup(instance, data, count) \ 70 105 atari_scsi_dma_setup(instance, data, count, 1) 71 106 #define NCR5380_dma_residual(instance) \ 72 107 atari_scsi_dma_residual(instance) ··· 124 159 return adr; 125 160 } 126 161 127 - #ifdef REAL_DMA 128 162 static void atari_scsi_fetch_restbytes(void); 129 - #endif 130 163 131 164 static unsigned char (*atari_scsi_reg_read)(unsigned char reg); 132 165 static void (*atari_scsi_reg_write)(unsigned char reg, unsigned char value); 133 166 134 - #ifdef REAL_DMA 135 167 static unsigned long atari_dma_residual, atari_dma_startaddr; 136 168 static short atari_dma_active; 137 169 /* pointer to the dribble buffer */ ··· 147 185 /* mask for address bits that can't be used with the ST-DMA */ 148 186 static unsigned long atari_dma_stram_mask; 149 187 #define STRAM_ADDR(a) (((a) & atari_dma_stram_mask) == 0) 150 - #endif 151 188 152 189 static int setup_can_queue = -1; 153 190 module_param(setup_can_queue, int, 0); ··· 154 193 module_param(setup_cmd_per_lun, int, 0); 155 194 static int setup_sg_tablesize = -1; 156 195 module_param(setup_sg_tablesize, int, 0); 157 - static int setup_use_tagged_queuing = -1; 158 - module_param(setup_use_tagged_queuing, int, 0); 159 196 static 
int setup_hostid = -1; 160 197 module_param(setup_hostid, int, 0); 161 198 static int setup_toshiba_delay = -1; 162 199 module_param(setup_toshiba_delay, int, 0); 163 200 164 - 165 - #if defined(REAL_DMA) 166 201 167 202 static int scsi_dma_is_ignored_buserr(unsigned char dma_stat) 168 203 { ··· 212 255 } 213 256 #endif 214 257 215 - #endif 216 - 217 258 218 259 static irqreturn_t scsi_tt_intr(int irq, void *dev) 219 260 { 220 - #ifdef REAL_DMA 221 261 struct Scsi_Host *instance = dev; 222 262 struct NCR5380_hostdata *hostdata = shost_priv(instance); 223 263 int dma_stat; ··· 296 342 tt_scsi_dma.dma_ctrl = 0; 297 343 } 298 344 299 - #endif /* REAL_DMA */ 300 - 301 345 NCR5380_intr(irq, dev); 302 346 303 347 return IRQ_HANDLED; ··· 304 352 305 353 static irqreturn_t scsi_falcon_intr(int irq, void *dev) 306 354 { 307 - #ifdef REAL_DMA 308 355 struct Scsi_Host *instance = dev; 309 356 struct NCR5380_hostdata *hostdata = shost_priv(instance); 310 357 int dma_stat; ··· 356 405 atari_dma_orig_addr = NULL; 357 406 } 358 407 359 - #endif /* REAL_DMA */ 360 - 361 408 NCR5380_intr(irq, dev); 362 409 363 410 return IRQ_HANDLED; 364 411 } 365 412 366 413 367 - #ifdef REAL_DMA 368 414 static void atari_scsi_fetch_restbytes(void) 369 415 { 370 416 int nr; ··· 384 436 *dst++ = *src++; 385 437 } 386 438 } 387 - #endif /* REAL_DMA */ 388 439 389 440 390 441 /* This function releases the lock on the DMA chip if there is no ··· 409 462 static int falcon_get_lock(struct Scsi_Host *instance) 410 463 { 411 464 if (IS_A_TT()) 465 + return 1; 466 + 467 + if (stdma_is_locked_by(scsi_falcon_intr) && 468 + instance->hostt->can_queue > 1) 412 469 return 1; 413 470 414 471 if (in_interrupt()) ··· 446 495 setup_sg_tablesize = ints[3]; 447 496 if (ints[0] >= 4) 448 497 setup_hostid = ints[4]; 449 - if (ints[0] >= 5) 450 - setup_use_tagged_queuing = ints[5]; 498 + /* ints[5] (use_tagged_queuing) is ignored */ 451 499 /* ints[6] (use_pdma) is ignored */ 452 500 if (ints[0] >= 7) 453 501 
setup_toshiba_delay = ints[7]; ··· 457 507 __setup("atascsi=", atari_scsi_setup); 458 508 #endif /* !MODULE */ 459 509 460 - 461 - #if defined(REAL_DMA) 462 510 463 511 static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance, 464 512 void *data, unsigned long count, ··· 492 544 * because the hardware does bus snooping (fine!). 493 545 */ 494 546 dma_cache_maintenance(addr, count, dir); 495 - 496 - if (count == 0) 497 - printk(KERN_NOTICE "SCSI warning: DMA programmed for 0 bytes !\n"); 498 547 499 548 if (IS_A_TT()) { 500 549 tt_scsi_dma.dma_ctrl = dir; ··· 568 623 struct scsi_cmnd *cmd, int write_flag) 569 624 { 570 625 unsigned long possible_len, limit; 626 + 627 + if (wanted_len < DMA_MIN_SIZE) 628 + return 0; 571 629 572 630 if (IS_A_TT()) 573 631 /* TT SCSI DMA can transfer arbitrary #bytes */ ··· 651 703 } 652 704 653 705 654 - #endif /* REAL_DMA */ 655 - 656 - 657 706 /* NCR5380 register access functions 658 707 * 659 708 * There are separate functions for TT and Falcon, because the access ··· 681 736 } 682 737 683 738 684 - #include "atari_NCR5380.c" 739 + #include "NCR5380.c" 685 740 686 741 static int atari_scsi_bus_reset(struct scsi_cmnd *cmd) 687 742 { ··· 690 745 691 746 local_irq_save(flags); 692 747 693 - #ifdef REAL_DMA 694 748 /* Abort a maybe active DMA transfer */ 695 749 if (IS_A_TT()) { 696 750 tt_scsi_dma.dma_ctrl = 0; ··· 698 754 atari_dma_active = 0; 699 755 atari_dma_orig_addr = NULL; 700 756 } 701 - #endif 702 757 703 758 rv = NCR5380_bus_reset(cmd); 704 759 ··· 724 781 .eh_abort_handler = atari_scsi_abort, 725 782 .eh_bus_reset_handler = atari_scsi_bus_reset, 726 783 .this_id = 7, 784 + .cmd_per_lun = 2, 727 785 .use_clustering = DISABLE_CLUSTERING, 728 786 .cmd_size = NCR5380_CMD_SIZE, 729 787 }; ··· 748 804 atari_scsi_reg_write = atari_scsi_falcon_reg_write; 749 805 } 750 806 751 - /* The values for CMD_PER_LUN and CAN_QUEUE are somehow arbitrary. 752 - * Higher values should work, too; try it! 
753 - * (But cmd_per_lun costs memory!) 754 - * 755 - * But there seems to be a bug somewhere that requires CAN_QUEUE to be 756 - * 2*CMD_PER_LUN. At least on a TT, no spurious timeouts seen since 757 - * changed CMD_PER_LUN... 758 - * 759 - * Note: The Falcon currently uses 8/1 setting due to unsolved problems 760 - * with cmd_per_lun != 1 761 - */ 762 807 if (ATARIHW_PRESENT(TT_SCSI)) { 763 808 atari_scsi_template.can_queue = 16; 764 - atari_scsi_template.cmd_per_lun = 8; 765 809 atari_scsi_template.sg_tablesize = SG_ALL; 766 810 } else { 767 - atari_scsi_template.can_queue = 8; 768 - atari_scsi_template.cmd_per_lun = 1; 811 + atari_scsi_template.can_queue = 1; 769 812 atari_scsi_template.sg_tablesize = SG_NONE; 770 813 } 771 814 ··· 781 850 } 782 851 } 783 852 784 - 785 - #ifdef REAL_DMA 786 853 /* If running on a Falcon and if there's TT-Ram (i.e., more than one 787 854 * memory block, since there's always ST-Ram in a Falcon), then 788 855 * allocate a STRAM_BUFFER_SIZE byte dribble buffer for transfers ··· 796 867 atari_dma_phys_buffer = atari_stram_to_phys(atari_dma_buffer); 797 868 atari_dma_orig_addr = 0; 798 869 } 799 - #endif 800 870 801 871 instance = scsi_host_alloc(&atari_scsi_template, 802 872 sizeof(struct NCR5380_hostdata)); ··· 807 879 instance->irq = irq->start; 808 880 809 881 host_flags |= IS_A_TT() ? 0 : FLAG_LATE_DMA_SETUP; 810 - #ifdef SUPPORT_TAGS 811 - host_flags |= setup_use_tagged_queuing > 0 ? FLAG_TAGGED_QUEUING : 0; 812 - #endif 813 882 host_flags |= setup_toshiba_delay > 0 ? FLAG_TOSHIBA_DELAY : 0; 814 883 815 884 error = NCR5380_init(instance, host_flags); ··· 822 897 goto fail_irq; 823 898 } 824 899 tt_mfp.active_edge |= 0x80; /* SCSI int on L->H */ 825 - #ifdef REAL_DMA 900 + 826 901 tt_scsi_dma.dma_ctrl = 0; 827 902 atari_dma_residual = 0; 828 903 ··· 844 919 845 920 hostdata->read_overruns = 4; 846 921 } 847 - #endif 848 922 } else { 849 923 /* Nothing to do for the interrupt: the ST-DMA is initialized 850 924 * already. 
851 925 */ 852 - #ifdef REAL_DMA 853 926 atari_dma_residual = 0; 854 927 atari_dma_active = 0; 855 928 atari_dma_stram_mask = (ATARIHW_PRESENT(EXTD_DMA) ? 0x00000000 856 929 : 0xff000000); 857 - #endif 858 930 } 859 931 860 932 NCR5380_maybe_reset_bus(instance);
+2 -2
drivers/scsi/bfa/bfa_fcs.h
··· 874 874 /* 875 875 * itnim callbacks 876 876 */ 877 - void bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim, 878 - struct bfad_itnim_s **itnim_drv); 877 + int bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim, 878 + struct bfad_itnim_s **itnim_drv); 879 879 void bfa_fcb_itnim_free(struct bfad_s *bfad, 880 880 struct bfad_itnim_s *itnim_drv); 881 881 void bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv);
+3 -2
drivers/scsi/bfa/bfa_fcs_fcpim.c
··· 588 588 struct bfa_fcs_lport_s *port = rport->port; 589 589 struct bfa_fcs_itnim_s *itnim; 590 590 struct bfad_itnim_s *itnim_drv; 591 + int ret; 591 592 592 593 /* 593 594 * call bfad to allocate the itnim 594 595 */ 595 - bfa_fcb_itnim_alloc(port->fcs->bfad, &itnim, &itnim_drv); 596 - if (itnim == NULL) { 596 + ret = bfa_fcb_itnim_alloc(port->fcs->bfad, &itnim, &itnim_drv); 597 + if (ret) { 597 598 bfa_trc(port->fcs, rport->pwwn); 598 599 return NULL; 599 600 }
+3 -2
drivers/scsi/bfa/bfad_im.c
··· 440 440 * BFA FCS itnim alloc callback, after successful PRLI 441 441 * Context: Interrupt 442 442 */ 443 - void 443 + int 444 444 bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim, 445 445 struct bfad_itnim_s **itnim_drv) 446 446 { 447 447 *itnim_drv = kzalloc(sizeof(struct bfad_itnim_s), GFP_ATOMIC); 448 448 if (*itnim_drv == NULL) 449 - return; 449 + return -ENOMEM; 450 450 451 451 (*itnim_drv)->im = bfad->im; 452 452 *itnim = &(*itnim_drv)->fcs_itnim; ··· 457 457 */ 458 458 INIT_WORK(&(*itnim_drv)->itnim_work, bfad_im_itnim_work_handler); 459 459 bfad->bfad_flags |= BFAD_RPORT_ONLINE; 460 + return 0; 460 461 } 461 462 462 463 /*
+2 -1
drivers/scsi/bnx2fc/bnx2fc.h
··· 65 65 #include "bnx2fc_constants.h" 66 66 67 67 #define BNX2FC_NAME "bnx2fc" 68 - #define BNX2FC_VERSION "2.9.6" 68 + #define BNX2FC_VERSION "2.10.3" 69 69 70 70 #define PFX "bnx2fc: " 71 71 ··· 261 261 u8 vlan_enabled; 262 262 int vlan_id; 263 263 bool enabled; 264 + u8 tm_timeout; 264 265 }; 265 266 266 267 #define bnx2fc_from_ctlr(x) \
+99 -1
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
··· 107 107 "\t\t0x10 - fcoe L2 fame related logs.\n" 108 108 "\t\t0xff - LOG all messages."); 109 109 110 + uint bnx2fc_devloss_tmo; 111 + module_param_named(devloss_tmo, bnx2fc_devloss_tmo, uint, S_IRUGO); 112 + MODULE_PARM_DESC(devloss_tmo, " Change devloss_tmo for the remote ports " 113 + "attached via bnx2fc."); 114 + 115 + uint bnx2fc_max_luns = BNX2FC_MAX_LUN; 116 + module_param_named(max_luns, bnx2fc_max_luns, uint, S_IRUGO); 117 + MODULE_PARM_DESC(max_luns, " Change the default max_lun per SCSI host. Default " 118 + "0xffff."); 119 + 120 + uint bnx2fc_queue_depth; 121 + module_param_named(queue_depth, bnx2fc_queue_depth, uint, S_IRUGO); 122 + MODULE_PARM_DESC(queue_depth, " Change the default queue depth of SCSI devices " 123 + "attached via bnx2fc."); 124 + 125 + uint bnx2fc_log_fka; 126 + module_param_named(log_fka, bnx2fc_log_fka, uint, S_IRUGO|S_IWUSR); 127 + MODULE_PARM_DESC(log_fka, " Print message to kernel log when fcoe is " 128 + "initiating a FIP keep alive when debug logging is enabled."); 129 + 110 130 static int bnx2fc_cpu_callback(struct notifier_block *nfb, 111 131 unsigned long action, void *hcpu); 112 132 /* notification function for CPU hotplug events */ ··· 712 692 int rc = 0; 713 693 714 694 shost->max_cmd_len = BNX2FC_MAX_CMD_LEN; 715 - shost->max_lun = BNX2FC_MAX_LUN; 695 + shost->max_lun = bnx2fc_max_luns; 716 696 shost->max_id = BNX2FC_MAX_FCP_TGT; 717 697 shost->max_channel = 0; 718 698 if (lport->vport) ··· 1081 1061 */ 1082 1062 static void bnx2fc_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) 1083 1063 { 1064 + struct fip_header *fiph; 1065 + struct ethhdr *eth_hdr; 1066 + u16 op; 1067 + u8 sub; 1068 + 1069 + fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2); 1070 + eth_hdr = (struct ethhdr *)skb_mac_header(skb); 1071 + op = ntohs(fiph->fip_op); 1072 + sub = fiph->fip_subcode; 1073 + 1074 + if (op == FIP_OP_CTRL && sub == FIP_SC_SOL && bnx2fc_log_fka) 1075 + BNX2FC_MISC_DBG("Sending FKA from %pM to 
%pM.\n", 1076 + eth_hdr->h_source, eth_hdr->h_dest); 1077 + 1084 1078 skb->dev = bnx2fc_from_ctlr(fip)->netdev; 1085 1079 dev_queue_xmit(skb); 1086 1080 } ··· 1135 1101 netdev->name); 1136 1102 return -EIO; 1137 1103 } 1104 + 1105 + if (bnx2fc_devloss_tmo) 1106 + fc_host_dev_loss_tmo(vn_port->host) = bnx2fc_devloss_tmo; 1138 1107 1139 1108 if (disabled) { 1140 1109 fc_vport_set_state(vport, FC_VPORT_DISABLED); ··· 1531 1494 goto shost_err; 1532 1495 } 1533 1496 fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN; 1497 + 1498 + if (bnx2fc_devloss_tmo) 1499 + fc_host_dev_loss_tmo(shost) = bnx2fc_devloss_tmo; 1534 1500 1535 1501 /* Allocate exchange manager */ 1536 1502 if (!npiv) ··· 2039 1999 return; 2040 2000 } 2041 2001 2002 + pr_info(PFX "FCoE initialized for %s.\n", dev->netdev->name); 2003 + 2042 2004 /* Add HBA to the adapter list */ 2043 2005 mutex_lock(&bnx2fc_dev_lock); 2044 2006 list_add_tail(&hba->list, &adapter_list); ··· 2335 2293 ctlr = bnx2fc_to_ctlr(interface); 2336 2294 cdev = fcoe_ctlr_to_ctlr_dev(ctlr); 2337 2295 interface->vlan_id = vlan_id; 2296 + interface->tm_timeout = BNX2FC_TM_TIMEOUT; 2338 2297 2339 2298 interface->timer_work_queue = 2340 2299 create_singlethread_workqueue("bnx2fc_timer_wq"); ··· 2655 2612 return NOTIFY_OK; 2656 2613 } 2657 2614 2615 + static int bnx2fc_slave_configure(struct scsi_device *sdev) 2616 + { 2617 + if (!bnx2fc_queue_depth) 2618 + return 0; 2619 + 2620 + scsi_change_queue_depth(sdev, bnx2fc_queue_depth); 2621 + return 0; 2622 + } 2623 + 2658 2624 /** 2659 2625 * bnx2fc_mod_init - module init entry point 2660 2626 * ··· 2910 2858 .bsg_request = fc_lport_bsg_request, 2911 2859 }; 2912 2860 2861 + /* 2862 + * Additional scsi_host attributes. 
2863 + */ 2864 + static ssize_t 2865 + bnx2fc_tm_timeout_show(struct device *dev, struct device_attribute *attr, 2866 + char *buf) 2867 + { 2868 + struct Scsi_Host *shost = class_to_shost(dev); 2869 + struct fc_lport *lport = shost_priv(shost); 2870 + struct fcoe_port *port = lport_priv(lport); 2871 + struct bnx2fc_interface *interface = port->priv; 2872 + 2873 + sprintf(buf, "%u\n", interface->tm_timeout); 2874 + return strlen(buf); 2875 + } 2876 + 2877 + static ssize_t 2878 + bnx2fc_tm_timeout_store(struct device *dev, 2879 + struct device_attribute *attr, const char *buf, size_t count) 2880 + { 2881 + struct Scsi_Host *shost = class_to_shost(dev); 2882 + struct fc_lport *lport = shost_priv(shost); 2883 + struct fcoe_port *port = lport_priv(lport); 2884 + struct bnx2fc_interface *interface = port->priv; 2885 + int rval, val; 2886 + 2887 + rval = kstrtouint(buf, 10, &val); 2888 + if (rval) 2889 + return rval; 2890 + if (val > 255) 2891 + return -ERANGE; 2892 + 2893 + interface->tm_timeout = (u8)val; 2894 + return strlen(buf); 2895 + } 2896 + 2897 + static DEVICE_ATTR(tm_timeout, S_IRUGO|S_IWUSR, bnx2fc_tm_timeout_show, 2898 + bnx2fc_tm_timeout_store); 2899 + 2900 + static struct device_attribute *bnx2fc_host_attrs[] = { 2901 + &dev_attr_tm_timeout, 2902 + NULL, 2903 + }; 2904 + 2913 2905 /** 2914 2906 * scsi_host_template structure used while registering with SCSI-ml 2915 2907 */ ··· 2973 2877 .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD, 2974 2878 .max_sectors = 1024, 2975 2879 .track_queue_depth = 1, 2880 + .slave_configure = bnx2fc_slave_configure, 2881 + .shost_attrs = bnx2fc_host_attrs, 2976 2882 }; 2977 2883 2978 2884 static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
+13 -1
drivers/scsi/bnx2fc/bnx2fc_io.c
··· 179 179 180 180 bnx2fc_unmap_sg_list(io_req); 181 181 io_req->sc_cmd = NULL; 182 + 183 + /* Sanity checks before returning command to mid-layer */ 182 184 if (!sc_cmd) { 183 185 printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. " 184 186 "IO(0x%x) already cleaned up\n", 185 187 io_req->xid); 186 188 return; 187 189 } 190 + if (!sc_cmd->device) { 191 + pr_err(PFX "0x%x: sc_cmd->device is NULL.\n", io_req->xid); 192 + return; 193 + } 194 + if (!sc_cmd->device->host) { 195 + pr_err(PFX "0x%x: sc_cmd->device->host is NULL.\n", 196 + io_req->xid); 197 + return; 198 + } 199 + 188 200 sc_cmd->result = err_code << 16; 189 201 190 202 BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n", ··· 782 770 spin_unlock_bh(&tgt->tgt_lock); 783 771 784 772 rc = wait_for_completion_timeout(&io_req->tm_done, 785 - BNX2FC_TM_TIMEOUT * HZ); 773 + interface->tm_timeout * HZ); 786 774 spin_lock_bh(&tgt->tgt_lock); 787 775 788 776 io_req->wait_for_comp = 0;
+2 -2
drivers/scsi/bnx2i/bnx2i_iscsi.c
··· 675 675 { 676 676 struct list_head *list; 677 677 struct list_head *tmp; 678 - struct bnx2i_endpoint *ep; 678 + struct bnx2i_endpoint *ep = NULL; 679 679 680 680 read_lock_bh(&hba->ep_rdwr_lock); 681 681 list_for_each_safe(list, tmp, &hba->ep_ofld_list) { ··· 703 703 { 704 704 struct list_head *list; 705 705 struct list_head *tmp; 706 - struct bnx2i_endpoint *ep; 706 + struct bnx2i_endpoint *ep = NULL; 707 707 708 708 read_lock_bh(&hba->ep_rdwr_lock); 709 709 list_for_each_safe(list, tmp, &hba->ep_destroy_list) {
+21 -838
drivers/scsi/constants.c
··· 292 292 293 293 struct error_info { 294 294 unsigned short code12; /* 0x0302 looks better than 0x03,0x02 */ 295 - const char * text; 295 + unsigned short size; 296 296 }; 297 297 298 298 /* 299 - * The canonical list of T10 Additional Sense Codes is available at: 300 - * http://www.t10.org/lists/asc-num.txt [most recent: 20141221] 299 + * There are 700+ entries in this table. To save space, we don't store 300 + * (code, pointer) pairs, which would make sizeof(struct 301 + * error_info)==16 on 64 bits. Rather, the second element just stores 302 + * the size (including \0) of the corresponding string, and we use the 303 + * sum of these to get the appropriate offset into additional_text 304 + * defined below. This approach saves 12 bytes per entry. 301 305 */ 302 - 303 306 static const struct error_info additional[] = 304 307 { 305 - {0x0000, "No additional sense information"}, 306 - {0x0001, "Filemark detected"}, 307 - {0x0002, "End-of-partition/medium detected"}, 308 - {0x0003, "Setmark detected"}, 309 - {0x0004, "Beginning-of-partition/medium detected"}, 310 - {0x0005, "End-of-data detected"}, 311 - {0x0006, "I/O process terminated"}, 312 - {0x0007, "Programmable early warning detected"}, 313 - {0x0011, "Audio play operation in progress"}, 314 - {0x0012, "Audio play operation paused"}, 315 - {0x0013, "Audio play operation successfully completed"}, 316 - {0x0014, "Audio play operation stopped due to error"}, 317 - {0x0015, "No current audio status to return"}, 318 - {0x0016, "Operation in progress"}, 319 - {0x0017, "Cleaning requested"}, 320 - {0x0018, "Erase operation in progress"}, 321 - {0x0019, "Locate operation in progress"}, 322 - {0x001A, "Rewind operation in progress"}, 323 - {0x001B, "Set capacity operation in progress"}, 324 - {0x001C, "Verify operation in progress"}, 325 - {0x001D, "ATA pass through information available"}, 326 - {0x001E, "Conflicting SA creation request"}, 327 - {0x001F, "Logical unit transitioning to another power condition"}, 328 
- {0x0020, "Extended copy information available"}, 329 - {0x0021, "Atomic command aborted due to ACA"}, 330 - 331 - {0x0100, "No index/sector signal"}, 332 - 333 - {0x0200, "No seek complete"}, 334 - 335 - {0x0300, "Peripheral device write fault"}, 336 - {0x0301, "No write current"}, 337 - {0x0302, "Excessive write errors"}, 338 - 339 - {0x0400, "Logical unit not ready, cause not reportable"}, 340 - {0x0401, "Logical unit is in process of becoming ready"}, 341 - {0x0402, "Logical unit not ready, initializing command required"}, 342 - {0x0403, "Logical unit not ready, manual intervention required"}, 343 - {0x0404, "Logical unit not ready, format in progress"}, 344 - {0x0405, "Logical unit not ready, rebuild in progress"}, 345 - {0x0406, "Logical unit not ready, recalculation in progress"}, 346 - {0x0407, "Logical unit not ready, operation in progress"}, 347 - {0x0408, "Logical unit not ready, long write in progress"}, 348 - {0x0409, "Logical unit not ready, self-test in progress"}, 349 - {0x040A, "Logical unit not accessible, asymmetric access state " 350 - "transition"}, 351 - {0x040B, "Logical unit not accessible, target port in standby state"}, 352 - {0x040C, "Logical unit not accessible, target port in unavailable " 353 - "state"}, 354 - {0x040D, "Logical unit not ready, structure check required"}, 355 - {0x040E, "Logical unit not ready, security session in progress"}, 356 - {0x0410, "Logical unit not ready, auxiliary memory not accessible"}, 357 - {0x0411, "Logical unit not ready, notify (enable spinup) required"}, 358 - {0x0412, "Logical unit not ready, offline"}, 359 - {0x0413, "Logical unit not ready, SA creation in progress"}, 360 - {0x0414, "Logical unit not ready, space allocation in progress"}, 361 - {0x0415, "Logical unit not ready, robotics disabled"}, 362 - {0x0416, "Logical unit not ready, configuration required"}, 363 - {0x0417, "Logical unit not ready, calibration required"}, 364 - {0x0418, "Logical unit not ready, a door is open"}, 365 - {0x0419, 
"Logical unit not ready, operating in sequential mode"}, 366 - {0x041A, "Logical unit not ready, start stop unit command in " 367 - "progress"}, 368 - {0x041B, "Logical unit not ready, sanitize in progress"}, 369 - {0x041C, "Logical unit not ready, additional power use not yet " 370 - "granted"}, 371 - {0x041D, "Logical unit not ready, configuration in progress"}, 372 - {0x041E, "Logical unit not ready, microcode activation required"}, 373 - {0x041F, "Logical unit not ready, microcode download required"}, 374 - {0x0420, "Logical unit not ready, logical unit reset required"}, 375 - {0x0421, "Logical unit not ready, hard reset required"}, 376 - {0x0422, "Logical unit not ready, power cycle required"}, 377 - 378 - {0x0500, "Logical unit does not respond to selection"}, 379 - 380 - {0x0600, "No reference position found"}, 381 - 382 - {0x0700, "Multiple peripheral devices selected"}, 383 - 384 - {0x0800, "Logical unit communication failure"}, 385 - {0x0801, "Logical unit communication time-out"}, 386 - {0x0802, "Logical unit communication parity error"}, 387 - {0x0803, "Logical unit communication CRC error (Ultra-DMA/32)"}, 388 - {0x0804, "Unreachable copy target"}, 389 - 390 - {0x0900, "Track following error"}, 391 - {0x0901, "Tracking servo failure"}, 392 - {0x0902, "Focus servo failure"}, 393 - {0x0903, "Spindle servo failure"}, 394 - {0x0904, "Head select fault"}, 395 - {0x0905, "Vibration induced tracking error"}, 396 - 397 - {0x0A00, "Error log overflow"}, 398 - 399 - {0x0B00, "Warning"}, 400 - {0x0B01, "Warning - specified temperature exceeded"}, 401 - {0x0B02, "Warning - enclosure degraded"}, 402 - {0x0B03, "Warning - background self-test failed"}, 403 - {0x0B04, "Warning - background pre-scan detected medium error"}, 404 - {0x0B05, "Warning - background medium scan detected medium error"}, 405 - {0x0B06, "Warning - non-volatile cache now volatile"}, 406 - {0x0B07, "Warning - degraded power to non-volatile cache"}, 407 - {0x0B08, "Warning - power loss 
expected"}, 408 - {0x0B09, "Warning - device statistics notification active"}, 409 - 410 - {0x0C00, "Write error"}, 411 - {0x0C01, "Write error - recovered with auto reallocation"}, 412 - {0x0C02, "Write error - auto reallocation failed"}, 413 - {0x0C03, "Write error - recommend reassignment"}, 414 - {0x0C04, "Compression check miscompare error"}, 415 - {0x0C05, "Data expansion occurred during compression"}, 416 - {0x0C06, "Block not compressible"}, 417 - {0x0C07, "Write error - recovery needed"}, 418 - {0x0C08, "Write error - recovery failed"}, 419 - {0x0C09, "Write error - loss of streaming"}, 420 - {0x0C0A, "Write error - padding blocks added"}, 421 - {0x0C0B, "Auxiliary memory write error"}, 422 - {0x0C0C, "Write error - unexpected unsolicited data"}, 423 - {0x0C0D, "Write error - not enough unsolicited data"}, 424 - {0x0C0E, "Multiple write errors"}, 425 - {0x0C0F, "Defects in error window"}, 426 - {0x0C10, "Incomplete multiple atomic write operations"}, 427 - 428 - {0x0D00, "Error detected by third party temporary initiator"}, 429 - {0x0D01, "Third party device failure"}, 430 - {0x0D02, "Copy target device not reachable"}, 431 - {0x0D03, "Incorrect copy target device type"}, 432 - {0x0D04, "Copy target device data underrun"}, 433 - {0x0D05, "Copy target device data overrun"}, 434 - 435 - {0x0E00, "Invalid information unit"}, 436 - {0x0E01, "Information unit too short"}, 437 - {0x0E02, "Information unit too long"}, 438 - {0x0E03, "Invalid field in command information unit"}, 439 - 440 - {0x1000, "Id CRC or ECC error"}, 441 - {0x1001, "Logical block guard check failed"}, 442 - {0x1002, "Logical block application tag check failed"}, 443 - {0x1003, "Logical block reference tag check failed"}, 444 - {0x1004, "Logical block protection error on recover buffered data"}, 445 - {0x1005, "Logical block protection method error"}, 446 - 447 - {0x1100, "Unrecovered read error"}, 448 - {0x1101, "Read retries exhausted"}, 449 - {0x1102, "Error too long to correct"}, 450 - 
{0x1103, "Multiple read errors"}, 451 - {0x1104, "Unrecovered read error - auto reallocate failed"}, 452 - {0x1105, "L-EC uncorrectable error"}, 453 - {0x1106, "CIRC unrecovered error"}, 454 - {0x1107, "Data re-synchronization error"}, 455 - {0x1108, "Incomplete block read"}, 456 - {0x1109, "No gap found"}, 457 - {0x110A, "Miscorrected error"}, 458 - {0x110B, "Unrecovered read error - recommend reassignment"}, 459 - {0x110C, "Unrecovered read error - recommend rewrite the data"}, 460 - {0x110D, "De-compression CRC error"}, 461 - {0x110E, "Cannot decompress using declared algorithm"}, 462 - {0x110F, "Error reading UPC/EAN number"}, 463 - {0x1110, "Error reading ISRC number"}, 464 - {0x1111, "Read error - loss of streaming"}, 465 - {0x1112, "Auxiliary memory read error"}, 466 - {0x1113, "Read error - failed retransmission request"}, 467 - {0x1114, "Read error - lba marked bad by application client"}, 468 - {0x1115, "Write after sanitize required"}, 469 - 470 - {0x1200, "Address mark not found for id field"}, 471 - 472 - {0x1300, "Address mark not found for data field"}, 473 - 474 - {0x1400, "Recorded entity not found"}, 475 - {0x1401, "Record not found"}, 476 - {0x1402, "Filemark or setmark not found"}, 477 - {0x1403, "End-of-data not found"}, 478 - {0x1404, "Block sequence error"}, 479 - {0x1405, "Record not found - recommend reassignment"}, 480 - {0x1406, "Record not found - data auto-reallocated"}, 481 - {0x1407, "Locate operation failure"}, 482 - 483 - {0x1500, "Random positioning error"}, 484 - {0x1501, "Mechanical positioning error"}, 485 - {0x1502, "Positioning error detected by read of medium"}, 486 - 487 - {0x1600, "Data synchronization mark error"}, 488 - {0x1601, "Data sync error - data rewritten"}, 489 - {0x1602, "Data sync error - recommend rewrite"}, 490 - {0x1603, "Data sync error - data auto-reallocated"}, 491 - {0x1604, "Data sync error - recommend reassignment"}, 492 - 493 - {0x1700, "Recovered data with no error correction applied"}, 494 - {0x1701, 
"Recovered data with retries"}, 495 - {0x1702, "Recovered data with positive head offset"}, 496 - {0x1703, "Recovered data with negative head offset"}, 497 - {0x1704, "Recovered data with retries and/or circ applied"}, 498 - {0x1705, "Recovered data using previous sector id"}, 499 - {0x1706, "Recovered data without ECC - data auto-reallocated"}, 500 - {0x1707, "Recovered data without ECC - recommend reassignment"}, 501 - {0x1708, "Recovered data without ECC - recommend rewrite"}, 502 - {0x1709, "Recovered data without ECC - data rewritten"}, 503 - 504 - {0x1800, "Recovered data with error correction applied"}, 505 - {0x1801, "Recovered data with error corr. & retries applied"}, 506 - {0x1802, "Recovered data - data auto-reallocated"}, 507 - {0x1803, "Recovered data with CIRC"}, 508 - {0x1804, "Recovered data with L-EC"}, 509 - {0x1805, "Recovered data - recommend reassignment"}, 510 - {0x1806, "Recovered data - recommend rewrite"}, 511 - {0x1807, "Recovered data with ECC - data rewritten"}, 512 - {0x1808, "Recovered data with linking"}, 513 - 514 - {0x1900, "Defect list error"}, 515 - {0x1901, "Defect list not available"}, 516 - {0x1902, "Defect list error in primary list"}, 517 - {0x1903, "Defect list error in grown list"}, 518 - 519 - {0x1A00, "Parameter list length error"}, 520 - 521 - {0x1B00, "Synchronous data transfer error"}, 522 - 523 - {0x1C00, "Defect list not found"}, 524 - {0x1C01, "Primary defect list not found"}, 525 - {0x1C02, "Grown defect list not found"}, 526 - 527 - {0x1D00, "Miscompare during verify operation"}, 528 - {0x1D01, "Miscompare verify of unmapped LBA"}, 529 - 530 - {0x1E00, "Recovered id with ECC correction"}, 531 - 532 - {0x1F00, "Partial defect list transfer"}, 533 - 534 - {0x2000, "Invalid command operation code"}, 535 - {0x2001, "Access denied - initiator pending-enrolled"}, 536 - {0x2002, "Access denied - no access rights"}, 537 - {0x2003, "Access denied - invalid mgmt id key"}, 538 - {0x2004, "Illegal command while in write 
capable state"}, 539 - {0x2005, "Obsolete"}, 540 - {0x2006, "Illegal command while in explicit address mode"}, 541 - {0x2007, "Illegal command while in implicit address mode"}, 542 - {0x2008, "Access denied - enrollment conflict"}, 543 - {0x2009, "Access denied - invalid LU identifier"}, 544 - {0x200A, "Access denied - invalid proxy token"}, 545 - {0x200B, "Access denied - ACL LUN conflict"}, 546 - {0x200C, "Illegal command when not in append-only mode"}, 547 - 548 - {0x2100, "Logical block address out of range"}, 549 - {0x2101, "Invalid element address"}, 550 - {0x2102, "Invalid address for write"}, 551 - {0x2103, "Invalid write crossing layer jump"}, 552 - {0x2104, "Unaligned write command"}, 553 - {0x2105, "Write boundary violation"}, 554 - {0x2106, "Attempt to read invalid data"}, 555 - {0x2107, "Read boundary violation"}, 556 - 557 - {0x2200, "Illegal function (use 20 00, 24 00, or 26 00)"}, 558 - 559 - {0x2300, "Invalid token operation, cause not reportable"}, 560 - {0x2301, "Invalid token operation, unsupported token type"}, 561 - {0x2302, "Invalid token operation, remote token usage not supported"}, 562 - {0x2303, "Invalid token operation, remote rod token creation not " 563 - "supported"}, 564 - {0x2304, "Invalid token operation, token unknown"}, 565 - {0x2305, "Invalid token operation, token corrupt"}, 566 - {0x2306, "Invalid token operation, token revoked"}, 567 - {0x2307, "Invalid token operation, token expired"}, 568 - {0x2308, "Invalid token operation, token cancelled"}, 569 - {0x2309, "Invalid token operation, token deleted"}, 570 - {0x230A, "Invalid token operation, invalid token length"}, 571 - 572 - {0x2400, "Invalid field in cdb"}, 573 - {0x2401, "CDB decryption error"}, 574 - {0x2402, "Obsolete"}, 575 - {0x2403, "Obsolete"}, 576 - {0x2404, "Security audit value frozen"}, 577 - {0x2405, "Security working key frozen"}, 578 - {0x2406, "Nonce not unique"}, 579 - {0x2407, "Nonce timestamp out of range"}, 580 - {0x2408, "Invalid XCDB"}, 581 - 582 - 
{0x2500, "Logical unit not supported"}, 583 - 584 - {0x2600, "Invalid field in parameter list"}, 585 - {0x2601, "Parameter not supported"}, 586 - {0x2602, "Parameter value invalid"}, 587 - {0x2603, "Threshold parameters not supported"}, 588 - {0x2604, "Invalid release of persistent reservation"}, 589 - {0x2605, "Data decryption error"}, 590 - {0x2606, "Too many target descriptors"}, 591 - {0x2607, "Unsupported target descriptor type code"}, 592 - {0x2608, "Too many segment descriptors"}, 593 - {0x2609, "Unsupported segment descriptor type code"}, 594 - {0x260A, "Unexpected inexact segment"}, 595 - {0x260B, "Inline data length exceeded"}, 596 - {0x260C, "Invalid operation for copy source or destination"}, 597 - {0x260D, "Copy segment granularity violation"}, 598 - {0x260E, "Invalid parameter while port is enabled"}, 599 - {0x260F, "Invalid data-out buffer integrity check value"}, 600 - {0x2610, "Data decryption key fail limit reached"}, 601 - {0x2611, "Incomplete key-associated data set"}, 602 - {0x2612, "Vendor specific key reference not found"}, 603 - 604 - {0x2700, "Write protected"}, 605 - {0x2701, "Hardware write protected"}, 606 - {0x2702, "Logical unit software write protected"}, 607 - {0x2703, "Associated write protect"}, 608 - {0x2704, "Persistent write protect"}, 609 - {0x2705, "Permanent write protect"}, 610 - {0x2706, "Conditional write protect"}, 611 - {0x2707, "Space allocation failed write protect"}, 612 - {0x2708, "Zone is read only"}, 613 - 614 - {0x2800, "Not ready to ready change, medium may have changed"}, 615 - {0x2801, "Import or export element accessed"}, 616 - {0x2802, "Format-layer may have changed"}, 617 - {0x2803, "Import/export element accessed, medium changed"}, 618 - 619 - {0x2900, "Power on, reset, or bus device reset occurred"}, 620 - {0x2901, "Power on occurred"}, 621 - {0x2902, "Scsi bus reset occurred"}, 622 - {0x2903, "Bus device reset function occurred"}, 623 - {0x2904, "Device internal reset"}, 624 - {0x2905, "Transceiver mode 
changed to single-ended"}, 625 - {0x2906, "Transceiver mode changed to lvd"}, 626 - {0x2907, "I_T nexus loss occurred"}, 627 - 628 - {0x2A00, "Parameters changed"}, 629 - {0x2A01, "Mode parameters changed"}, 630 - {0x2A02, "Log parameters changed"}, 631 - {0x2A03, "Reservations preempted"}, 632 - {0x2A04, "Reservations released"}, 633 - {0x2A05, "Registrations preempted"}, 634 - {0x2A06, "Asymmetric access state changed"}, 635 - {0x2A07, "Implicit asymmetric access state transition failed"}, 636 - {0x2A08, "Priority changed"}, 637 - {0x2A09, "Capacity data has changed"}, 638 - {0x2A0A, "Error history I_T nexus cleared"}, 639 - {0x2A0B, "Error history snapshot released"}, 640 - {0x2A0C, "Error recovery attributes have changed"}, 641 - {0x2A0D, "Data encryption capabilities changed"}, 642 - {0x2A10, "Timestamp changed"}, 643 - {0x2A11, "Data encryption parameters changed by another i_t nexus"}, 644 - {0x2A12, "Data encryption parameters changed by vendor specific " 645 - "event"}, 646 - {0x2A13, "Data encryption key instance counter has changed"}, 647 - {0x2A14, "SA creation capabilities data has changed"}, 648 - {0x2A15, "Medium removal prevention preempted"}, 649 - 650 - {0x2B00, "Copy cannot execute since host cannot disconnect"}, 651 - 652 - {0x2C00, "Command sequence error"}, 653 - {0x2C01, "Too many windows specified"}, 654 - {0x2C02, "Invalid combination of windows specified"}, 655 - {0x2C03, "Current program area is not empty"}, 656 - {0x2C04, "Current program area is empty"}, 657 - {0x2C05, "Illegal power condition request"}, 658 - {0x2C06, "Persistent prevent conflict"}, 659 - {0x2C07, "Previous busy status"}, 660 - {0x2C08, "Previous task set full status"}, 661 - {0x2C09, "Previous reservation conflict status"}, 662 - {0x2C0A, "Partition or collection contains user objects"}, 663 - {0x2C0B, "Not reserved"}, 664 - {0x2C0C, "Orwrite generation does not match"}, 665 - {0x2C0D, "Reset write pointer not allowed"}, 666 - {0x2C0E, "Zone is offline"}, 667 - 668 - 
{0x2D00, "Overwrite error on update in place"}, 669 - 670 - {0x2E00, "Insufficient time for operation"}, 671 - {0x2E01, "Command timeout before processing"}, 672 - {0x2E02, "Command timeout during processing"}, 673 - {0x2E03, "Command timeout during processing due to error recovery"}, 674 - 675 - {0x2F00, "Commands cleared by another initiator"}, 676 - {0x2F01, "Commands cleared by power loss notification"}, 677 - {0x2F02, "Commands cleared by device server"}, 678 - {0x2F03, "Some commands cleared by queuing layer event"}, 679 - 680 - {0x3000, "Incompatible medium installed"}, 681 - {0x3001, "Cannot read medium - unknown format"}, 682 - {0x3002, "Cannot read medium - incompatible format"}, 683 - {0x3003, "Cleaning cartridge installed"}, 684 - {0x3004, "Cannot write medium - unknown format"}, 685 - {0x3005, "Cannot write medium - incompatible format"}, 686 - {0x3006, "Cannot format medium - incompatible medium"}, 687 - {0x3007, "Cleaning failure"}, 688 - {0x3008, "Cannot write - application code mismatch"}, 689 - {0x3009, "Current session not fixated for append"}, 690 - {0x300A, "Cleaning request rejected"}, 691 - {0x300C, "WORM medium - overwrite attempted"}, 692 - {0x300D, "WORM medium - integrity check"}, 693 - {0x3010, "Medium not formatted"}, 694 - {0x3011, "Incompatible volume type"}, 695 - {0x3012, "Incompatible volume qualifier"}, 696 - {0x3013, "Cleaning volume expired"}, 697 - 698 - {0x3100, "Medium format corrupted"}, 699 - {0x3101, "Format command failed"}, 700 - {0x3102, "Zoned formatting failed due to spare linking"}, 701 - {0x3103, "Sanitize command failed"}, 702 - 703 - {0x3200, "No defect spare location available"}, 704 - {0x3201, "Defect list update failure"}, 705 - 706 - {0x3300, "Tape length error"}, 707 - 708 - {0x3400, "Enclosure failure"}, 709 - 710 - {0x3500, "Enclosure services failure"}, 711 - {0x3501, "Unsupported enclosure function"}, 712 - {0x3502, "Enclosure services unavailable"}, 713 - {0x3503, "Enclosure services transfer failure"}, 
714 - {0x3504, "Enclosure services transfer refused"}, 715 - {0x3505, "Enclosure services checksum error"}, 716 - 717 - {0x3600, "Ribbon, ink, or toner failure"}, 718 - 719 - {0x3700, "Rounded parameter"}, 720 - 721 - {0x3800, "Event status notification"}, 722 - {0x3802, "Esn - power management class event"}, 723 - {0x3804, "Esn - media class event"}, 724 - {0x3806, "Esn - device busy class event"}, 725 - {0x3807, "Thin Provisioning soft threshold reached"}, 726 - 727 - {0x3900, "Saving parameters not supported"}, 728 - 729 - {0x3A00, "Medium not present"}, 730 - {0x3A01, "Medium not present - tray closed"}, 731 - {0x3A02, "Medium not present - tray open"}, 732 - {0x3A03, "Medium not present - loadable"}, 733 - {0x3A04, "Medium not present - medium auxiliary memory accessible"}, 734 - 735 - {0x3B00, "Sequential positioning error"}, 736 - {0x3B01, "Tape position error at beginning-of-medium"}, 737 - {0x3B02, "Tape position error at end-of-medium"}, 738 - {0x3B03, "Tape or electronic vertical forms unit not ready"}, 739 - {0x3B04, "Slew failure"}, 740 - {0x3B05, "Paper jam"}, 741 - {0x3B06, "Failed to sense top-of-form"}, 742 - {0x3B07, "Failed to sense bottom-of-form"}, 743 - {0x3B08, "Reposition error"}, 744 - {0x3B09, "Read past end of medium"}, 745 - {0x3B0A, "Read past beginning of medium"}, 746 - {0x3B0B, "Position past end of medium"}, 747 - {0x3B0C, "Position past beginning of medium"}, 748 - {0x3B0D, "Medium destination element full"}, 749 - {0x3B0E, "Medium source element empty"}, 750 - {0x3B0F, "End of medium reached"}, 751 - {0x3B11, "Medium magazine not accessible"}, 752 - {0x3B12, "Medium magazine removed"}, 753 - {0x3B13, "Medium magazine inserted"}, 754 - {0x3B14, "Medium magazine locked"}, 755 - {0x3B15, "Medium magazine unlocked"}, 756 - {0x3B16, "Mechanical positioning or changer error"}, 757 - {0x3B17, "Read past end of user object"}, 758 - {0x3B18, "Element disabled"}, 759 - {0x3B19, "Element enabled"}, 760 - {0x3B1A, "Data transfer device 
removed"}, 761 - {0x3B1B, "Data transfer device inserted"}, 762 - {0x3B1C, "Too many logical objects on partition to support " 763 - "operation"}, 764 - 765 - {0x3D00, "Invalid bits in identify message"}, 766 - 767 - {0x3E00, "Logical unit has not self-configured yet"}, 768 - {0x3E01, "Logical unit failure"}, 769 - {0x3E02, "Timeout on logical unit"}, 770 - {0x3E03, "Logical unit failed self-test"}, 771 - {0x3E04, "Logical unit unable to update self-test log"}, 772 - 773 - {0x3F00, "Target operating conditions have changed"}, 774 - {0x3F01, "Microcode has been changed"}, 775 - {0x3F02, "Changed operating definition"}, 776 - {0x3F03, "Inquiry data has changed"}, 777 - {0x3F04, "Component device attached"}, 778 - {0x3F05, "Device identifier changed"}, 779 - {0x3F06, "Redundancy group created or modified"}, 780 - {0x3F07, "Redundancy group deleted"}, 781 - {0x3F08, "Spare created or modified"}, 782 - {0x3F09, "Spare deleted"}, 783 - {0x3F0A, "Volume set created or modified"}, 784 - {0x3F0B, "Volume set deleted"}, 785 - {0x3F0C, "Volume set deassigned"}, 786 - {0x3F0D, "Volume set reassigned"}, 787 - {0x3F0E, "Reported luns data has changed"}, 788 - {0x3F0F, "Echo buffer overwritten"}, 789 - {0x3F10, "Medium loadable"}, 790 - {0x3F11, "Medium auxiliary memory accessible"}, 791 - {0x3F12, "iSCSI IP address added"}, 792 - {0x3F13, "iSCSI IP address removed"}, 793 - {0x3F14, "iSCSI IP address changed"}, 794 - {0x3F15, "Inspect referrals sense descriptors"}, 795 - {0x3F16, "Microcode has been changed without reset"}, 796 - /* 797 - * {0x40NN, "Ram failure"}, 798 - * {0x40NN, "Diagnostic failure on component nn"}, 799 - * {0x41NN, "Data path failure"}, 800 - * {0x42NN, "Power-on or self-test failure"}, 801 - */ 802 - {0x4300, "Message error"}, 803 - 804 - {0x4400, "Internal target failure"}, 805 - {0x4401, "Persistent reservation information lost"}, 806 - {0x4471, "ATA device failed set features"}, 807 - 808 - {0x4500, "Select or reselect failure"}, 809 - 810 - {0x4600, 
"Unsuccessful soft reset"}, 811 - 812 - {0x4700, "Scsi parity error"}, 813 - {0x4701, "Data phase CRC error detected"}, 814 - {0x4702, "Scsi parity error detected during st data phase"}, 815 - {0x4703, "Information unit iuCRC error detected"}, 816 - {0x4704, "Asynchronous information protection error detected"}, 817 - {0x4705, "Protocol service CRC error"}, 818 - {0x4706, "Phy test function in progress"}, 819 - {0x477f, "Some commands cleared by iSCSI Protocol event"}, 820 - 821 - {0x4800, "Initiator detected error message received"}, 822 - 823 - {0x4900, "Invalid message error"}, 824 - 825 - {0x4A00, "Command phase error"}, 826 - 827 - {0x4B00, "Data phase error"}, 828 - {0x4B01, "Invalid target port transfer tag received"}, 829 - {0x4B02, "Too much write data"}, 830 - {0x4B03, "Ack/nak timeout"}, 831 - {0x4B04, "Nak received"}, 832 - {0x4B05, "Data offset error"}, 833 - {0x4B06, "Initiator response timeout"}, 834 - {0x4B07, "Connection lost"}, 835 - {0x4B08, "Data-in buffer overflow - data buffer size"}, 836 - {0x4B09, "Data-in buffer overflow - data buffer descriptor area"}, 837 - {0x4B0A, "Data-in buffer error"}, 838 - {0x4B0B, "Data-out buffer overflow - data buffer size"}, 839 - {0x4B0C, "Data-out buffer overflow - data buffer descriptor area"}, 840 - {0x4B0D, "Data-out buffer error"}, 841 - {0x4B0E, "PCIe fabric error"}, 842 - {0x4B0F, "PCIe completion timeout"}, 843 - {0x4B10, "PCIe completer abort"}, 844 - {0x4B11, "PCIe poisoned tlp received"}, 845 - {0x4B12, "PCIe eCRC check failed"}, 846 - {0x4B13, "PCIe unsupported request"}, 847 - {0x4B14, "PCIe acs violation"}, 848 - {0x4B15, "PCIe tlp prefix blocked"}, 849 - 850 - {0x4C00, "Logical unit failed self-configuration"}, 851 - /* 852 - * {0x4DNN, "Tagged overlapped commands (nn = queue tag)"}, 853 - */ 854 - {0x4E00, "Overlapped commands attempted"}, 855 - 856 - {0x5000, "Write append error"}, 857 - {0x5001, "Write append position error"}, 858 - {0x5002, "Position error related to timing"}, 859 - 860 - 
{0x5100, "Erase failure"}, 861 - {0x5101, "Erase failure - incomplete erase operation detected"}, 862 - 863 - {0x5200, "Cartridge fault"}, 864 - 865 - {0x5300, "Media load or eject failed"}, 866 - {0x5301, "Unload tape failure"}, 867 - {0x5302, "Medium removal prevented"}, 868 - {0x5303, "Medium removal prevented by data transfer element"}, 869 - {0x5304, "Medium thread or unthread failure"}, 870 - {0x5305, "Volume identifier invalid"}, 871 - {0x5306, "Volume identifier missing"}, 872 - {0x5307, "Duplicate volume identifier"}, 873 - {0x5308, "Element status unknown"}, 874 - {0x5309, "Data transfer device error - load failed"}, 875 - {0x530a, "Data transfer device error - unload failed"}, 876 - {0x530b, "Data transfer device error - unload missing"}, 877 - {0x530c, "Data transfer device error - eject failed"}, 878 - {0x530d, "Data transfer device error - library communication failed"}, 879 - 880 - {0x5400, "Scsi to host system interface failure"}, 881 - 882 - {0x5500, "System resource failure"}, 883 - {0x5501, "System buffer full"}, 884 - {0x5502, "Insufficient reservation resources"}, 885 - {0x5503, "Insufficient resources"}, 886 - {0x5504, "Insufficient registration resources"}, 887 - {0x5505, "Insufficient access control resources"}, 888 - {0x5506, "Auxiliary memory out of space"}, 889 - {0x5507, "Quota error"}, 890 - {0x5508, "Maximum number of supplemental decryption keys exceeded"}, 891 - {0x5509, "Medium auxiliary memory not accessible"}, 892 - {0x550A, "Data currently unavailable"}, 893 - {0x550B, "Insufficient power for operation"}, 894 - {0x550C, "Insufficient resources to create rod"}, 895 - {0x550D, "Insufficient resources to create rod token"}, 896 - {0x550E, "Insufficient zone resources"}, 897 - 898 - {0x5700, "Unable to recover table-of-contents"}, 899 - 900 - {0x5800, "Generation does not exist"}, 901 - 902 - {0x5900, "Updated block read"}, 903 - 904 - {0x5A00, "Operator request or state change input"}, 905 - {0x5A01, "Operator medium removal 
request"}, 906 - {0x5A02, "Operator selected write protect"}, 907 - {0x5A03, "Operator selected write permit"}, 908 - 909 - {0x5B00, "Log exception"}, 910 - {0x5B01, "Threshold condition met"}, 911 - {0x5B02, "Log counter at maximum"}, 912 - {0x5B03, "Log list codes exhausted"}, 913 - 914 - {0x5C00, "Rpl status change"}, 915 - {0x5C01, "Spindles synchronized"}, 916 - {0x5C02, "Spindles not synchronized"}, 917 - 918 - {0x5D00, "Failure prediction threshold exceeded"}, 919 - {0x5D01, "Media failure prediction threshold exceeded"}, 920 - {0x5D02, "Logical unit failure prediction threshold exceeded"}, 921 - {0x5D03, "Spare area exhaustion prediction threshold exceeded"}, 922 - {0x5D10, "Hardware impending failure general hard drive failure"}, 923 - {0x5D11, "Hardware impending failure drive error rate too high"}, 924 - {0x5D12, "Hardware impending failure data error rate too high"}, 925 - {0x5D13, "Hardware impending failure seek error rate too high"}, 926 - {0x5D14, "Hardware impending failure too many block reassigns"}, 927 - {0x5D15, "Hardware impending failure access times too high"}, 928 - {0x5D16, "Hardware impending failure start unit times too high"}, 929 - {0x5D17, "Hardware impending failure channel parametrics"}, 930 - {0x5D18, "Hardware impending failure controller detected"}, 931 - {0x5D19, "Hardware impending failure throughput performance"}, 932 - {0x5D1A, "Hardware impending failure seek time performance"}, 933 - {0x5D1B, "Hardware impending failure spin-up retry count"}, 934 - {0x5D1C, "Hardware impending failure drive calibration retry count"}, 935 - {0x5D20, "Controller impending failure general hard drive failure"}, 936 - {0x5D21, "Controller impending failure drive error rate too high"}, 937 - {0x5D22, "Controller impending failure data error rate too high"}, 938 - {0x5D23, "Controller impending failure seek error rate too high"}, 939 - {0x5D24, "Controller impending failure too many block reassigns"}, 940 - {0x5D25, "Controller impending failure 
access times too high"}, 941 - {0x5D26, "Controller impending failure start unit times too high"}, 942 - {0x5D27, "Controller impending failure channel parametrics"}, 943 - {0x5D28, "Controller impending failure controller detected"}, 944 - {0x5D29, "Controller impending failure throughput performance"}, 945 - {0x5D2A, "Controller impending failure seek time performance"}, 946 - {0x5D2B, "Controller impending failure spin-up retry count"}, 947 - {0x5D2C, "Controller impending failure drive calibration retry count"}, 948 - {0x5D30, "Data channel impending failure general hard drive failure"}, 949 - {0x5D31, "Data channel impending failure drive error rate too high"}, 950 - {0x5D32, "Data channel impending failure data error rate too high"}, 951 - {0x5D33, "Data channel impending failure seek error rate too high"}, 952 - {0x5D34, "Data channel impending failure too many block reassigns"}, 953 - {0x5D35, "Data channel impending failure access times too high"}, 954 - {0x5D36, "Data channel impending failure start unit times too high"}, 955 - {0x5D37, "Data channel impending failure channel parametrics"}, 956 - {0x5D38, "Data channel impending failure controller detected"}, 957 - {0x5D39, "Data channel impending failure throughput performance"}, 958 - {0x5D3A, "Data channel impending failure seek time performance"}, 959 - {0x5D3B, "Data channel impending failure spin-up retry count"}, 960 - {0x5D3C, "Data channel impending failure drive calibration retry " 961 - "count"}, 962 - {0x5D40, "Servo impending failure general hard drive failure"}, 963 - {0x5D41, "Servo impending failure drive error rate too high"}, 964 - {0x5D42, "Servo impending failure data error rate too high"}, 965 - {0x5D43, "Servo impending failure seek error rate too high"}, 966 - {0x5D44, "Servo impending failure too many block reassigns"}, 967 - {0x5D45, "Servo impending failure access times too high"}, 968 - {0x5D46, "Servo impending failure start unit times too high"}, 969 - {0x5D47, "Servo 
impending failure channel parametrics"}, 970 - {0x5D48, "Servo impending failure controller detected"}, 971 - {0x5D49, "Servo impending failure throughput performance"}, 972 - {0x5D4A, "Servo impending failure seek time performance"}, 973 - {0x5D4B, "Servo impending failure spin-up retry count"}, 974 - {0x5D4C, "Servo impending failure drive calibration retry count"}, 975 - {0x5D50, "Spindle impending failure general hard drive failure"}, 976 - {0x5D51, "Spindle impending failure drive error rate too high"}, 977 - {0x5D52, "Spindle impending failure data error rate too high"}, 978 - {0x5D53, "Spindle impending failure seek error rate too high"}, 979 - {0x5D54, "Spindle impending failure too many block reassigns"}, 980 - {0x5D55, "Spindle impending failure access times too high"}, 981 - {0x5D56, "Spindle impending failure start unit times too high"}, 982 - {0x5D57, "Spindle impending failure channel parametrics"}, 983 - {0x5D58, "Spindle impending failure controller detected"}, 984 - {0x5D59, "Spindle impending failure throughput performance"}, 985 - {0x5D5A, "Spindle impending failure seek time performance"}, 986 - {0x5D5B, "Spindle impending failure spin-up retry count"}, 987 - {0x5D5C, "Spindle impending failure drive calibration retry count"}, 988 - {0x5D60, "Firmware impending failure general hard drive failure"}, 989 - {0x5D61, "Firmware impending failure drive error rate too high"}, 990 - {0x5D62, "Firmware impending failure data error rate too high"}, 991 - {0x5D63, "Firmware impending failure seek error rate too high"}, 992 - {0x5D64, "Firmware impending failure too many block reassigns"}, 993 - {0x5D65, "Firmware impending failure access times too high"}, 994 - {0x5D66, "Firmware impending failure start unit times too high"}, 995 - {0x5D67, "Firmware impending failure channel parametrics"}, 996 - {0x5D68, "Firmware impending failure controller detected"}, 997 - {0x5D69, "Firmware impending failure throughput performance"}, 998 - {0x5D6A, "Firmware 
impending failure seek time performance"}, 999 - {0x5D6B, "Firmware impending failure spin-up retry count"}, 1000 - {0x5D6C, "Firmware impending failure drive calibration retry count"}, 1001 - {0x5DFF, "Failure prediction threshold exceeded (false)"}, 1002 - 1003 - {0x5E00, "Low power condition on"}, 1004 - {0x5E01, "Idle condition activated by timer"}, 1005 - {0x5E02, "Standby condition activated by timer"}, 1006 - {0x5E03, "Idle condition activated by command"}, 1007 - {0x5E04, "Standby condition activated by command"}, 1008 - {0x5E05, "Idle_b condition activated by timer"}, 1009 - {0x5E06, "Idle_b condition activated by command"}, 1010 - {0x5E07, "Idle_c condition activated by timer"}, 1011 - {0x5E08, "Idle_c condition activated by command"}, 1012 - {0x5E09, "Standby_y condition activated by timer"}, 1013 - {0x5E0A, "Standby_y condition activated by command"}, 1014 - {0x5E41, "Power state change to active"}, 1015 - {0x5E42, "Power state change to idle"}, 1016 - {0x5E43, "Power state change to standby"}, 1017 - {0x5E45, "Power state change to sleep"}, 1018 - {0x5E47, "Power state change to device control"}, 1019 - 1020 - {0x6000, "Lamp failure"}, 1021 - 1022 - {0x6100, "Video acquisition error"}, 1023 - {0x6101, "Unable to acquire video"}, 1024 - {0x6102, "Out of focus"}, 1025 - 1026 - {0x6200, "Scan head positioning error"}, 1027 - 1028 - {0x6300, "End of user area encountered on this track"}, 1029 - {0x6301, "Packet does not fit in available space"}, 1030 - 1031 - {0x6400, "Illegal mode for this track"}, 1032 - {0x6401, "Invalid packet size"}, 1033 - 1034 - {0x6500, "Voltage fault"}, 1035 - 1036 - {0x6600, "Automatic document feeder cover up"}, 1037 - {0x6601, "Automatic document feeder lift up"}, 1038 - {0x6602, "Document jam in automatic document feeder"}, 1039 - {0x6603, "Document miss feed automatic in document feeder"}, 1040 - 1041 - {0x6700, "Configuration failure"}, 1042 - {0x6701, "Configuration of incapable logical units failed"}, 1043 - {0x6702, "Add 
logical unit failed"}, 1044 - {0x6703, "Modification of logical unit failed"}, 1045 - {0x6704, "Exchange of logical unit failed"}, 1046 - {0x6705, "Remove of logical unit failed"}, 1047 - {0x6706, "Attachment of logical unit failed"}, 1048 - {0x6707, "Creation of logical unit failed"}, 1049 - {0x6708, "Assign failure occurred"}, 1050 - {0x6709, "Multiply assigned logical unit"}, 1051 - {0x670A, "Set target port groups command failed"}, 1052 - {0x670B, "ATA device feature not enabled"}, 1053 - 1054 - {0x6800, "Logical unit not configured"}, 1055 - {0x6801, "Subsidiary logical unit not configured"}, 1056 - 1057 - {0x6900, "Data loss on logical unit"}, 1058 - {0x6901, "Multiple logical unit failures"}, 1059 - {0x6902, "Parity/data mismatch"}, 1060 - 1061 - {0x6A00, "Informational, refer to log"}, 1062 - 1063 - {0x6B00, "State change has occurred"}, 1064 - {0x6B01, "Redundancy level got better"}, 1065 - {0x6B02, "Redundancy level got worse"}, 1066 - 1067 - {0x6C00, "Rebuild failure occurred"}, 1068 - 1069 - {0x6D00, "Recalculate failure occurred"}, 1070 - 1071 - {0x6E00, "Command to logical unit failed"}, 1072 - 1073 - {0x6F00, "Copy protection key exchange failure - authentication " 1074 - "failure"}, 1075 - {0x6F01, "Copy protection key exchange failure - key not present"}, 1076 - {0x6F02, "Copy protection key exchange failure - key not established"}, 1077 - {0x6F03, "Read of scrambled sector without authentication"}, 1078 - {0x6F04, "Media region code is mismatched to logical unit region"}, 1079 - {0x6F05, "Drive region must be permanent/region reset count error"}, 1080 - {0x6F06, "Insufficient block count for binding nonce recording"}, 1081 - {0x6F07, "Conflict in binding nonce recording"}, 1082 - /* 1083 - * {0x70NN, "Decompression exception short algorithm id of nn"}, 1084 - */ 1085 - {0x7100, "Decompression exception long algorithm id"}, 1086 - 1087 - {0x7200, "Session fixation error"}, 1088 - {0x7201, "Session fixation error writing lead-in"}, 1089 - {0x7202, 
"Session fixation error writing lead-out"}, 1090 - {0x7203, "Session fixation error - incomplete track in session"}, 1091 - {0x7204, "Empty or partially written reserved track"}, 1092 - {0x7205, "No more track reservations allowed"}, 1093 - {0x7206, "RMZ extension is not allowed"}, 1094 - {0x7207, "No more test zone extensions are allowed"}, 1095 - 1096 - {0x7300, "Cd control error"}, 1097 - {0x7301, "Power calibration area almost full"}, 1098 - {0x7302, "Power calibration area is full"}, 1099 - {0x7303, "Power calibration area error"}, 1100 - {0x7304, "Program memory area update failure"}, 1101 - {0x7305, "Program memory area is full"}, 1102 - {0x7306, "RMA/PMA is almost full"}, 1103 - {0x7310, "Current power calibration area almost full"}, 1104 - {0x7311, "Current power calibration area is full"}, 1105 - {0x7317, "RDZ is full"}, 1106 - 1107 - {0x7400, "Security error"}, 1108 - {0x7401, "Unable to decrypt data"}, 1109 - {0x7402, "Unencrypted data encountered while decrypting"}, 1110 - {0x7403, "Incorrect data encryption key"}, 1111 - {0x7404, "Cryptographic integrity validation failed"}, 1112 - {0x7405, "Error decrypting data"}, 1113 - {0x7406, "Unknown signature verification key"}, 1114 - {0x7407, "Encryption parameters not useable"}, 1115 - {0x7408, "Digital signature validation failure"}, 1116 - {0x7409, "Encryption mode mismatch on read"}, 1117 - {0x740A, "Encrypted block not raw read enabled"}, 1118 - {0x740B, "Incorrect Encryption parameters"}, 1119 - {0x740C, "Unable to decrypt parameter list"}, 1120 - {0x740D, "Encryption algorithm disabled"}, 1121 - {0x7410, "SA creation parameter value invalid"}, 1122 - {0x7411, "SA creation parameter value rejected"}, 1123 - {0x7412, "Invalid SA usage"}, 1124 - {0x7421, "Data Encryption configuration prevented"}, 1125 - {0x7430, "SA creation parameter not supported"}, 1126 - {0x7440, "Authentication failed"}, 1127 - {0x7461, "External data encryption key manager access error"}, 1128 - {0x7462, "External data encryption 
key manager error"}, 1129 - {0x7463, "External data encryption key not found"}, 1130 - {0x7464, "External data encryption request not authorized"}, 1131 - {0x746E, "External data encryption control timeout"}, 1132 - {0x746F, "External data encryption control error"}, 1133 - {0x7471, "Logical unit access not authorized"}, 1134 - {0x7479, "Security conflict in translated device"}, 1135 - 1136 - {0, NULL} 308 + #define SENSE_CODE(c, s) {c, sizeof(s)}, 309 + #include "sense_codes.h" 310 + #undef SENSE_CODE 1137 311 }; 312 + 313 + static const char *additional_text = 314 + #define SENSE_CODE(c, s) s "\0" 315 + #include "sense_codes.h" 316 + #undef SENSE_CODE 317 + ; 1138 318 1139 319 struct error_info2 { 1140 320 unsigned char code1, code2_min, code2_max; ··· 377 1197 { 378 1198 int i; 379 1199 unsigned short code = ((asc << 8) | ascq); 1200 + unsigned offset = 0; 380 1201 381 1202 *fmt = NULL; 382 - for (i = 0; additional[i].text; i++) 1203 + for (i = 0; i < ARRAY_SIZE(additional); i++) { 383 1204 if (additional[i].code12 == code) 384 - return additional[i].text; 1205 + return additional_text + offset; 1206 + offset += additional[i].size; 1207 + } 385 1208 for (i = 0; additional2[i].fmt; i++) { 386 1209 if (additional2[i].code1 == asc && 387 1210 ascq >= additional2[i].code2_min &&
+15
drivers/scsi/cxlflash/superpipe.c
··· 1615 1615 * place at the same time and the failure was due to CXL services being 1616 1616 * unable to keep up. 1617 1617 * 1618 + * As this routine is called on ioctl context, it holds the ioctl r/w 1619 + * semaphore that is used to drain ioctls in recovery scenarios. The 1620 + * implementation to achieve the pacing described above (a local mutex) 1621 + * requires that the ioctl r/w semaphore be dropped and reacquired to 1622 + * avoid a 3-way deadlock when multiple process recoveries operate in 1623 + * parallel. 1624 + * 1618 1625 * Because a user can detect an error condition before the kernel, it is 1619 1626 * quite possible for this routine to act as the kernel's EEH detection 1620 1627 * source (MMIO read of mbox_r). Because of this, there is a window of ··· 1649 1642 int rc = 0; 1650 1643 1651 1644 atomic_inc(&cfg->recovery_threads); 1645 + up_read(&cfg->ioctl_rwsem); 1652 1646 rc = mutex_lock_interruptible(mutex); 1647 + down_read(&cfg->ioctl_rwsem); 1653 1648 if (rc) 1654 1649 goto out; 1650 + rc = check_state(cfg); 1651 + if (rc) { 1652 + dev_err(dev, "%s: Failed state! rc=%d\n", __func__, rc); 1653 + rc = -ENODEV; 1654 + goto out; 1655 + } 1655 1656 1656 1657 dev_dbg(dev, "%s: reason 0x%016llX rctxid=%016llX\n", 1657 1658 __func__, recover->reason, rctxid);
+21 -12
drivers/scsi/device_handler/scsi_dh_alua.c
··· 190 190 ALUA_FAILOVER_RETRIES, NULL, req_flags); 191 191 } 192 192 193 - struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size, 194 - int group_id) 193 + static struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size, 194 + int group_id) 195 195 { 196 196 struct alua_port_group *pg; 197 + 198 + if (!id_str || !id_size || !strlen(id_str)) 199 + return NULL; 197 200 198 201 list_for_each_entry(pg, &port_group_list, node) { 199 202 if (pg->group_id != group_id) 200 203 continue; 201 - if (pg->device_id_len != id_size) 204 + if (!pg->device_id_len || pg->device_id_len != id_size) 202 205 continue; 203 206 if (strncmp(pg->device_id_str, id_str, id_size)) 204 207 continue; ··· 222 219 * Allocate a new port_group structure for a given 223 220 * device. 224 221 */ 225 - struct alua_port_group *alua_alloc_pg(struct scsi_device *sdev, 226 - int group_id, int tpgs) 222 + static struct alua_port_group *alua_alloc_pg(struct scsi_device *sdev, 223 + int group_id, int tpgs) 227 224 { 228 225 struct alua_port_group *pg, *tmp_pg; 229 226 ··· 235 232 sizeof(pg->device_id_str)); 236 233 if (pg->device_id_len <= 0) { 237 234 /* 238 - * Internal error: TPGS supported but no device 239 - * identifcation found. Disable ALUA support. 235 + * TPGS supported but no device identification found. 236 + * Generate private device identification. 
240 237 */ 241 - kfree(pg); 242 238 sdev_printk(KERN_INFO, sdev, 243 239 "%s: No device descriptors found\n", 244 240 ALUA_DH_NAME); 245 - return ERR_PTR(-ENXIO); 241 + pg->device_id_str[0] = '\0'; 242 + pg->device_id_len = 0; 246 243 } 247 244 pg->group_id = group_id; 248 245 pg->tpgs = tpgs; ··· 357 354 return SCSI_DH_NOMEM; 358 355 return SCSI_DH_DEV_UNSUPP; 359 356 } 360 - sdev_printk(KERN_INFO, sdev, 361 - "%s: device %s port group %x rel port %x\n", 362 - ALUA_DH_NAME, pg->device_id_str, group_id, rel_port); 357 + if (pg->device_id_len) 358 + sdev_printk(KERN_INFO, sdev, 359 + "%s: device %s port group %x rel port %x\n", 360 + ALUA_DH_NAME, pg->device_id_str, 361 + group_id, rel_port); 362 + else 363 + sdev_printk(KERN_INFO, sdev, 364 + "%s: port group %x rel port %x\n", 365 + ALUA_DH_NAME, group_id, rel_port); 363 366 364 367 /* Check for existing port group references */ 365 368 spin_lock(&h->pg_lock);
+6 -4
drivers/scsi/dmx3191d.c
··· 34 34 * Definitions for the generic 5380 driver. 35 35 */ 36 36 37 - #define DONT_USE_INTR 38 - 39 37 #define NCR5380_read(reg) inb(instance->io_port + reg) 40 38 #define NCR5380_write(reg, value) outb(value, instance->io_port + reg) 39 + 40 + #define NCR5380_dma_xfer_len(instance, cmd, phase) (0) 41 + #define NCR5380_dma_recv_setup(instance, dst, len) (0) 42 + #define NCR5380_dma_send_setup(instance, src, len) (0) 43 + #define NCR5380_dma_residual(instance) (0) 41 44 42 45 #define NCR5380_implementation_fields /* none */ 43 46 ··· 65 62 .cmd_per_lun = 2, 66 63 .use_clustering = DISABLE_CLUSTERING, 67 64 .cmd_size = NCR5380_CMD_SIZE, 68 - .max_sectors = 128, 69 65 }; 70 66 71 67 static int dmx3191d_probe_one(struct pci_dev *pdev, ··· 95 93 */ 96 94 shost->irq = NO_IRQ; 97 95 98 - error = NCR5380_init(shost, FLAG_NO_PSEUDO_DMA); 96 + error = NCR5380_init(shost, 0); 99 97 if (error) 100 98 goto out_host_put; 101 99
+8 -19
drivers/scsi/dtc.c
··· 1 - #define PSEUDO_DMA 2 - #define DONT_USE_INTR 3 - 4 1 /* 5 2 * DTC 3180/3280 driver, by 6 3 * Ray Van Tassle rayvt@comm.mot.com ··· 51 54 #include <scsi/scsi_host.h> 52 55 53 56 #include "dtc.h" 54 - #define AUTOPROBE_IRQ 55 57 #include "NCR5380.h" 56 58 57 59 /* ··· 225 229 instance->base = addr; 226 230 ((struct NCR5380_hostdata *)(instance)->hostdata)->base = base; 227 231 228 - if (NCR5380_init(instance, FLAG_NO_DMA_FIXUP)) 232 + if (NCR5380_init(instance, FLAG_LATE_DMA_SETUP)) 229 233 goto out_unregister; 230 234 231 235 NCR5380_maybe_reset_bus(instance); ··· 240 244 if (instance->irq == 255) 241 245 instance->irq = NO_IRQ; 242 246 243 - #ifndef DONT_USE_INTR 244 247 /* With interrupts enabled, it will sometimes hang when doing heavy 245 248 * reads. So better not enable them until I finger it out. */ 249 + instance->irq = NO_IRQ; 250 + 246 251 if (instance->irq != NO_IRQ) 247 252 if (request_irq(instance->irq, dtc_intr, 0, 248 253 "dtc", instance)) { ··· 255 258 printk(KERN_WARNING "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no); 256 259 printk(KERN_WARNING "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no); 257 260 } 258 - #else 259 - if (instance->irq != NO_IRQ) 260 - printk(KERN_WARNING "scsi%d : interrupts not used. Might as well not jumper it.\n", instance->host_no); 261 - instance->irq = NO_IRQ; 262 - #endif 261 + 263 262 dprintk(NDEBUG_INIT, "scsi%d : irq = %d\n", 264 263 instance->host_no, instance->irq); 265 264 ··· 316 323 * timeout. 
317 324 */ 318 325 319 - static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, int len) 326 + static inline int dtc_pread(struct Scsi_Host *instance, 327 + unsigned char *dst, int len) 320 328 { 321 329 unsigned char *d = dst; 322 330 int i; /* For counting time spent in the poll-loop */ ··· 346 352 while (!(NCR5380_read(DTC_CONTROL_REG) & D_CR_ACCESS)) 347 353 ++i; 348 354 rtrc(0); 349 - if (i > hostdata->spin_max_r) 350 - hostdata->spin_max_r = i; 351 355 return (0); 352 356 } 353 357 ··· 362 370 * timeout. 363 371 */ 364 372 365 - static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src, int len) 373 + static inline int dtc_pwrite(struct Scsi_Host *instance, 374 + unsigned char *src, int len) 366 375 { 367 376 int i; 368 377 struct NCR5380_hostdata *hostdata = shost_priv(instance); ··· 393 400 rtrc(7); 394 401 /* Check for parity error here. fixme. */ 395 402 rtrc(0); 396 - if (i > hostdata->spin_max_w) 397 - hostdata->spin_max_w = i; 398 403 return (0); 399 404 } 400 405 ··· 431 440 .detect = dtc_detect, 432 441 .release = dtc_release, 433 442 .proc_name = "dtc3x80", 434 - .show_info = dtc_show_info, 435 - .write_info = dtc_write_info, 436 443 .info = dtc_info, 437 444 .queuecommand = dtc_queue_command, 438 445 .eh_abort_handler = dtc_abort,
+5 -2
drivers/scsi/dtc.h
··· 21 21 22 22 #define NCR5380_dma_xfer_len(instance, cmd, phase) \ 23 23 dtc_dma_xfer_len(cmd) 24 + #define NCR5380_dma_recv_setup dtc_pread 25 + #define NCR5380_dma_send_setup dtc_pwrite 26 + #define NCR5380_dma_residual(instance) (0) 24 27 25 28 #define NCR5380_intr dtc_intr 26 29 #define NCR5380_queue_command dtc_queue_command 27 30 #define NCR5380_abort dtc_abort 28 31 #define NCR5380_bus_reset dtc_bus_reset 29 32 #define NCR5380_info dtc_info 30 - #define NCR5380_show_info dtc_show_info 31 - #define NCR5380_write_info dtc_write_info 33 + 34 + #define NCR5380_io_delay(x) udelay(x) 32 35 33 36 /* 15 12 11 10 34 37 1001 1100 0000 0000 */
+1
drivers/scsi/eata_pio.c
··· 729 729 break; 730 730 case 0x24: 731 731 SD(sh)->EATA_revision = 'z'; 732 + break; 732 733 default: 733 734 SD(sh)->EATA_revision = '?'; 734 735 }
+2 -2
drivers/scsi/esas2r/esas2r_main.c
··· 246 246 .eh_target_reset_handler = esas2r_target_reset, 247 247 .can_queue = 128, 248 248 .this_id = -1, 249 - .sg_tablesize = SCSI_MAX_SG_SEGMENTS, 249 + .sg_tablesize = SG_CHUNK_SIZE, 250 250 .cmd_per_lun = 251 251 ESAS2R_DEFAULT_CMD_PER_LUN, 252 252 .present = 0, ··· 271 271 MODULE_PARM_DESC(num_sg_lists, 272 272 "Number of scatter/gather lists. Default 1024."); 273 273 274 - int sg_tablesize = SCSI_MAX_SG_SEGMENTS; 274 + int sg_tablesize = SG_CHUNK_SIZE; 275 275 module_param(sg_tablesize, int, 0); 276 276 MODULE_PARM_DESC(sg_tablesize, 277 277 "Maximum number of entries in a scatter/gather table.");
+1 -1
drivers/scsi/fnic/fnic.h
··· 39 39 40 40 #define DRV_NAME "fnic" 41 41 #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" 42 - #define DRV_VERSION "1.6.0.17a" 42 + #define DRV_VERSION "1.6.0.21" 43 43 #define PFX DRV_NAME ": " 44 44 #define DFX DRV_NAME "%d: " 45 45
+66 -25
drivers/scsi/fnic/fnic_scsi.c
··· 439 439 int sg_count = 0; 440 440 unsigned long flags = 0; 441 441 unsigned long ptr; 442 - struct fc_rport_priv *rdata; 443 442 spinlock_t *io_lock = NULL; 444 443 int io_lock_acquired = 0; 445 444 ··· 454 455 return 0; 455 456 } 456 457 457 - rdata = lp->tt.rport_lookup(lp, rport->port_id); 458 - if (!rdata || (rdata->rp_state == RPORT_ST_DELETE)) { 459 - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 460 - "returning IO as rport is removed\n"); 461 - atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); 462 - sc->result = DID_NO_CONNECT; 463 - done(sc); 464 - return 0; 458 + if (rport) { 459 + struct fc_rport_libfc_priv *rp = rport->dd_data; 460 + 461 + if (!rp || rp->rp_state != RPORT_ST_READY) { 462 + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 463 + "returning DID_NO_CONNECT for IO as rport is removed\n"); 464 + atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); 465 + sc->result = DID_NO_CONNECT<<16; 466 + done(sc); 467 + return 0; 468 + } 465 469 } 466 470 467 471 if (lp->state != LPORT_ST_READY || !(lp->link_up)) ··· 1093 1091 atomic64_inc( 1094 1092 &term_stats->terminate_fw_timeouts); 1095 1093 break; 1094 + case FCPIO_ITMF_REJECTED: 1095 + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, 1096 + "abort reject recd. 
id %d\n", 1097 + (int)(id & FNIC_TAG_MASK)); 1098 + break; 1096 1099 case FCPIO_IO_NOT_FOUND: 1097 1100 if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED) 1098 1101 atomic64_inc(&abts_stats->abort_io_not_found); ··· 1118 1111 spin_unlock_irqrestore(io_lock, flags); 1119 1112 return; 1120 1113 } 1121 - CMD_ABTS_STATUS(sc) = hdr_status; 1114 + 1122 1115 CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE; 1116 + 1117 + /* If the status is IO not found consider it as success */ 1118 + if (hdr_status == FCPIO_IO_NOT_FOUND) 1119 + CMD_ABTS_STATUS(sc) = FCPIO_SUCCESS; 1120 + else 1121 + CMD_ABTS_STATUS(sc) = hdr_status; 1123 1122 1124 1123 atomic64_dec(&fnic_stats->io_stats.active_ios); 1125 1124 if (atomic64_read(&fnic->io_cmpl_skip)) ··· 1939 1926 1940 1927 CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; 1941 1928 1929 + start_time = io_req->start_time; 1942 1930 /* 1943 1931 * firmware completed the abort, check the status, 1944 - * free the io_req irrespective of failure or success 1932 + * free the io_req if successful. If abort fails, 1933 + * Device reset will clean the I/O. 
1945 1934 */ 1946 - if (CMD_ABTS_STATUS(sc) != FCPIO_SUCCESS) 1935 + if (CMD_ABTS_STATUS(sc) == FCPIO_SUCCESS) 1936 + CMD_SP(sc) = NULL; 1937 + else { 1947 1938 ret = FAILED; 1948 - 1949 - CMD_SP(sc) = NULL; 1939 + spin_unlock_irqrestore(io_lock, flags); 1940 + goto fnic_abort_cmd_end; 1941 + } 1950 1942 1951 1943 spin_unlock_irqrestore(io_lock, flags); 1952 1944 1953 - start_time = io_req->start_time; 1954 1945 fnic_release_ioreq_buf(fnic, io_req, sc); 1955 1946 mempool_free(io_req, fnic->io_req_pool); 1947 + 1948 + if (sc->scsi_done) { 1949 + /* Call SCSI completion function to complete the IO */ 1950 + sc->result = (DID_ABORT << 16); 1951 + sc->scsi_done(sc); 1952 + } 1956 1953 1957 1954 fnic_abort_cmd_end: 1958 1955 FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no, ··· 2041 2018 * successfully aborted, 1 otherwise 2042 2019 */ 2043 2020 static int fnic_clean_pending_aborts(struct fnic *fnic, 2044 - struct scsi_cmnd *lr_sc) 2021 + struct scsi_cmnd *lr_sc, 2022 + bool new_sc) 2023 + 2045 2024 { 2046 2025 int tag, abt_tag; 2047 2026 struct fnic_io_req *io_req; ··· 2061 2036 spin_lock_irqsave(io_lock, flags); 2062 2037 sc = scsi_host_find_tag(fnic->lport->host, tag); 2063 2038 /* 2064 - * ignore this lun reset cmd or cmds that do not belong to 2065 - * this lun 2039 + * ignore this lun reset cmd if issued using new SC 2040 + * or cmds that do not belong to this lun 2066 2041 */ 2067 - if (!sc || sc == lr_sc || sc->device != lun_dev) { 2042 + if (!sc || ((sc == lr_sc) && new_sc) || sc->device != lun_dev) { 2068 2043 spin_unlock_irqrestore(io_lock, flags); 2069 2044 continue; 2070 2045 } ··· 2170 2145 goto clean_pending_aborts_end; 2171 2146 } 2172 2147 CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; 2173 - CMD_SP(sc) = NULL; 2148 + 2149 + /* original sc used for lr is handled by dev reset code */ 2150 + if (sc != lr_sc) 2151 + CMD_SP(sc) = NULL; 2174 2152 spin_unlock_irqrestore(io_lock, flags); 2175 2153 2176 - fnic_release_ioreq_buf(fnic, io_req, sc); 2177 - 
mempool_free(io_req, fnic->io_req_pool); 2154 + /* original sc used for lr is handled by dev reset code */ 2155 + if (sc != lr_sc) { 2156 + fnic_release_ioreq_buf(fnic, io_req, sc); 2157 + mempool_free(io_req, fnic->io_req_pool); 2158 + } 2159 + 2160 + /* 2161 + * Any IO is returned during reset, it needs to call scsi_done 2162 + * to return the scsi_cmnd to upper layer. 2163 + */ 2164 + if (sc->scsi_done) { 2165 + /* Set result to let upper SCSI layer retry */ 2166 + sc->result = DID_RESET << 16; 2167 + sc->scsi_done(sc); 2168 + } 2178 2169 } 2179 2170 2180 2171 schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov)); ··· 2284 2243 int tag = 0; 2285 2244 DECLARE_COMPLETION_ONSTACK(tm_done); 2286 2245 int tag_gen_flag = 0; /*to track tags allocated by fnic driver*/ 2246 + bool new_sc = 0; 2287 2247 2288 2248 /* Wait for rport to unblock */ 2289 2249 fc_block_scsi_eh(sc); ··· 2330 2288 * fix the way the EH ioctls work for real, but until 2331 2289 * that happens we fail these explicit requests here. 2332 2290 */ 2333 - if (shost_use_blk_mq(sc->device->host)) 2334 - goto fnic_device_reset_end; 2335 2291 2336 2292 tag = fnic_scsi_host_start_tag(fnic, sc); 2337 2293 if (unlikely(tag == SCSI_NO_TAG)) 2338 2294 goto fnic_device_reset_end; 2339 2295 tag_gen_flag = 1; 2296 + new_sc = 1; 2340 2297 } 2341 2298 io_lock = fnic_io_lock_hash(fnic, sc); 2342 2299 spin_lock_irqsave(io_lock, flags); ··· 2470 2429 * the lun reset cmd. If all cmds get cleaned, the lun reset 2471 2430 * succeeds 2472 2431 */ 2473 - if (fnic_clean_pending_aborts(fnic, sc)) { 2432 + if (fnic_clean_pending_aborts(fnic, sc, new_sc)) { 2474 2433 spin_lock_irqsave(io_lock, flags); 2475 2434 io_req = (struct fnic_io_req *)CMD_SP(sc); 2476 2435 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+38 -103
drivers/scsi/g_NCR5380.c
··· 18 18 * 19 19 * Added ISAPNP support for DTC436 adapters, 20 20 * Thomas Sailer, sailer@ife.ee.ethz.ch 21 - */ 22 - 23 - /* 24 - * TODO : flesh out DMA support, find some one actually using this (I have 25 - * a memory mapped Trantor board that works fine) 26 - */ 27 - 28 - /* 29 - * The card is detected and initialized in one of several ways : 30 - * 1. With command line overrides - NCR5380=port,irq may be 31 - * used on the LILO command line to override the defaults. 32 21 * 33 - * 2. With the GENERIC_NCR5380_OVERRIDE compile time define. This is 34 - * specified as an array of address, irq, dma, board tuples. Ie, for 35 - * one board at 0x350, IRQ5, no dma, I could say 36 - * -DGENERIC_NCR5380_OVERRIDE={{0xcc000, 5, DMA_NONE, BOARD_NCR5380}} 37 - * 38 - * -1 should be specified for no or DMA interrupt, -2 to autoprobe for an 39 - * IRQ line if overridden on the command line. 40 - * 41 - * 3. When included as a module, with arguments passed on the command line: 42 - * ncr_irq=xx the interrupt 43 - * ncr_addr=xx the port or base address (for port or memory 44 - * mapped, resp.) 45 - * ncr_dma=xx the DMA 46 - * ncr_5380=1 to set up for a NCR5380 board 47 - * ncr_53c400=1 to set up for a NCR53C400 board 48 - * e.g. 49 - * modprobe g_NCR5380 ncr_irq=5 ncr_addr=0x350 ncr_5380=1 50 - * for a port mapped NCR5380 board or 51 - * modprobe g_NCR5380 ncr_irq=255 ncr_addr=0xc8000 ncr_53c400=1 52 - * for a memory mapped NCR53C400 board with interrupts disabled. 53 - * 54 - * 255 should be specified for no or DMA interrupt, 254 to autoprobe for an 55 - * IRQ line if overridden on the command line. 56 - * 22 + * See Documentation/scsi/g_NCR5380.txt for more info. 
57 23 */ 58 - 59 - #define AUTOPROBE_IRQ 60 - 61 - #ifdef CONFIG_SCSI_GENERIC_NCR53C400 62 - #define PSEUDO_DMA 63 - #endif 64 24 65 25 #include <asm/io.h> 66 26 #include <linux/blkdev.h> ··· 230 270 #ifndef SCSI_G_NCR5380_MEM 231 271 int i; 232 272 int port_idx = -1; 233 - unsigned long region_size = 16; 273 + unsigned long region_size; 234 274 #endif 235 275 static unsigned int __initdata ncr_53c400a_ports[] = { 236 276 0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0 ··· 250 290 #ifdef SCSI_G_NCR5380_MEM 251 291 unsigned long base; 252 292 void __iomem *iomem; 293 + resource_size_t iomem_size; 253 294 #endif 254 295 255 296 if (ncr_irq) ··· 311 350 flags = 0; 312 351 switch (overrides[current_override].board) { 313 352 case BOARD_NCR5380: 314 - flags = FLAG_NO_PSEUDO_DMA; 315 - break; 316 - case BOARD_NCR53C400: 317 - #ifdef PSEUDO_DMA 318 - flags = FLAG_NO_DMA_FIXUP; 319 - #endif 353 + flags = FLAG_NO_PSEUDO_DMA | FLAG_DMA_FIXUP; 320 354 break; 321 355 case BOARD_NCR53C400A: 322 - flags = FLAG_NO_DMA_FIXUP; 323 356 ports = ncr_53c400a_ports; 324 357 magic = ncr_53c400a_magic; 325 358 break; 326 359 case BOARD_HP_C2502: 327 - flags = FLAG_NO_DMA_FIXUP; 328 360 ports = ncr_53c400a_ports; 329 361 magic = hp_c2502_magic; 330 362 break; 331 363 case BOARD_DTC3181E: 332 - flags = FLAG_NO_DMA_FIXUP; 333 364 ports = dtc_3181e_ports; 334 365 magic = ncr_53c400a_magic; 335 366 break; ··· 334 381 /* Disable the adapter and look for a free io port */ 335 382 magic_configure(-1, 0, magic); 336 383 384 + region_size = 16; 385 + 337 386 if (overrides[current_override].NCR5380_map_name != PORT_AUTO) 338 387 for (i = 0; ports[i]; i++) { 339 - if (!request_region(ports[i], 16, "ncr53c80")) 388 + if (!request_region(ports[i], region_size, "ncr53c80")) 340 389 continue; 341 390 if (overrides[current_override].NCR5380_map_name == ports[i]) 342 391 break; 343 - release_region(ports[i], 16); 392 + release_region(ports[i], region_size); 344 393 } else 345 394 for (i = 0; 
ports[i]; i++) { 346 - if (!request_region(ports[i], 16, "ncr53c80")) 395 + if (!request_region(ports[i], region_size, "ncr53c80")) 347 396 continue; 348 397 if (inb(ports[i]) == 0xff) 349 398 break; 350 - release_region(ports[i], 16); 399 + release_region(ports[i], region_size); 351 400 } 352 401 if (ports[i]) { 353 402 /* At this point we have our region reserved */ ··· 365 410 else 366 411 { 367 412 /* Not a 53C400A style setup - just grab */ 368 - if(!(request_region(overrides[current_override].NCR5380_map_name, NCR5380_region_size, "ncr5380"))) 413 + region_size = 8; 414 + if (!request_region(overrides[current_override].NCR5380_map_name, 415 + region_size, "ncr5380")) 369 416 continue; 370 - region_size = NCR5380_region_size; 371 417 } 372 418 #else 373 419 base = overrides[current_override].NCR5380_map_name; 374 - if (!request_mem_region(base, NCR5380_region_size, "ncr5380")) 420 + iomem_size = NCR53C400_region_size; 421 + if (!request_mem_region(base, iomem_size, "ncr5380")) 375 422 continue; 376 - iomem = ioremap(base, NCR5380_region_size); 423 + iomem = ioremap(base, iomem_size); 377 424 if (!iomem) { 378 - release_mem_region(base, NCR5380_region_size); 425 + release_mem_region(base, iomem_size); 379 426 continue; 380 427 } 381 428 #endif ··· 415 458 #else 416 459 instance->base = overrides[current_override].NCR5380_map_name; 417 460 hostdata->iomem = iomem; 461 + hostdata->iomem_size = iomem_size; 418 462 switch (overrides[current_override].board) { 419 463 case BOARD_NCR53C400: 420 464 hostdata->c400_ctl_status = 0x100; ··· 430 472 } 431 473 #endif 432 474 433 - if (NCR5380_init(instance, flags)) 475 + if (NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP)) 434 476 goto out_unregister; 435 477 436 478 switch (overrides[current_override].board) { ··· 482 524 release_region(overrides[current_override].NCR5380_map_name, region_size); 483 525 #else 484 526 iounmap(iomem); 485 - release_mem_region(base, NCR5380_region_size); 527 + release_mem_region(base, 
iomem_size); 486 528 #endif 487 529 return count; 488 530 } ··· 504 546 #ifndef SCSI_G_NCR5380_MEM 505 547 release_region(instance->io_port, instance->n_io_port); 506 548 #else 507 - iounmap(((struct NCR5380_hostdata *)instance->hostdata)->iomem); 508 - release_mem_region(instance->base, NCR5380_region_size); 549 + { 550 + struct NCR5380_hostdata *hostdata = shost_priv(instance); 551 + 552 + iounmap(hostdata->iomem); 553 + release_mem_region(instance->base, hostdata->iomem_size); 554 + } 509 555 #endif 510 556 return 0; 511 557 } 512 558 513 - #ifdef BIOSPARAM 514 559 /** 515 - * generic_NCR5380_biosparam 516 - * @disk: disk to compute geometry for 517 - * @dev: device identifier for this disk 518 - * @ip: sizes to fill in 519 - * 520 - * Generates a BIOS / DOS compatible H-C-S mapping for the specified 521 - * device / size. 522 - * 523 - * XXX Most SCSI boards use this mapping, I could be incorrect. Someone 524 - * using hard disks on a trantor should verify that this mapping 525 - * corresponds to that used by the BIOS / ASPI driver by running the linux 526 - * fdisk program and matching the H_C_S coordinates to what DOS uses. 
527 - * 528 - * Locks: none 529 - */ 530 - 531 - static int 532 - generic_NCR5380_biosparam(struct scsi_device *sdev, struct block_device *bdev, 533 - sector_t capacity, int *ip) 534 - { 535 - ip[0] = 64; 536 - ip[1] = 32; 537 - ip[2] = capacity >> 11; 538 - return 0; 539 - } 540 - #endif 541 - 542 - #ifdef PSEUDO_DMA 543 - 544 - /** 545 - * NCR5380_pread - pseudo DMA read 560 + * generic_NCR5380_pread - pseudo DMA read 546 561 * @instance: adapter to read from 547 562 * @dst: buffer to read into 548 563 * @len: buffer length ··· 524 593 * controller 525 594 */ 526 595 527 - static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, int len) 596 + static inline int generic_NCR5380_pread(struct Scsi_Host *instance, 597 + unsigned char *dst, int len) 528 598 { 529 599 struct NCR5380_hostdata *hostdata = shost_priv(instance); 530 600 int blocks = len / 128; ··· 593 661 } 594 662 595 663 /** 596 - * NCR5380_write - pseudo DMA write 664 + * generic_NCR5380_pwrite - pseudo DMA write 597 665 * @instance: adapter to read from 598 666 * @dst: buffer to read into 599 667 * @len: buffer length ··· 602 670 * controller 603 671 */ 604 672 605 - static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src, int len) 673 + static inline int generic_NCR5380_pwrite(struct Scsi_Host *instance, 674 + unsigned char *src, int len) 606 675 { 607 676 struct NCR5380_hostdata *hostdata = shost_priv(instance); 608 677 int blocks = len / 128; ··· 671 738 return 0; 672 739 } 673 740 674 - static int generic_NCR5380_dma_xfer_len(struct scsi_cmnd *cmd) 741 + static int generic_NCR5380_dma_xfer_len(struct Scsi_Host *instance, 742 + struct scsi_cmnd *cmd) 675 743 { 744 + struct NCR5380_hostdata *hostdata = shost_priv(instance); 676 745 int transfersize = cmd->transfersize; 746 + 747 + if (hostdata->flags & FLAG_NO_PSEUDO_DMA) 748 + return 0; 677 749 678 750 /* Limit transfers to 32K, for xx400 & xx406 679 751 * pseudoDMA that transfers in 128 bytes blocks. 
··· 693 755 694 756 return transfersize; 695 757 } 696 - 697 - #endif /* PSEUDO_DMA */ 698 758 699 759 /* 700 760 * Include the NCR5380 core code that we build our driver around ··· 709 773 .queuecommand = generic_NCR5380_queue_command, 710 774 .eh_abort_handler = generic_NCR5380_abort, 711 775 .eh_bus_reset_handler = generic_NCR5380_bus_reset, 712 - .bios_param = NCR5380_BIOSPARAM, 713 776 .can_queue = 16, 714 777 .this_id = 7, 715 778 .sg_tablesize = SG_ALL,
+8 -18
drivers/scsi/g_NCR5380.h
··· 14 14 #ifndef GENERIC_NCR5380_H 15 15 #define GENERIC_NCR5380_H 16 16 17 - #ifdef CONFIG_SCSI_GENERIC_NCR53C400 18 - #define BIOSPARAM 19 - #define NCR5380_BIOSPARAM generic_NCR5380_biosparam 20 - #else 21 - #define NCR5380_BIOSPARAM NULL 22 - #endif 23 - 24 17 #define __STRVAL(x) #x 25 18 #define STRVAL(x) __STRVAL(x) 26 19 ··· 22 29 23 30 #define NCR5380_map_type int 24 31 #define NCR5380_map_name port 25 - 26 - #ifdef CONFIG_SCSI_GENERIC_NCR53C400 27 - #define NCR5380_region_size 16 28 - #else 29 - #define NCR5380_region_size 8 30 - #endif 31 32 32 33 #define NCR5380_read(reg) \ 33 34 inb(instance->io_port + (reg)) ··· 42 55 #define NCR5380_map_name base 43 56 #define NCR53C400_mem_base 0x3880 44 57 #define NCR53C400_host_buffer 0x3900 45 - #define NCR5380_region_size 0x3a00 58 + #define NCR53C400_region_size 0x3a00 46 59 47 60 #define NCR5380_read(reg) \ 48 61 readb(((struct NCR5380_hostdata *)shost_priv(instance))->iomem + \ ··· 53 66 54 67 #define NCR5380_implementation_fields \ 55 68 void __iomem *iomem; \ 69 + resource_size_t iomem_size; \ 56 70 int c400_ctl_status; \ 57 71 int c400_blk_cnt; \ 58 72 int c400_host_buf; ··· 61 73 #endif 62 74 63 75 #define NCR5380_dma_xfer_len(instance, cmd, phase) \ 64 - generic_NCR5380_dma_xfer_len(cmd) 76 + generic_NCR5380_dma_xfer_len(instance, cmd) 77 + #define NCR5380_dma_recv_setup generic_NCR5380_pread 78 + #define NCR5380_dma_send_setup generic_NCR5380_pwrite 79 + #define NCR5380_dma_residual(instance) (0) 65 80 66 81 #define NCR5380_intr generic_NCR5380_intr 67 82 #define NCR5380_queue_command generic_NCR5380_queue_command 68 83 #define NCR5380_abort generic_NCR5380_abort 69 84 #define NCR5380_bus_reset generic_NCR5380_bus_reset 70 - #define NCR5380_pread generic_NCR5380_pread 71 - #define NCR5380_pwrite generic_NCR5380_pwrite 72 85 #define NCR5380_info generic_NCR5380_info 73 - #define NCR5380_show_info generic_NCR5380_show_info 86 + 87 + #define NCR5380_io_delay(x) udelay(x) 74 88 75 89 #define BOARD_NCR5380 0 
76 90 #define BOARD_NCR53C400 1
+5 -2
drivers/scsi/hisi_sas/hisi_sas.h
··· 23 23 #include <scsi/sas_ata.h> 24 24 #include <scsi/libsas.h> 25 25 26 - #define DRV_VERSION "v1.3" 26 + #define DRV_VERSION "v1.4" 27 27 28 28 #define HISI_SAS_MAX_PHYS 9 29 29 #define HISI_SAS_MAX_QUEUES 32 ··· 133 133 int (*hw_init)(struct hisi_hba *hisi_hba); 134 134 void (*setup_itct)(struct hisi_hba *hisi_hba, 135 135 struct hisi_sas_device *device); 136 + int (*slot_index_alloc)(struct hisi_hba *hisi_hba, int *slot_idx, 137 + struct domain_device *device); 138 + struct hisi_sas_device *(*alloc_dev)(struct domain_device *device); 136 139 void (*sl_notify)(struct hisi_hba *hisi_hba, int phy_no); 137 140 int (*get_free_slot)(struct hisi_hba *hisi_hba, int *q, int *s); 138 141 void (*start_delivery)(struct hisi_hba *hisi_hba); ··· 301 298 u8 atapi_cdb[ATAPI_CDB_LEN]; 302 299 }; 303 300 304 - #define HISI_SAS_SGE_PAGE_CNT SCSI_MAX_SG_SEGMENTS 301 + #define HISI_SAS_SGE_PAGE_CNT SG_CHUNK_SIZE 305 302 struct hisi_sas_sge_page { 306 303 struct hisi_sas_sge sge[HISI_SAS_SGE_PAGE_CNT]; 307 304 };
+9 -2
drivers/scsi/hisi_sas/hisi_sas_main.c
··· 227 227 } else 228 228 n_elem = task->num_scatter; 229 229 230 - rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx); 230 + if (hisi_hba->hw->slot_index_alloc) 231 + rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx, 232 + device); 233 + else 234 + rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx); 231 235 if (rc) 232 236 goto err_out; 233 237 rc = hisi_hba->hw->get_free_slot(hisi_hba, &dlvry_queue, ··· 421 417 struct hisi_sas_device *sas_dev; 422 418 struct device *dev = &hisi_hba->pdev->dev; 423 419 424 - sas_dev = hisi_sas_alloc_dev(device); 420 + if (hisi_hba->hw->alloc_dev) 421 + sas_dev = hisi_hba->hw->alloc_dev(device); 422 + else 423 + sas_dev = hisi_sas_alloc_dev(device); 425 424 if (!sas_dev) { 426 425 dev_err(dev, "fail alloc dev: max support %d devices\n", 427 426 HISI_SAS_MAX_DEVICES);
+75 -15
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
··· 465 465 return readl(regs); 466 466 } 467 467 468 + /* This function needs to be protected from pre-emption. */ 469 + static int 470 + slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx, 471 + struct domain_device *device) 472 + { 473 + unsigned int index = 0; 474 + void *bitmap = hisi_hba->slot_index_tags; 475 + int sata_dev = dev_is_sata(device); 476 + 477 + while (1) { 478 + index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count, 479 + index); 480 + if (index >= hisi_hba->slot_index_count) 481 + return -SAS_QUEUE_FULL; 482 + /* 483 + * SAS IPTT bit0 should be 1 484 + */ 485 + if (sata_dev || (index & 1)) 486 + break; 487 + index++; 488 + } 489 + 490 + set_bit(index, bitmap); 491 + *slot_idx = index; 492 + return 0; 493 + } 494 + 495 + static struct 496 + hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device) 497 + { 498 + struct hisi_hba *hisi_hba = device->port->ha->lldd_ha; 499 + struct hisi_sas_device *sas_dev = NULL; 500 + int i, sata_dev = dev_is_sata(device); 501 + 502 + spin_lock(&hisi_hba->lock); 503 + for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 504 + /* 505 + * SATA device id bit0 should be 0 506 + */ 507 + if (sata_dev && (i & 1)) 508 + continue; 509 + if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) { 510 + hisi_hba->devices[i].device_id = i; 511 + sas_dev = &hisi_hba->devices[i]; 512 + sas_dev->dev_status = HISI_SAS_DEV_NORMAL; 513 + sas_dev->dev_type = device->dev_type; 514 + sas_dev->hisi_hba = hisi_hba; 515 + sas_dev->sas_device = device; 516 + break; 517 + } 518 + } 519 + spin_unlock(&hisi_hba->lock); 520 + 521 + return sas_dev; 522 + } 523 + 468 524 static void config_phy_opt_mode_v2_hw(struct hisi_hba *hisi_hba, int phy_no) 469 525 { 470 526 u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); ··· 600 544 } 601 545 602 546 qw0 |= ((1 << ITCT_HDR_VALID_OFF) | 603 - (device->max_linkrate << ITCT_HDR_MCR_OFF) | 547 + (device->linkrate << ITCT_HDR_MCR_OFF) | 604 548 (1 << ITCT_HDR_VLN_OFF) | 605 
549 (port->id << ITCT_HDR_PORT_ID_OFF)); 606 550 itct->qw0 = cpu_to_le64(qw0); ··· 610 554 itct->sas_addr = __swab64(itct->sas_addr); 611 555 612 556 /* qw2 */ 613 - itct->qw2 = cpu_to_le64((500ULL << ITCT_HDR_INLT_OFF) | 614 - (0xff00ULL << ITCT_HDR_BITLT_OFF) | 615 - (0xff00ULL << ITCT_HDR_MCTLT_OFF) | 616 - (0xff00ULL << ITCT_HDR_RTOLT_OFF)); 557 + if (!dev_is_sata(device)) 558 + itct->qw2 = cpu_to_le64((500ULL << ITCT_HDR_INLT_OFF) | 559 + (0x1ULL << ITCT_HDR_BITLT_OFF) | 560 + (0x32ULL << ITCT_HDR_MCTLT_OFF) | 561 + (0x1ULL << ITCT_HDR_RTOLT_OFF)); 617 562 } 618 563 619 564 static void free_device_v2_hw(struct hisi_hba *hisi_hba, ··· 772 715 hisi_sas_write32(hisi_hba, HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL, 0x7FF); 773 716 hisi_sas_write32(hisi_hba, OPENA_WT_CONTI_TIME, 0x1); 774 717 hisi_sas_write32(hisi_hba, I_T_NEXUS_LOSS_TIME, 0x1F4); 775 - hisi_sas_write32(hisi_hba, MAX_CON_TIME_LIMIT_TIME, 0x4E20); 718 + hisi_sas_write32(hisi_hba, MAX_CON_TIME_LIMIT_TIME, 0x32); 776 719 hisi_sas_write32(hisi_hba, BUS_INACTIVE_LIMIT_TIME, 0x1); 777 720 hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1); 778 721 hisi_sas_write32(hisi_hba, HGC_ERR_STAT_EN, 0x1); ··· 2050 1993 u32 ent_tmp, ent_msk, ent_int, port_id, link_rate, hard_phy_linkrate; 2051 1994 irqreturn_t res = IRQ_HANDLED; 2052 1995 u8 attached_sas_addr[SAS_ADDR_SIZE] = {0}; 2053 - int phy_no; 1996 + int phy_no, offset; 2054 1997 2055 1998 phy_no = sas_phy->id; 2056 1999 initial_fis = &hisi_hba->initial_fis[phy_no]; 2057 2000 fis = &initial_fis->fis; 2058 2001 2059 - ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK1); 2060 - hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, ent_msk | 1 << phy_no); 2002 + offset = 4 * (phy_no / 4); 2003 + ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK1 + offset); 2004 + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1 + offset, 2005 + ent_msk | 1 << ((phy_no % 4) * 8)); 2061 2006 2062 - ent_int = hisi_sas_read32(hisi_hba, ENT_INT_SRC1); 2063 - ent_tmp = ent_int; 2007 + ent_int = 
hisi_sas_read32(hisi_hba, ENT_INT_SRC1 + offset); 2008 + ent_tmp = ent_int & (1 << (ENT_INT_SRC1_D2H_FIS_CH1_OFF * 2009 + (phy_no % 4))); 2064 2010 ent_int >>= ENT_INT_SRC1_D2H_FIS_CH1_OFF * (phy_no % 4); 2065 2011 if ((ent_int & ENT_INT_SRC1_D2H_FIS_CH0_MSK) == 0) { 2066 2012 dev_warn(dev, "sata int: phy%d did not receive FIS\n", phy_no); 2067 - hisi_sas_write32(hisi_hba, ENT_INT_SRC1, ent_tmp); 2068 - hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, ent_msk); 2069 2013 res = IRQ_NONE; 2070 2014 goto end; 2071 2015 } ··· 2114 2056 queue_work(hisi_hba->wq, &phy->phyup_ws); 2115 2057 2116 2058 end: 2117 - hisi_sas_write32(hisi_hba, ENT_INT_SRC1, ent_tmp); 2118 - hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, ent_msk); 2059 + hisi_sas_write32(hisi_hba, ENT_INT_SRC1 + offset, ent_tmp); 2060 + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1 + offset, ent_msk); 2119 2061 2120 2062 return res; 2121 2063 } ··· 2223 2165 static const struct hisi_sas_hw hisi_sas_v2_hw = { 2224 2166 .hw_init = hisi_sas_v2_init, 2225 2167 .setup_itct = setup_itct_v2_hw, 2168 + .slot_index_alloc = slot_index_alloc_quirk_v2_hw, 2169 + .alloc_dev = alloc_dev_quirk_v2_hw, 2226 2170 .sl_notify = sl_notify_v2_hw, 2227 2171 .get_wideport_bitmap = get_wideport_bitmap_v2_hw, 2228 2172 .free_device = free_device_v2_hw,
+149 -38
drivers/scsi/hpsa.c
··· 60 60 * HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' 61 61 * with an optional trailing '-' followed by a byte value (0-255). 62 62 */ 63 - #define HPSA_DRIVER_VERSION "3.4.14-0" 63 + #define HPSA_DRIVER_VERSION "3.4.16-0" 64 64 #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" 65 65 #define HPSA "hpsa" 66 66 ··· 294 294 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, 295 295 struct ReportExtendedLUNdata *buf, int bufsize); 296 296 static int hpsa_luns_changed(struct ctlr_info *h); 297 + static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c, 298 + struct hpsa_scsi_dev_t *dev, 299 + unsigned char *scsi3addr); 297 300 298 301 static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev) 299 302 { ··· 731 728 sn[12], sn[13], sn[14], sn[15]); 732 729 } 733 730 731 + static ssize_t sas_address_show(struct device *dev, 732 + struct device_attribute *attr, char *buf) 733 + { 734 + struct ctlr_info *h; 735 + struct scsi_device *sdev; 736 + struct hpsa_scsi_dev_t *hdev; 737 + unsigned long flags; 738 + u64 sas_address; 739 + 740 + sdev = to_scsi_device(dev); 741 + h = sdev_to_hba(sdev); 742 + spin_lock_irqsave(&h->lock, flags); 743 + hdev = sdev->hostdata; 744 + if (!hdev || is_logical_device(hdev) || !hdev->expose_device) { 745 + spin_unlock_irqrestore(&h->lock, flags); 746 + return -ENODEV; 747 + } 748 + sas_address = hdev->sas_address; 749 + spin_unlock_irqrestore(&h->lock, flags); 750 + 751 + return snprintf(buf, PAGE_SIZE, "0x%016llx\n", sas_address); 752 + } 753 + 734 754 static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev, 735 755 struct device_attribute *attr, char *buf) 736 756 { ··· 866 840 static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL); 867 841 static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL); 868 842 static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); 843 + static DEVICE_ATTR(sas_address, S_IRUGO, sas_address_show, NULL); 869 844 
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO, 870 845 host_show_hp_ssd_smart_path_enabled, NULL); 871 846 static DEVICE_ATTR(path_info, S_IRUGO, path_info_show, NULL); ··· 892 865 &dev_attr_unique_id, 893 866 &dev_attr_hp_ssd_smart_path_enabled, 894 867 &dev_attr_path_info, 868 + &dev_attr_sas_address, 895 869 NULL, 896 870 }; 897 871 ··· 1665 1637 for (j = 0; j < ndevices; j++) { 1666 1638 if (dev[j] == NULL) 1667 1639 continue; 1668 - if (dev[j]->devtype != TYPE_DISK) 1669 - continue; 1670 - if (dev[j]->devtype != TYPE_ZBC) 1640 + if (dev[j]->devtype != TYPE_DISK && 1641 + dev[j]->devtype != TYPE_ZBC) 1671 1642 continue; 1672 1643 if (is_logical_device(dev[j])) 1673 1644 continue; ··· 1711 1684 for (i = 0; i < ndevices; i++) { 1712 1685 if (dev[i] == NULL) 1713 1686 continue; 1714 - if (dev[i]->devtype != TYPE_DISK) 1715 - continue; 1716 - if (dev[i]->devtype != TYPE_ZBC) 1687 + if (dev[i]->devtype != TYPE_DISK && 1688 + dev[i]->devtype != TYPE_ZBC) 1717 1689 continue; 1718 1690 if (!is_logical_device(dev[i])) 1719 1691 continue; ··· 1746 1720 return rc; 1747 1721 } 1748 1722 1723 + static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h, 1724 + struct hpsa_scsi_dev_t *dev) 1725 + { 1726 + int i; 1727 + int count = 0; 1728 + 1729 + for (i = 0; i < h->nr_cmds; i++) { 1730 + struct CommandList *c = h->cmd_pool + i; 1731 + int refcount = atomic_inc_return(&c->refcount); 1732 + 1733 + if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, 1734 + dev->scsi3addr)) { 1735 + unsigned long flags; 1736 + 1737 + spin_lock_irqsave(&h->lock, flags); /* Implied MB */ 1738 + if (!hpsa_is_cmd_idle(c)) 1739 + ++count; 1740 + spin_unlock_irqrestore(&h->lock, flags); 1741 + } 1742 + 1743 + cmd_free(h, c); 1744 + } 1745 + 1746 + return count; 1747 + } 1748 + 1749 + static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h, 1750 + struct hpsa_scsi_dev_t *device) 1751 + { 1752 + int cmds = 0; 1753 + int waits = 0; 1754 + 1755 + while (1) { 1756 + 
cmds = hpsa_find_outstanding_commands_for_dev(h, device); 1757 + if (cmds == 0) 1758 + break; 1759 + if (++waits > 20) 1760 + break; 1761 + dev_warn(&h->pdev->dev, 1762 + "%s: removing device with %d outstanding commands!\n", 1763 + __func__, cmds); 1764 + msleep(1000); 1765 + } 1766 + } 1767 + 1749 1768 static void hpsa_remove_device(struct ctlr_info *h, 1750 1769 struct hpsa_scsi_dev_t *device) 1751 1770 { ··· 1814 1743 hpsa_show_dev_msg(KERN_WARNING, h, device, 1815 1744 "didn't find device for removal."); 1816 1745 } 1817 - } else /* HBA */ 1746 + } else { /* HBA */ 1747 + 1748 + device->removed = 1; 1749 + hpsa_wait_for_outstanding_commands_for_dev(h, device); 1750 + 1818 1751 hpsa_remove_sas_device(device); 1752 + } 1819 1753 } 1820 1754 1821 1755 static void adjust_hpsa_scsi_table(struct ctlr_info *h, ··· 2222 2146 static int handle_ioaccel_mode2_error(struct ctlr_info *h, 2223 2147 struct CommandList *c, 2224 2148 struct scsi_cmnd *cmd, 2225 - struct io_accel2_cmd *c2) 2149 + struct io_accel2_cmd *c2, 2150 + struct hpsa_scsi_dev_t *dev) 2226 2151 { 2227 2152 int data_len; 2228 2153 int retry = 0; ··· 2287 2210 case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE: 2288 2211 case IOACCEL2_STATUS_SR_INVALID_DEVICE: 2289 2212 case IOACCEL2_STATUS_SR_IOACCEL_DISABLED: 2290 - /* We will get an event from ctlr to trigger rescan */ 2291 - retry = 1; 2213 + /* 2214 + * Did an HBA disk disappear? We will eventually 2215 + * get a state change event from the controller but 2216 + * in the meantime, we need to tell the OS that the 2217 + * HBA disk is no longer there and stop I/O 2218 + * from going down. This allows the potential re-insert 2219 + * of the disk to get the same device node. 
2220 + */ 2221 + if (dev->physical_device && dev->expose_device) { 2222 + cmd->result = DID_NO_CONNECT << 16; 2223 + dev->removed = 1; 2224 + h->drv_req_rescan = 1; 2225 + dev_warn(&h->pdev->dev, 2226 + "%s: device is gone!\n", __func__); 2227 + } else 2228 + /* 2229 + * Retry by sending down the RAID path. 2230 + * We will get an event from ctlr to 2231 + * trigger rescan regardless. 2232 + */ 2233 + retry = 1; 2292 2234 break; 2293 2235 default: 2294 2236 retry = 1; ··· 2431 2335 c2->error_data.serv_response == 2432 2336 IOACCEL2_SERV_RESPONSE_FAILURE) { 2433 2337 if (c2->error_data.status == 2434 - IOACCEL2_STATUS_SR_IOACCEL_DISABLED) 2338 + IOACCEL2_STATUS_SR_IOACCEL_DISABLED) { 2435 2339 dev->offload_enabled = 0; 2340 + dev->offload_to_be_enabled = 0; 2341 + } 2436 2342 2437 2343 return hpsa_retry_cmd(h, c); 2438 2344 } 2439 2345 2440 - if (handle_ioaccel_mode2_error(h, c, cmd, c2)) 2346 + if (handle_ioaccel_mode2_error(h, c, cmd, c2, dev)) 2441 2347 return hpsa_retry_cmd(h, c); 2442 2348 2443 2349 return hpsa_cmd_free_and_done(h, c, cmd); ··· 2904 2806 goto out; 2905 2807 } 2906 2808 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, 2907 - PCI_DMA_FROMDEVICE, NO_TIMEOUT); 2809 + PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT); 2908 2810 if (rc) 2909 2811 goto out; 2910 2812 ei = c->err_info; ··· 2930 2832 /* fill_cmd can't fail here, no data buffer to map. 
*/ 2931 2833 (void) fill_cmd(c, reset_type, h, NULL, 0, 0, 2932 2834 scsi3addr, TYPE_MSG); 2933 - rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); 2835 + rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT); 2934 2836 if (rc) { 2935 2837 dev_warn(&h->pdev->dev, "Failed to send reset command\n"); 2936 2838 goto out; ··· 3178 3080 return -1; 3179 3081 } 3180 3082 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, 3181 - PCI_DMA_FROMDEVICE, NO_TIMEOUT); 3083 + PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT); 3182 3084 if (rc) 3183 3085 goto out; 3184 3086 ei = c->err_info; ··· 3221 3123 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; 3222 3124 3223 3125 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, 3224 - PCI_DMA_FROMDEVICE, NO_TIMEOUT); 3126 + PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT); 3225 3127 if (rc) 3226 3128 goto out; 3227 3129 ei = c->err_info; ··· 3249 3151 goto out; 3250 3152 3251 3153 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, 3252 - PCI_DMA_FROMDEVICE, NO_TIMEOUT); 3154 + PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT); 3253 3155 if (rc) 3254 3156 goto out; 3255 3157 ei = c->err_info; ··· 3280 3182 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; 3281 3183 3282 3184 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE, 3283 - NO_TIMEOUT); 3185 + DEFAULT_TIMEOUT); 3284 3186 ei = c->err_info; 3285 3187 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { 3286 3188 hpsa_scsi_interpret_error(h, c); ··· 3348 3250 c->Request.CDB[5] = 0; 3349 3251 3350 3252 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE, 3351 - NO_TIMEOUT); 3253 + DEFAULT_TIMEOUT); 3352 3254 if (rc) 3353 3255 goto out; 3354 3256 ··· 3560 3462 if (extended_response) 3561 3463 c->Request.CDB[1] = extended_response; 3562 3464 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, 3563 - PCI_DMA_FROMDEVICE, NO_TIMEOUT); 3465 + PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT); 3564 3466 if (rc) 3565 3467 goto out; 3566 3468 ei = c->err_info; ··· 3667 3569 c = cmd_alloc(h); 
3668 3570 3669 3571 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD); 3670 - rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); 3572 + rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, 3573 + DEFAULT_TIMEOUT); 3671 3574 if (rc) { 3672 3575 cmd_free(h, c); 3673 3576 return 0; ··· 3743 3644 c = cmd_alloc(h); 3744 3645 3745 3646 (void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG); 3746 - (void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); 3647 + (void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, 3648 + DEFAULT_TIMEOUT); 3747 3649 /* no unmap needed here because no data xfer. */ 3748 3650 ei = c->err_info; 3749 3651 switch (ei->CommandStatus) { ··· 5334 5234 5335 5235 dev = cmd->device->hostdata; 5336 5236 if (!dev) { 5237 + cmd->result = NOT_READY << 16; /* host byte */ 5238 + cmd->scsi_done(cmd); 5239 + return 0; 5240 + } 5241 + 5242 + if (dev->removed) { 5337 5243 cmd->result = DID_NO_CONNECT << 16; 5338 5244 cmd->scsi_done(cmd); 5339 5245 return 0; ··· 5520 5414 /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */ 5521 5415 (void) fill_cmd(c, TEST_UNIT_READY, h, 5522 5416 NULL, 0, 0, lunaddr, TYPE_CMD); 5523 - rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); 5417 + rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT); 5524 5418 if (rc) 5525 5419 return rc; 5526 5420 /* no unmap needed here because no data xfer. 
*/ ··· 5744 5638 0, 0, scsi3addr, TYPE_MSG); 5745 5639 if (h->needs_abort_tags_swizzled) 5746 5640 swizzle_abort_tag(&c->Request.CDB[4]); 5747 - (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); 5641 + (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT); 5748 5642 hpsa_get_tag(h, abort, &taglower, &tagupper); 5749 5643 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n", 5750 5644 __func__, tagupper, taglower); ··· 5909 5803 c = cmd_alloc(h); 5910 5804 setup_ioaccel2_abort_cmd(c, h, abort, reply_queue); 5911 5805 c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; 5912 - (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); 5806 + (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT); 5913 5807 hpsa_get_tag(h, abort, &taglower, &tagupper); 5914 5808 dev_dbg(&h->pdev->dev, 5915 5809 "%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n", ··· 6454 6348 c->SG[0].Len = cpu_to_le32(iocommand.buf_size); 6455 6349 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */ 6456 6350 } 6457 - rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); 6351 + rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, 6352 + DEFAULT_TIMEOUT); 6458 6353 if (iocommand.buf_size > 0) 6459 6354 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); 6460 6355 check_ioctl_unit_attention(h, c); ··· 6587 6480 } 6588 6481 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST); 6589 6482 } 6590 - status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); 6483 + status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, 6484 + DEFAULT_TIMEOUT); 6591 6485 if (sg_used) 6592 6486 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); 6593 6487 check_ioctl_unit_attention(h, c); ··· 8362 8254 event_type = "configuration change"; 8363 8255 /* Stop sending new RAID offload reqs via the IO accelerator */ 8364 8256 scsi_block_requests(h->scsi_host); 8365 - for (i = 0; i < h->ndevices; i++) 8257 + for (i = 
0; i < h->ndevices; i++) { 8366 8258 h->dev[i]->offload_enabled = 0; 8259 + h->dev[i]->offload_to_be_enabled = 0; 8260 + } 8367 8261 hpsa_drain_accel_commands(h); 8368 8262 /* Set 'accelerator path config change' bit */ 8369 8263 dev_warn(&h->pdev->dev, ··· 8651 8541 if (rc) 8652 8542 goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */ 8653 8543 8654 - /* hook into SCSI subsystem */ 8655 - rc = hpsa_scsi_add_host(h); 8656 - if (rc) 8657 - goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */ 8658 - 8659 8544 /* create the resubmit workqueue */ 8660 8545 h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan"); 8661 8546 if (!h->rescan_ctlr_wq) { ··· 8747 8642 dev_info(&h->pdev->dev, 8748 8643 "Can't track change to report lun data\n"); 8749 8644 8645 + /* hook into SCSI subsystem */ 8646 + rc = hpsa_scsi_add_host(h); 8647 + if (rc) 8648 + goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */ 8649 + 8750 8650 /* Monitor the controller for firmware lockups */ 8751 8651 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; 8752 8652 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker); ··· 8813 8703 goto out; 8814 8704 } 8815 8705 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, 8816 - PCI_DMA_TODEVICE, NO_TIMEOUT); 8706 + PCI_DMA_TODEVICE, DEFAULT_TIMEOUT); 8817 8707 if (rc) 8818 8708 goto out; 8819 8709 if (c->err_info->CommandStatus != 0) ··· 8852 8742 goto errout; 8853 8743 8854 8744 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, 8855 - PCI_DMA_FROMDEVICE, NO_TIMEOUT); 8745 + PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT); 8856 8746 if ((rc != 0) || (c->err_info->CommandStatus != 0)) 8857 8747 goto errout; 8858 8748 ··· 8864 8754 goto errout; 8865 8755 8866 8756 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, 8867 - PCI_DMA_TODEVICE, NO_TIMEOUT); 8757 + PCI_DMA_TODEVICE, DEFAULT_TIMEOUT); 8868 8758 if ((rc != 0) || (c->err_info->CommandStatus != 0)) 8869 8759 goto errout; 8870 8760 ··· 8874 8764 goto errout; 8875 8765 8876 8766 rc = 
hpsa_scsi_do_simple_cmd_with_retry(h, c, 8877 - PCI_DMA_FROMDEVICE, NO_TIMEOUT); 8767 + PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT); 8878 8768 if ((rc != 0) || (c->err_info->CommandStatus != 0)) 8879 8769 goto errout; 8880 8770 ··· 9712 9602 static int 9713 9603 hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) 9714 9604 { 9605 + *identifier = 0; 9715 9606 return 0; 9716 9607 } 9717 9608
+1
drivers/scsi/hpsa.h
··· 63 63 unsigned char scsi3addr[8]; /* as presented to the HW */ 64 64 u8 physical_device : 1; 65 65 u8 expose_device; 66 + u8 removed : 1; /* device is marked for death */ 66 67 #define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0" 67 68 unsigned char device_id[16]; /* from inquiry pg. 0x83 */ 68 69 u64 sas_address;
+3 -6
drivers/scsi/libiscsi.c
··· 2127 2127 struct iscsi_conn *conn; 2128 2128 struct iscsi_task *task; 2129 2129 struct iscsi_tm *hdr; 2130 - int rc, age; 2130 + int age; 2131 2131 2132 2132 cls_session = starget_to_session(scsi_target(sc->device)); 2133 2133 session = cls_session->dd_data; ··· 2188 2188 hdr = &conn->tmhdr; 2189 2189 iscsi_prep_abort_task_pdu(task, hdr); 2190 2190 2191 - if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) { 2192 - rc = FAILED; 2191 + if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) 2193 2192 goto failed; 2194 - } 2195 2193 2196 2194 switch (conn->tmf_state) { 2197 2195 case TMF_SUCCESS: ··· 2421 2423 * 2422 2424 * This will attempt to send a warm target reset. 2423 2425 */ 2424 - int iscsi_eh_target_reset(struct scsi_cmnd *sc) 2426 + static int iscsi_eh_target_reset(struct scsi_cmnd *sc) 2425 2427 { 2426 2428 struct iscsi_cls_session *cls_session; 2427 2429 struct iscsi_session *session; ··· 2493 2495 mutex_unlock(&session->eh_mutex); 2494 2496 return rc; 2495 2497 } 2496 - EXPORT_SYMBOL_GPL(iscsi_eh_target_reset); 2497 2498 2498 2499 /** 2499 2500 * iscsi_eh_recover_target - reset target and possibly the session
+2 -2
drivers/scsi/lpfc/lpfc.h
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2015 Emulex. All rights reserved. * 4 + * Copyright (C) 2004-2016 Emulex. All rights reserved. * 5 5 * EMULEX and SLI are trademarks of Emulex. * 6 6 * www.emulex.com * 7 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * ··· 694 694 uint8_t wwnn[8]; 695 695 uint8_t wwpn[8]; 696 696 uint32_t RandomData[7]; 697 + uint32_t fcp_embed_io; 697 698 698 699 /* HBA Config Parameters */ 699 700 uint32_t cfg_ack0; ··· 758 757 uint32_t cfg_fdmi_on; 759 758 #define LPFC_FDMI_NO_SUPPORT 0 /* FDMI not supported */ 760 759 #define LPFC_FDMI_SUPPORT 1 /* FDMI supported? */ 761 - #define LPFC_FDMI_SMART_SAN 2 /* SmartSAN supported */ 762 760 uint32_t cfg_enable_SmartSAN; 763 761 lpfc_vpd_t vpd; /* vital product data */ 764 762
+8 -18
drivers/scsi/lpfc/lpfc_attr.c
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2015 Emulex. All rights reserved. * 4 + * Copyright (C) 2004-2016 Emulex. All rights reserved. * 5 5 * EMULEX and SLI are trademarks of Emulex. * 6 6 * www.emulex.com * 7 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * ··· 4584 4584 # lpfc_fdmi_on: Controls FDMI support. 4585 4585 # 0 No FDMI support (default) 4586 4586 # 1 Traditional FDMI support 4587 - # 2 Smart SAN support 4588 - # If lpfc_enable_SmartSAN is set 1, the driver sets lpfc_fdmi_on to value 2 4589 - # overwriting the current value. If lpfc_enable_SmartSAN is set 0, the 4590 - # driver uses the current value of lpfc_fdmi_on provided it has value 0 or 1. 4591 - # A value of 2 with lpfc_enable_SmartSAN set to 0 causes the driver to 4592 - # set lpfc_fdmi_on back to 1. 4593 - # Value range [0,2]. Default value is 0. 4587 + # Traditional FDMI support means the driver will assume FDMI-2 support; 4588 + # however, if that fails, it will fallback to FDMI-1. 4589 + # If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on. 4590 + # If lpfc_enable_SmartSAN is set 0, the driver uses the current value of 4591 + # lpfc_fdmi_on. 4592 + # Value range [0,1]. Default value is 0. 
4594 4593 */ 4595 - LPFC_ATTR_R(fdmi_on, 0, 0, 2, "Enable FDMI support"); 4594 + LPFC_ATTR_R(fdmi_on, 0, 0, 1, "Enable FDMI support"); 4596 4595 4597 4596 /* 4598 4597 # Specifies the maximum number of ELS cmds we can have outstanding (for ··· 5148 5149 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr); 5149 5150 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); 5150 5151 } 5151 - 5152 5152 5153 5153 /* 5154 5154 * Dynamic FC Host Attributes Support ··· 5854 5856 phba->cfg_poll = 0; 5855 5857 else 5856 5858 phba->cfg_poll = lpfc_poll; 5857 - 5858 - /* Ensure fdmi_on and enable_SmartSAN don't conflict */ 5859 - if (phba->cfg_enable_SmartSAN) { 5860 - phba->cfg_fdmi_on = LPFC_FDMI_SMART_SAN; 5861 - } else { 5862 - if (phba->cfg_fdmi_on == LPFC_FDMI_SMART_SAN) 5863 - phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT; 5864 - } 5865 5859 5866 5860 phba->cfg_soft_wwnn = 0L; 5867 5861 phba->cfg_soft_wwpn = 0L;
+3 -3
drivers/scsi/lpfc/lpfc_ct.c
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2015 Emulex. All rights reserved. * 4 + * Copyright (C) 2004-2016 Emulex. All rights reserved. * 5 5 * EMULEX and SLI are trademarks of Emulex. * 6 6 * www.emulex.com * 7 7 * * ··· 2322 2322 ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; 2323 2323 memset(ae, 0, 256); 2324 2324 2325 - strncpy(ae->un.AttrString, "Smart SAN Version 1.0", 2325 + strncpy(ae->un.AttrString, "Smart SAN Version 2.0", 2326 2326 sizeof(ae->un.AttrString)); 2327 2327 len = strnlen(ae->un.AttrString, 2328 2328 sizeof(ae->un.AttrString)); ··· 2397 2397 uint32_t size; 2398 2398 2399 2399 ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; 2400 - ae->un.AttrInt = cpu_to_be32(0); 2400 + ae->un.AttrInt = cpu_to_be32(1); 2401 2401 size = FOURBYTES + sizeof(uint32_t); 2402 2402 ad->AttrLen = cpu_to_be16(size); 2403 2403 ad->AttrType = cpu_to_be16(RPRT_SMART_SECURITY);
+167 -9
drivers/scsi/lpfc/lpfc_els.c
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2015 Emulex. All rights reserved. * 4 + * Copyright (C) 2004-2016 Emulex. All rights reserved. * 5 5 * EMULEX and SLI are trademarks of Emulex. * 6 6 * www.emulex.com * 7 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * ··· 690 690 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); 691 691 if (fabric_param_changed) { 692 692 /* Reset FDMI attribute masks based on config parameter */ 693 - if (phba->cfg_fdmi_on == LPFC_FDMI_NO_SUPPORT) { 694 - vport->fdmi_hba_mask = 0; 695 - vport->fdmi_port_mask = 0; 696 - } else { 693 + if (phba->cfg_enable_SmartSAN || 694 + (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { 697 695 /* Setup appropriate attribute masks */ 698 696 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; 699 - if (phba->cfg_fdmi_on == LPFC_FDMI_SMART_SAN) 697 + if (phba->cfg_enable_SmartSAN) 700 698 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; 701 699 else 702 700 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; 701 + } else { 702 + vport->fdmi_hba_mask = 0; 703 + vport->fdmi_port_mask = 0; 703 704 } 704 705 705 706 } ··· 1070 1069 lpfc_sli4_unreg_all_rpis(vport); 1071 1070 } 1072 1071 } 1073 - lpfc_issue_reg_vfi(vport); 1072 + 1073 + /* Do not register VFI if the driver aborted FLOGI */ 1074 + if (!lpfc_error_lost_link(irsp)) 1075 + lpfc_issue_reg_vfi(vport); 1074 1076 lpfc_nlp_put(ndlp); 1075 1077 goto out; 1076 1078 } ··· 4709 4705 desc->length = cpu_to_be32(sizeof(desc->info)); 4710 4706 } 4711 4707 4708 + void 4709 + lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat, 4710 + struct lpfc_vport *vport) 4711 + { 4712 + desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG); 4713 + 4714 + desc->bbc_info.port_bbc = cpu_to_be32( 4715 + vport->fc_sparam.cmn.bbCreditMsb | 4716 + vport->fc_sparam.cmn.bbCreditlsb << 8); 4717 + if 
(vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) 4718 + desc->bbc_info.attached_port_bbc = cpu_to_be32( 4719 + vport->phba->fc_fabparam.cmn.bbCreditMsb | 4720 + vport->phba->fc_fabparam.cmn.bbCreditlsb << 8); 4721 + else 4722 + desc->bbc_info.attached_port_bbc = 0; 4723 + 4724 + desc->bbc_info.rtt = 0; 4725 + desc->length = cpu_to_be32(sizeof(desc->bbc_info)); 4726 + } 4727 + 4728 + void 4729 + lpfc_rdp_res_oed_temp_desc(struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) 4730 + { 4731 + uint32_t flags; 4732 + 4733 + desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 4734 + 4735 + desc->oed_info.hi_alarm = 4736 + cpu_to_be16(page_a2[SSF_TEMP_HIGH_ALARM]); 4737 + desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_TEMP_LOW_ALARM]); 4738 + desc->oed_info.hi_warning = 4739 + cpu_to_be16(page_a2[SSF_TEMP_HIGH_WARNING]); 4740 + desc->oed_info.lo_warning = 4741 + cpu_to_be16(page_a2[SSF_TEMP_LOW_WARNING]); 4742 + flags = 0xf; /* All four are valid */ 4743 + flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT); 4744 + desc->oed_info.function_flags = cpu_to_be32(flags); 4745 + desc->length = cpu_to_be32(sizeof(desc->oed_info)); 4746 + } 4747 + 4748 + void 4749 + lpfc_rdp_res_oed_voltage_desc(struct fc_rdp_oed_sfp_desc *desc, 4750 + uint8_t *page_a2) 4751 + { 4752 + uint32_t flags; 4753 + 4754 + desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 4755 + 4756 + desc->oed_info.hi_alarm = 4757 + cpu_to_be16(page_a2[SSF_VOLTAGE_HIGH_ALARM]); 4758 + desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_VOLTAGE_LOW_ALARM]); 4759 + desc->oed_info.hi_warning = 4760 + cpu_to_be16(page_a2[SSF_VOLTAGE_HIGH_WARNING]); 4761 + desc->oed_info.lo_warning = 4762 + cpu_to_be16(page_a2[SSF_VOLTAGE_LOW_WARNING]); 4763 + flags = 0xf; /* All four are valid */ 4764 + flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT); 4765 + desc->oed_info.function_flags = cpu_to_be32(flags); 4766 + desc->length = cpu_to_be32(sizeof(desc->oed_info)); 4767 + } 4768 + 4769 + void 4770 + lpfc_rdp_res_oed_txbias_desc(struct 
fc_rdp_oed_sfp_desc *desc, 4771 + uint8_t *page_a2) 4772 + { 4773 + uint32_t flags; 4774 + 4775 + desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 4776 + 4777 + desc->oed_info.hi_alarm = 4778 + cpu_to_be16(page_a2[SSF_BIAS_HIGH_ALARM]); 4779 + desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_BIAS_LOW_ALARM]); 4780 + desc->oed_info.hi_warning = 4781 + cpu_to_be16(page_a2[SSF_BIAS_HIGH_WARNING]); 4782 + desc->oed_info.lo_warning = 4783 + cpu_to_be16(page_a2[SSF_BIAS_LOW_WARNING]); 4784 + flags = 0xf; /* All four are valid */ 4785 + flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT); 4786 + desc->oed_info.function_flags = cpu_to_be32(flags); 4787 + desc->length = cpu_to_be32(sizeof(desc->oed_info)); 4788 + } 4789 + 4790 + void 4791 + lpfc_rdp_res_oed_txpower_desc(struct fc_rdp_oed_sfp_desc *desc, 4792 + uint8_t *page_a2) 4793 + { 4794 + uint32_t flags; 4795 + 4796 + desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 4797 + 4798 + desc->oed_info.hi_alarm = 4799 + cpu_to_be16(page_a2[SSF_TXPOWER_HIGH_ALARM]); 4800 + desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_TXPOWER_LOW_ALARM]); 4801 + desc->oed_info.hi_warning = 4802 + cpu_to_be16(page_a2[SSF_TXPOWER_HIGH_WARNING]); 4803 + desc->oed_info.lo_warning = 4804 + cpu_to_be16(page_a2[SSF_TXPOWER_LOW_WARNING]); 4805 + flags = 0xf; /* All four are valid */ 4806 + flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT); 4807 + desc->oed_info.function_flags = cpu_to_be32(flags); 4808 + desc->length = cpu_to_be32(sizeof(desc->oed_info)); 4809 + } 4810 + 4811 + 4812 + void 4813 + lpfc_rdp_res_oed_rxpower_desc(struct fc_rdp_oed_sfp_desc *desc, 4814 + uint8_t *page_a2) 4815 + { 4816 + uint32_t flags; 4817 + 4818 + desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 4819 + 4820 + desc->oed_info.hi_alarm = 4821 + cpu_to_be16(page_a2[SSF_RXPOWER_HIGH_ALARM]); 4822 + desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_RXPOWER_LOW_ALARM]); 4823 + desc->oed_info.hi_warning = 4824 + cpu_to_be16(page_a2[SSF_RXPOWER_HIGH_WARNING]); 4825 + 
desc->oed_info.lo_warning = 4826 + cpu_to_be16(page_a2[SSF_RXPOWER_LOW_WARNING]); 4827 + flags = 0xf; /* All four are valid */ 4828 + flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT); 4829 + desc->oed_info.function_flags = cpu_to_be32(flags); 4830 + desc->length = cpu_to_be32(sizeof(desc->oed_info)); 4831 + } 4832 + 4833 + void 4834 + lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc, 4835 + uint8_t *page_a0, struct lpfc_vport *vport) 4836 + { 4837 + desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG); 4838 + memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16); 4839 + memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16); 4840 + memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16); 4841 + memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 2); 4842 + memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8); 4843 + desc->length = cpu_to_be32(sizeof(desc->opd_info)); 4844 + } 4845 + 4712 4846 int 4713 4847 lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat) 4714 4848 { ··· 4918 4776 4919 4777 if (rdp_cap == 0) 4920 4778 rdp_cap = RDP_CAP_UNKNOWN; 4779 + if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO) 4780 + rdp_cap |= RDP_CAP_USER_CONFIGURED; 4921 4781 4922 4782 desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap); 4923 4783 desc->length = cpu_to_be32(sizeof(desc->info)); ··· 5019 4875 lpfc_rdp_res_diag_port_names(&rdp_res->diag_port_names_desc, phba); 5020 4876 lpfc_rdp_res_attach_port_names(&rdp_res->attached_port_names_desc, 5021 4877 vport, ndlp); 4878 + lpfc_rdp_res_bbc_desc(&rdp_res->bbc_desc, &rdp_context->link_stat, 4879 + vport); 4880 + lpfc_rdp_res_oed_temp_desc(&rdp_res->oed_temp_desc, 4881 + rdp_context->page_a2); 4882 + lpfc_rdp_res_oed_voltage_desc(&rdp_res->oed_voltage_desc, 4883 + rdp_context->page_a2); 4884 + lpfc_rdp_res_oed_txbias_desc(&rdp_res->oed_txbias_desc, 4885 + rdp_context->page_a2); 4886 + lpfc_rdp_res_oed_txpower_desc(&rdp_res->oed_txpower_desc, 4887 + 
rdp_context->page_a2); 4888 + lpfc_rdp_res_oed_rxpower_desc(&rdp_res->oed_rxpower_desc, 4889 + rdp_context->page_a2); 4890 + lpfc_rdp_res_opd_desc(&rdp_res->opd_desc, rdp_context->page_a0, vport); 5022 4891 fec_size = lpfc_rdp_res_fec_desc(&rdp_res->fec_desc, 5023 4892 &rdp_context->link_stat); 5024 4893 rdp_res->length = cpu_to_be32(fec_size + RDP_DESC_PAYLOAD_SIZE); ··· 8006 7849 return; 8007 7850 } 8008 7851 8009 - if ((phba->cfg_fdmi_on > LPFC_FDMI_NO_SUPPORT) && 8010 - (vport->load_flag & FC_ALLOW_FDMI)) 7852 + if ((phba->cfg_enable_SmartSAN || 7853 + (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) && 7854 + (vport->load_flag & FC_ALLOW_FDMI)) 8011 7855 lpfc_start_fdmi(vport); 8012 7856 } 8013 7857
+3 -2
drivers/scsi/lpfc/lpfc_hbadisc.c
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2015 Emulex. All rights reserved. * 4 + * Copyright (C) 2004-2016 Emulex. All rights reserved. * 5 5 * EMULEX and SLI are trademarks of Emulex. * 6 6 * www.emulex.com * 7 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * ··· 4545 4545 (!(vport->load_flag & FC_UNLOADING)) && 4546 4546 (bf_get(lpfc_sli_intf_if_type, 4547 4547 &phba->sli4_hba.sli_intf) == 4548 - LPFC_SLI_INTF_IF_TYPE_2)) { 4548 + LPFC_SLI_INTF_IF_TYPE_2) && 4549 + (atomic_read(&ndlp->kref.refcount) > 0)) { 4549 4550 mbox->context1 = lpfc_nlp_get(ndlp); 4550 4551 mbox->mbox_cmpl = 4551 4552 lpfc_sli4_unreg_rpi_cmpl_clr;
+69 -6
drivers/scsi/lpfc/lpfc_hw.h
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2015 Emulex. All rights reserved. * 4 + * Copyright (C) 2004-2016 Emulex. All rights reserved. * 5 5 * EMULEX and SLI are trademarks of Emulex. * 6 6 * www.emulex.com * 7 7 * * ··· 1134 1134 #define RDP_PS_16GB 0x0400 1135 1135 #define RDP_PS_32GB 0x0200 1136 1136 1137 - #define RDP_CAP_UNKNOWN 0x0001 1138 - #define RDP_PS_UNKNOWN 0x0002 1139 - #define RDP_PS_NOT_ESTABLISHED 0x0001 1137 + #define RDP_CAP_USER_CONFIGURED 0x0002 1138 + #define RDP_CAP_UNKNOWN 0x0001 1139 + #define RDP_PS_UNKNOWN 0x0002 1140 + #define RDP_PS_NOT_ESTABLISHED 0x0001 1140 1141 1141 1142 struct fc_rdp_port_speed { 1142 1143 uint16_t capabilities; ··· 1193 1192 struct fc_rdp_sfp_info sfp_info; 1194 1193 }; 1195 1194 1195 + /* Buffer Credit Descriptor */ 1196 + struct fc_rdp_bbc_info { 1197 + uint32_t port_bbc; /* FC_Port buffer-to-buffer credit */ 1198 + uint32_t attached_port_bbc; 1199 + uint32_t rtt; /* Round trip time */ 1200 + }; 1201 + #define RDP_BBC_DESC_TAG 0x00010006 1202 + struct fc_rdp_bbc_desc { 1203 + uint32_t tag; 1204 + uint32_t length; 1205 + struct fc_rdp_bbc_info bbc_info; 1206 + }; 1207 + 1208 + #define RDP_OED_TEMPERATURE 0x1 1209 + #define RDP_OED_VOLTAGE 0x2 1210 + #define RDP_OED_TXBIAS 0x3 1211 + #define RDP_OED_TXPOWER 0x4 1212 + #define RDP_OED_RXPOWER 0x5 1213 + 1214 + #define RDP_OED_TYPE_SHIFT 28 1215 + /* Optical Element Data descriptor */ 1216 + struct fc_rdp_oed_info { 1217 + uint16_t hi_alarm; 1218 + uint16_t lo_alarm; 1219 + uint16_t hi_warning; 1220 + uint16_t lo_warning; 1221 + uint32_t function_flags; 1222 + }; 1223 + #define RDP_OED_DESC_TAG 0x00010007 1224 + struct fc_rdp_oed_sfp_desc { 1225 + uint32_t tag; 1226 + uint32_t length; 1227 + struct fc_rdp_oed_info oed_info; 1228 + }; 1229 + 1230 + /* Optical Product Data descriptor */ 1231 + struct 
fc_rdp_opd_sfp_info { 1232 + uint8_t vendor_name[16]; 1233 + uint8_t model_number[16]; 1234 + uint8_t serial_number[16]; 1235 + uint8_t reserved[2]; 1236 + uint8_t revision[2]; 1237 + uint8_t date[8]; 1238 + }; 1239 + 1240 + #define RDP_OPD_DESC_TAG 0x00010008 1241 + struct fc_rdp_opd_sfp_desc { 1242 + uint32_t tag; 1243 + uint32_t length; 1244 + struct fc_rdp_opd_sfp_info opd_info; 1245 + }; 1246 + 1196 1247 struct fc_rdp_req_frame { 1197 1248 uint32_t rdp_command; /* ELS command opcode (0x18)*/ 1198 1249 uint32_t rdp_des_length; /* RDP Payload Word 1 */ ··· 1261 1208 struct fc_rdp_link_error_status_desc link_error_desc; /* Word 13-21 */ 1262 1209 struct fc_rdp_port_name_desc diag_port_names_desc; /* Word 22-27 */ 1263 1210 struct fc_rdp_port_name_desc attached_port_names_desc;/* Word 28-33 */ 1264 - struct fc_fec_rdp_desc fec_desc; /* FC Word 34 - 37 */ 1211 + struct fc_rdp_bbc_desc bbc_desc; /* FC Word 34-38*/ 1212 + struct fc_rdp_oed_sfp_desc oed_temp_desc; /* FC Word 39-43*/ 1213 + struct fc_rdp_oed_sfp_desc oed_voltage_desc; /* FC word 44-48*/ 1214 + struct fc_rdp_oed_sfp_desc oed_txbias_desc; /* FC word 49-53*/ 1215 + struct fc_rdp_oed_sfp_desc oed_txpower_desc; /* FC word 54-58*/ 1216 + struct fc_rdp_oed_sfp_desc oed_rxpower_desc; /* FC word 59-63*/ 1217 + struct fc_rdp_opd_sfp_desc opd_desc; /* FC word 64-80*/ 1218 + struct fc_fec_rdp_desc fec_desc; /* FC word 81-84*/ 1265 1219 }; 1266 1220 1267 1221 ··· 1276 1216 + sizeof(struct fc_rdp_sfp_desc) \ 1277 1217 + sizeof(struct fc_rdp_port_speed_desc) \ 1278 1218 + sizeof(struct fc_rdp_link_error_status_desc) \ 1279 - + (sizeof(struct fc_rdp_port_name_desc) * 2)) 1219 + + (sizeof(struct fc_rdp_port_name_desc) * 2) \ 1220 + + sizeof(struct fc_rdp_bbc_desc) \ 1221 + + (sizeof(struct fc_rdp_oed_sfp_desc) * 5) \ 1222 + + sizeof(struct fc_rdp_opd_sfp_desc)) 1280 1223 1281 1224 1282 1225 /******** FDMI ********/
+27 -2
drivers/scsi/lpfc/lpfc_hw4.h
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2009-2015 Emulex. All rights reserved. * 4 + * Copyright (C) 2009-2016 Emulex. All rights reserved. * 5 5 * EMULEX and SLI are trademarks of Emulex. * 6 6 * www.emulex.com * 7 7 * * ··· 2557 2557 2558 2558 /* SFF-8472 Table 3.1a Diagnostics: Data Fields Address/Page A2 */ 2559 2559 2560 - #define SSF_AW_THRESHOLDS 0 2560 + #define SSF_TEMP_HIGH_ALARM 0 2561 + #define SSF_TEMP_LOW_ALARM 2 2562 + #define SSF_TEMP_HIGH_WARNING 4 2563 + #define SSF_TEMP_LOW_WARNING 6 2564 + #define SSF_VOLTAGE_HIGH_ALARM 8 2565 + #define SSF_VOLTAGE_LOW_ALARM 10 2566 + #define SSF_VOLTAGE_HIGH_WARNING 12 2567 + #define SSF_VOLTAGE_LOW_WARNING 14 2568 + #define SSF_BIAS_HIGH_ALARM 16 2569 + #define SSF_BIAS_LOW_ALARM 18 2570 + #define SSF_BIAS_HIGH_WARNING 20 2571 + #define SSF_BIAS_LOW_WARNING 22 2572 + #define SSF_TXPOWER_HIGH_ALARM 24 2573 + #define SSF_TXPOWER_LOW_ALARM 26 2574 + #define SSF_TXPOWER_HIGH_WARNING 28 2575 + #define SSF_TXPOWER_LOW_WARNING 30 2576 + #define SSF_RXPOWER_HIGH_ALARM 32 2577 + #define SSF_RXPOWER_LOW_ALARM 34 2578 + #define SSF_RXPOWER_HIGH_WARNING 36 2579 + #define SSF_RXPOWER_LOW_WARNING 38 2561 2580 #define SSF_EXT_CAL_CONSTANTS 56 2562 2581 #define SSF_CC_DMI 95 2563 2582 #define SFF_TEMPERATURE_B1 96 ··· 2884 2865 uint32_t word17; 2885 2866 uint32_t word18; 2886 2867 uint32_t word19; 2868 + #define cfg_ext_embed_cb_SHIFT 0 2869 + #define cfg_ext_embed_cb_MASK 0x00000001 2870 + #define cfg_ext_embed_cb_WORD word19 2887 2871 }; 2888 2872 2889 2873 struct lpfc_mbx_get_sli4_parameters { ··· 3941 3919 union lpfc_wqe128 { 3942 3920 uint32_t words[32]; 3943 3921 struct lpfc_wqe_generic generic; 3922 + struct fcp_icmnd64_wqe fcp_icmd; 3923 + struct fcp_iread64_wqe fcp_iread; 3924 + struct fcp_iwrite64_wqe fcp_iwrite; 3944 3925 struct xmit_seq64_wqe 
xmit_sequence; 3945 3926 struct gen_req64_wqe gen_req; 3946 3927 };
+22 -5
drivers/scsi/lpfc/lpfc_init.c
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2015 Emulex. All rights reserved. * 4 + * Copyright (C) 2004-2016 Emulex. All rights reserved. * 5 5 * EMULEX and SLI are trademarks of Emulex. * 6 6 * www.emulex.com * 7 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * ··· 6158 6158 * any initial discovery should be completed. 6159 6159 */ 6160 6160 vport->load_flag |= FC_ALLOW_FDMI; 6161 - if (phba->cfg_fdmi_on > LPFC_FDMI_NO_SUPPORT) { 6161 + if (phba->cfg_enable_SmartSAN || 6162 + (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { 6162 6163 6163 6164 /* Setup appropriate attribute masks */ 6164 6165 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; 6165 - if (phba->cfg_fdmi_on == LPFC_FDMI_SMART_SAN) 6166 + if (phba->cfg_enable_SmartSAN) 6166 6167 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; 6167 6168 else 6168 6169 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; ··· 7265 7264 phba->sli4_hba.fcp_cq[idx] = qdesc; 7266 7265 7267 7266 /* Create Fast Path FCP WQs */ 7268 - qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 7269 - phba->sli4_hba.wq_ecount); 7267 + if (phba->fcp_embed_io) { 7268 + qdesc = lpfc_sli4_queue_alloc(phba, 7269 + LPFC_WQE128_SIZE, 7270 + LPFC_WQE128_DEF_COUNT); 7271 + } else { 7272 + qdesc = lpfc_sli4_queue_alloc(phba, 7273 + phba->sli4_hba.wq_esize, 7274 + phba->sli4_hba.wq_ecount); 7275 + } 7270 7276 if (!qdesc) { 7271 7277 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7272 7278 "0503 Failed allocate fast-path FCP " ··· 9518 9510 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 9519 9511 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 9520 9512 9513 + /* 9514 + * Issue IOs with CDB embedded in WQE to minimized the number 9515 + * of DMAs the firmware has to do. Setting this to 1 also forces 9516 + * the driver to use 128 bytes WQEs for FCP IOs. 
9517 + */ 9518 + if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters)) 9519 + phba->fcp_embed_io = 1; 9520 + else 9521 + phba->fcp_embed_io = 0; 9521 9522 return 0; 9522 9523 } 9523 9524
+7 -5
drivers/scsi/lpfc/lpfc_mbox.c
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2015 Emulex. All rights reserved. * 4 + * Copyright (C) 2004-2016 Emulex. All rights reserved. * 5 5 * EMULEX and SLI are trademarks of Emulex. * 6 6 * www.emulex.com * 7 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * ··· 2145 2145 reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]); 2146 2146 reg_vfi->e_d_tov = phba->fc_edtov; 2147 2147 reg_vfi->r_a_tov = phba->fc_ratov; 2148 - reg_vfi->bde.addrHigh = putPaddrHigh(phys); 2149 - reg_vfi->bde.addrLow = putPaddrLow(phys); 2150 - reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam); 2151 - reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; 2148 + if (phys) { 2149 + reg_vfi->bde.addrHigh = putPaddrHigh(phys); 2150 + reg_vfi->bde.addrLow = putPaddrLow(phys); 2151 + reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam); 2152 + reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; 2153 + } 2152 2154 bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID); 2153 2155 2154 2156 /* Only FC supports upd bit */
+4 -2
drivers/scsi/lpfc/lpfc_mem.c
··· 231 231 if (phba->lpfc_hbq_pool) 232 232 pci_pool_destroy(phba->lpfc_hbq_pool); 233 233 phba->lpfc_hbq_pool = NULL; 234 - mempool_destroy(phba->rrq_pool); 234 + 235 + if (phba->rrq_pool) 236 + mempool_destroy(phba->rrq_pool); 235 237 phba->rrq_pool = NULL; 236 238 237 239 /* Free NLP memory pool */ 238 240 mempool_destroy(phba->nlp_mem_pool); 239 241 phba->nlp_mem_pool = NULL; 240 - if (phba->sli_rev == LPFC_SLI_REV4) { 242 + if (phba->sli_rev == LPFC_SLI_REV4 && phba->active_rrq_pool) { 241 243 mempool_destroy(phba->active_rrq_pool); 242 244 phba->active_rrq_pool = NULL; 243 245 }
+3 -1
drivers/scsi/lpfc/lpfc_nportdisc.c
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2015 Emulex. All rights reserved. * 4 + * Copyright (C) 2004-2016 Emulex. All rights reserved. * 5 5 * EMULEX and SLI are trademarks of Emulex. * 6 6 * www.emulex.com * 7 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * ··· 1512 1512 if ((mb = phba->sli.mbox_active)) { 1513 1513 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && 1514 1514 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 1515 + ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 1515 1516 lpfc_nlp_put(ndlp); 1516 1517 mb->context2 = NULL; 1517 1518 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; ··· 1528 1527 __lpfc_mbuf_free(phba, mp->virt, mp->phys); 1529 1528 kfree(mp); 1530 1529 } 1530 + ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 1531 1531 lpfc_nlp_put(ndlp); 1532 1532 list_del(&mb->list); 1533 1533 phba->sli.mboxq_cnt--;
+116 -24
drivers/scsi/lpfc/lpfc_sli.c
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2015 Emulex. All rights reserved. * 4 + * Copyright (C) 2004-2016 Emulex. All rights reserved. * 5 5 * EMULEX and SLI are trademarks of Emulex. * 6 6 * www.emulex.com * 7 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * ··· 2000 2000 * @phba: Pointer to HBA context object. 2001 2001 * @tag: Tag of the hbq buffer. 2002 2002 * 2003 - * This function is called with hbalock held. This function searches 2004 - * for the hbq buffer associated with the given tag in the hbq buffer 2005 - * list. If it finds the hbq buffer, it returns the hbq_buffer other wise 2006 - * it returns NULL. 2003 + * This function searches for the hbq buffer associated with the given tag in 2004 + * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer 2005 + * otherwise it returns NULL. 
2007 2006 **/ 2008 2007 static struct hbq_dmabuf * 2009 2008 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) ··· 2010 2011 struct lpfc_dmabuf *d_buf; 2011 2012 struct hbq_dmabuf *hbq_buf; 2012 2013 uint32_t hbqno; 2013 - 2014 - lockdep_assert_held(&phba->hbalock); 2015 2014 2016 2015 hbqno = tag >> 16; 2017 2016 if (hbqno >= LPFC_MAX_HBQS) ··· 2208 2211 rpi = pmb->u.mb.un.varWords[0]; 2209 2212 vpi = pmb->u.mb.un.varRegLogin.vpi; 2210 2213 lpfc_unreg_login(phba, vpi, rpi, pmb); 2214 + pmb->vport = vport; 2211 2215 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2212 2216 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2213 2217 if (rc != MBX_NOT_FINISHED) ··· 4686 4688 4687 4689 break; 4688 4690 } 4691 + phba->fcp_embed_io = 0; /* SLI4 FC support only */ 4689 4692 4690 4693 rc = lpfc_sli_config_port(phba, mode); 4691 4694 ··· 6319 6320 6320 6321 mqe = &mboxq->u.mqe; 6321 6322 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); 6322 - if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) 6323 + if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) { 6323 6324 phba->hba_flag |= HBA_FCOE_MODE; 6324 - else 6325 + phba->fcp_embed_io = 0; /* SLI4 FC support only */ 6326 + } else { 6325 6327 phba->hba_flag &= ~HBA_FCOE_MODE; 6328 + } 6326 6329 6327 6330 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == 6328 6331 LPFC_DCBX_CEE_MODE) ··· 8219 8218 else 8220 8219 command_type = ELS_COMMAND_NON_FIP; 8221 8220 8221 + if (phba->fcp_embed_io) 8222 + memset(wqe, 0, sizeof(union lpfc_wqe128)); 8222 8223 /* Some of the fields are in the right position already */ 8223 8224 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); 8224 - abort_tag = (uint32_t) iocbq->iotag; 8225 - xritag = iocbq->sli4_xritag; 8226 8225 wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */ 8227 8226 wqe->generic.wqe_com.word10 = 0; 8227 + 8228 + abort_tag = (uint32_t) iocbq->iotag; 8229 + xritag = iocbq->sli4_xritag; 8228 8230 /* words0-2 bpl convert bde */ 8229 8231 
if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 8230 8232 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / ··· 8376 8372 iocbq->iocb.ulpFCP2Rcvy); 8377 8373 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS); 8378 8374 /* Always open the exchange */ 8379 - bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0); 8380 8375 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE); 8381 8376 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, 8382 8377 LPFC_WQE_LENLOC_WORD4); 8383 - bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0); 8384 8378 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); 8385 8379 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); 8386 8380 if (iocbq->iocb_flag & LPFC_IO_OAS) { ··· 8388 8386 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, 8389 8387 (phba->cfg_XLanePriority << 1)); 8390 8388 } 8389 + } 8390 + /* Note, word 10 is already initialized to 0 */ 8391 + 8392 + if (phba->fcp_embed_io) { 8393 + struct lpfc_scsi_buf *lpfc_cmd; 8394 + struct sli4_sge *sgl; 8395 + union lpfc_wqe128 *wqe128; 8396 + struct fcp_cmnd *fcp_cmnd; 8397 + uint32_t *ptr; 8398 + 8399 + /* 128 byte wqe support here */ 8400 + wqe128 = (union lpfc_wqe128 *)wqe; 8401 + 8402 + lpfc_cmd = iocbq->context1; 8403 + sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; 8404 + fcp_cmnd = lpfc_cmd->fcp_cmnd; 8405 + 8406 + /* Word 0-2 - FCP_CMND */ 8407 + wqe128->generic.bde.tus.f.bdeFlags = 8408 + BUFF_TYPE_BDE_IMMED; 8409 + wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len; 8410 + wqe128->generic.bde.addrHigh = 0; 8411 + wqe128->generic.bde.addrLow = 88; /* Word 22 */ 8412 + 8413 + bf_set(wqe_wqes, &wqe128->fcp_iwrite.wqe_com, 1); 8414 + 8415 + /* Word 22-29 FCP CMND Payload */ 8416 + ptr = &wqe128->words[22]; 8417 + memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); 8391 8418 } 8392 8419 break; 8393 8420 case CMD_FCP_IREAD64_CR: ··· 8432 8401 iocbq->iocb.ulpFCP2Rcvy); 8433 8402 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS); 8434 8403 /* Always open the exchange */ 
8435 - bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); 8436 8404 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ); 8437 8405 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, 8438 8406 LPFC_WQE_LENLOC_WORD4); 8439 - bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0); 8440 8407 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); 8441 8408 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); 8442 8409 if (iocbq->iocb_flag & LPFC_IO_OAS) { ··· 8444 8415 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com, 8445 8416 (phba->cfg_XLanePriority << 1)); 8446 8417 } 8418 + } 8419 + /* Note, word 10 is already initialized to 0 */ 8420 + 8421 + if (phba->fcp_embed_io) { 8422 + struct lpfc_scsi_buf *lpfc_cmd; 8423 + struct sli4_sge *sgl; 8424 + union lpfc_wqe128 *wqe128; 8425 + struct fcp_cmnd *fcp_cmnd; 8426 + uint32_t *ptr; 8427 + 8428 + /* 128 byte wqe support here */ 8429 + wqe128 = (union lpfc_wqe128 *)wqe; 8430 + 8431 + lpfc_cmd = iocbq->context1; 8432 + sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; 8433 + fcp_cmnd = lpfc_cmd->fcp_cmnd; 8434 + 8435 + /* Word 0-2 - FCP_CMND */ 8436 + wqe128->generic.bde.tus.f.bdeFlags = 8437 + BUFF_TYPE_BDE_IMMED; 8438 + wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len; 8439 + wqe128->generic.bde.addrHigh = 0; 8440 + wqe128->generic.bde.addrLow = 88; /* Word 22 */ 8441 + 8442 + bf_set(wqe_wqes, &wqe128->fcp_iread.wqe_com, 1); 8443 + 8444 + /* Word 22-29 FCP CMND Payload */ 8445 + ptr = &wqe128->words[22]; 8446 + memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); 8447 8447 } 8448 8448 break; 8449 8449 case CMD_FCP_ICMND64_CR: ··· 8485 8427 /* word3 iocb=IO_TAG wqe=reserved */ 8486 8428 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0); 8487 8429 /* Always open the exchange */ 8488 - bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0); 8489 8430 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1); 8490 8431 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE); 8491 8432 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1); 8492 8433 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, 8493 8434 
LPFC_WQE_LENLOC_NONE); 8494 - bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0); 8495 8435 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com, 8496 8436 iocbq->iocb.ulpFCP2Rcvy); 8497 8437 if (iocbq->iocb_flag & LPFC_IO_OAS) { ··· 8499 8443 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com, 8500 8444 (phba->cfg_XLanePriority << 1)); 8501 8445 } 8446 + } 8447 + /* Note, word 10 is already initialized to 0 */ 8448 + 8449 + if (phba->fcp_embed_io) { 8450 + struct lpfc_scsi_buf *lpfc_cmd; 8451 + struct sli4_sge *sgl; 8452 + union lpfc_wqe128 *wqe128; 8453 + struct fcp_cmnd *fcp_cmnd; 8454 + uint32_t *ptr; 8455 + 8456 + /* 128 byte wqe support here */ 8457 + wqe128 = (union lpfc_wqe128 *)wqe; 8458 + 8459 + lpfc_cmd = iocbq->context1; 8460 + sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; 8461 + fcp_cmnd = lpfc_cmd->fcp_cmnd; 8462 + 8463 + /* Word 0-2 - FCP_CMND */ 8464 + wqe128->generic.bde.tus.f.bdeFlags = 8465 + BUFF_TYPE_BDE_IMMED; 8466 + wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len; 8467 + wqe128->generic.bde.addrHigh = 0; 8468 + wqe128->generic.bde.addrLow = 88; /* Word 22 */ 8469 + 8470 + bf_set(wqe_wqes, &wqe128->fcp_icmd.wqe_com, 1); 8471 + 8472 + /* Word 22-29 FCP CMND Payload */ 8473 + ptr = &wqe128->words[22]; 8474 + memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); 8502 8475 } 8503 8476 break; 8504 8477 case CMD_GEN_REQUEST64_CR: ··· 8760 8675 struct lpfc_iocbq *piocb, uint32_t flag) 8761 8676 { 8762 8677 struct lpfc_sglq *sglq; 8763 - union lpfc_wqe wqe; 8678 + union lpfc_wqe *wqe; 8679 + union lpfc_wqe128 wqe128; 8764 8680 struct lpfc_queue *wq; 8765 8681 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; 8766 8682 8767 8683 lockdep_assert_held(&phba->hbalock); 8684 + 8685 + /* 8686 + * The WQE can be either 64 or 128 bytes, 8687 + * so allocate space on the stack assuming the largest. 
8688 + */ 8689 + wqe = (union lpfc_wqe *)&wqe128; 8768 8690 8769 8691 if (piocb->sli4_xritag == NO_XRI) { 8770 8692 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || ··· 8819 8727 return IOCB_ERROR; 8820 8728 } 8821 8729 8822 - if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe)) 8730 + if (lpfc_sli4_iocb2wqe(phba, piocb, wqe)) 8823 8731 return IOCB_ERROR; 8824 8732 8825 8733 if ((piocb->iocb_flag & LPFC_IO_FCP) || ··· 8829 8737 } else { 8830 8738 wq = phba->sli4_hba.oas_wq; 8831 8739 } 8832 - if (lpfc_sli4_wq_put(wq, &wqe)) 8740 + if (lpfc_sli4_wq_put(wq, wqe)) 8833 8741 return IOCB_ERROR; 8834 8742 } else { 8835 8743 if (unlikely(!phba->sli4_hba.els_wq)) 8836 8744 return IOCB_ERROR; 8837 - if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) 8745 + if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, wqe)) 8838 8746 return IOCB_ERROR; 8839 8747 } 8840 8748 lpfc_sli_ringtxcmpl_put(phba, pring, piocb); ··· 8849 8757 * pointer from the lpfc_hba struct. 8850 8758 * 8851 8759 * Return codes: 8852 - * IOCB_ERROR - Error 8853 - * IOCB_SUCCESS - Success 8854 - * IOCB_BUSY - Busy 8760 + * IOCB_ERROR - Error 8761 + * IOCB_SUCCESS - Success 8762 + * IOCB_BUSY - Busy 8855 8763 **/ 8856 8764 int 8857 8765 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
+3 -3
drivers/scsi/lpfc/lpfc_version.h
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2015 Emulex. All rights reserved. * 4 + * Copyright (C) 2004-2016 Emulex. All rights reserved. * 5 5 * EMULEX and SLI are trademarks of Emulex. * 6 6 * www.emulex.com * 7 7 * * ··· 18 18 * included with this package. * 19 19 *******************************************************************/ 20 20 21 - #define LPFC_DRIVER_VERSION "11.0.0.10." 21 + #define LPFC_DRIVER_VERSION "11.1.0.0." 22 22 #define LPFC_DRIVER_NAME "lpfc" 23 23 24 24 /* Used for SLI 2/3 */ ··· 30 30 31 31 #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \ 32 32 LPFC_DRIVER_VERSION 33 - #define LPFC_COPYRIGHT "Copyright(c) 2004-2015 Emulex. All rights reserved." 33 + #define LPFC_COPYRIGHT "Copyright(c) 2004-2016 Emulex. All rights reserved."
+3 -2
drivers/scsi/lpfc/lpfc_vport.c
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2013 Emulex. All rights reserved. * 4 + * Copyright (C) 2004-2016 Emulex. All rights reserved. * 5 5 * EMULEX and SLI are trademarks of Emulex. * 6 6 * www.emulex.com * 7 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * ··· 395 395 396 396 /* At this point we are fully registered with SCSI Layer. */ 397 397 vport->load_flag |= FC_ALLOW_FDMI; 398 - if (phba->cfg_fdmi_on > LPFC_FDMI_NO_SUPPORT) { 398 + if (phba->cfg_enable_SmartSAN || 399 + (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { 399 400 /* Setup appropriate attribute masks */ 400 401 vport->fdmi_hba_mask = phba->pport->fdmi_hba_mask; 401 402 vport->fdmi_port_mask = phba->pport->fdmi_port_mask;
+123 -104
drivers/scsi/mac_scsi.c
··· 28 28 29 29 /* Definitions for the core NCR5380 driver. */ 30 30 31 - #define PSEUDO_DMA 32 - 33 - #define NCR5380_implementation_fields unsigned char *pdma_base 31 + #define NCR5380_implementation_fields unsigned char *pdma_base; \ 32 + int pdma_residual 34 33 35 34 #define NCR5380_read(reg) macscsi_read(instance, reg) 36 35 #define NCR5380_write(reg, value) macscsi_write(instance, reg, value) 37 36 38 - #define NCR5380_pread macscsi_pread 39 - #define NCR5380_pwrite macscsi_pwrite 40 - #define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize) 37 + #define NCR5380_dma_xfer_len(instance, cmd, phase) \ 38 + macscsi_dma_xfer_len(instance, cmd) 39 + #define NCR5380_dma_recv_setup macscsi_pread 40 + #define NCR5380_dma_send_setup macscsi_pwrite 41 + #define NCR5380_dma_residual(instance) (hostdata->pdma_residual) 41 42 42 43 #define NCR5380_intr macscsi_intr 43 44 #define NCR5380_queue_command macscsi_queue_command 44 45 #define NCR5380_abort macscsi_abort 45 46 #define NCR5380_bus_reset macscsi_bus_reset 46 47 #define NCR5380_info macscsi_info 47 - #define NCR5380_show_info macscsi_show_info 48 - #define NCR5380_write_info macscsi_write_info 49 48 50 49 #include "NCR5380.h" 51 50 ··· 56 57 module_param(setup_sg_tablesize, int, 0); 57 58 static int setup_use_pdma = -1; 58 59 module_param(setup_use_pdma, int, 0); 59 - static int setup_use_tagged_queuing = -1; 60 - module_param(setup_use_tagged_queuing, int, 0); 61 60 static int setup_hostid = -1; 62 61 module_param(setup_hostid, int, 0); 63 62 static int setup_toshiba_delay = -1; ··· 94 97 setup_sg_tablesize = ints[3]; 95 98 if (ints[0] >= 4) 96 99 setup_hostid = ints[4]; 97 - if (ints[0] >= 5) 98 - setup_use_tagged_queuing = ints[5]; 100 + /* ints[5] (use_tagged_queuing) is ignored */ 99 101 if (ints[0] >= 6) 100 102 setup_use_pdma = ints[6]; 101 103 if (ints[0] >= 7) ··· 105 109 __setup("mac5380=", mac_scsi_setup); 106 110 #endif /* !MODULE */ 107 111 108 - #ifdef PSEUDO_DMA 109 - /* 110 - Pseudo-DMA: 
(Ove Edlund) 111 - The code attempts to catch bus errors that occur if one for example 112 - "trips over the cable". 113 - XXX: Since bus errors in the PDMA routines never happen on my 114 - computer, the bus error code is untested. 115 - If the code works as intended, a bus error results in Pseudo-DMA 116 - being disabled, meaning that the driver switches to slow handshake. 117 - If bus errors are NOT extremely rare, this has to be changed. 118 - */ 112 + /* Pseudo DMA asm originally by Ove Edlund */ 119 113 120 - #define CP_IO_TO_MEM(s,d,len) \ 114 + #define CP_IO_TO_MEM(s,d,n) \ 121 115 __asm__ __volatile__ \ 122 116 (" cmp.w #4,%2\n" \ 123 117 " bls 8f\n" \ ··· 144 158 " 9: \n" \ 145 159 ".section .fixup,\"ax\"\n" \ 146 160 " .even\n" \ 147 - "90: moveq.l #1, %2\n" \ 161 + "91: moveq.l #1, %2\n" \ 162 + " jra 9b\n" \ 163 + "94: moveq.l #4, %2\n" \ 148 164 " jra 9b\n" \ 149 165 ".previous\n" \ 150 166 ".section __ex_table,\"a\"\n" \ 151 167 " .align 4\n" \ 152 - " .long 1b,90b\n" \ 153 - " .long 3b,90b\n" \ 154 - " .long 31b,90b\n" \ 155 - " .long 32b,90b\n" \ 156 - " .long 33b,90b\n" \ 157 - " .long 34b,90b\n" \ 158 - " .long 35b,90b\n" \ 159 - " .long 36b,90b\n" \ 160 - " .long 37b,90b\n" \ 161 - " .long 5b,90b\n" \ 162 - " .long 7b,90b\n" \ 168 + " .long 1b,91b\n" \ 169 + " .long 3b,94b\n" \ 170 + " .long 31b,94b\n" \ 171 + " .long 32b,94b\n" \ 172 + " .long 33b,94b\n" \ 173 + " .long 34b,94b\n" \ 174 + " .long 35b,94b\n" \ 175 + " .long 36b,94b\n" \ 176 + " .long 37b,94b\n" \ 177 + " .long 5b,94b\n" \ 178 + " .long 7b,91b\n" \ 163 179 ".previous" \ 164 - : "=a"(s), "=a"(d), "=d"(len) \ 165 - : "0"(s), "1"(d), "2"(len) \ 180 + : "=a"(s), "=a"(d), "=d"(n) \ 181 + : "0"(s), "1"(d), "2"(n) \ 166 182 : "d0") 167 183 168 184 static int macscsi_pread(struct Scsi_Host *instance, 169 185 unsigned char *dst, int len) 170 186 { 171 187 struct NCR5380_hostdata *hostdata = shost_priv(instance); 172 - unsigned char *d; 173 - unsigned char *s; 188 + unsigned char *s = 
hostdata->pdma_base + (INPUT_DATA_REG << 4); 189 + unsigned char *d = dst; 190 + int n = len; 191 + int transferred; 174 192 175 - s = hostdata->pdma_base + (INPUT_DATA_REG << 4); 176 - d = dst; 193 + while (!NCR5380_poll_politely(instance, BUS_AND_STATUS_REG, 194 + BASR_DRQ | BASR_PHASE_MATCH, 195 + BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) { 196 + CP_IO_TO_MEM(s, d, n); 177 197 178 - /* These conditions are derived from MacOS */ 198 + transferred = d - dst - n; 199 + hostdata->pdma_residual = len - transferred; 179 200 180 - while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) && 181 - !(NCR5380_read(STATUS_REG) & SR_REQ)) 182 - ; 201 + /* No bus error. */ 202 + if (n == 0) 203 + return 0; 183 204 184 - if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) && 185 - (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) { 186 - pr_err("Error in macscsi_pread\n"); 187 - return -1; 205 + /* Target changed phase early? */ 206 + if (NCR5380_poll_politely2(instance, STATUS_REG, SR_REQ, SR_REQ, 207 + BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0) 208 + scmd_printk(KERN_ERR, hostdata->connected, 209 + "%s: !REQ and !ACK\n", __func__); 210 + if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) 211 + return 0; 212 + 213 + dsprintk(NDEBUG_PSEUDO_DMA, instance, 214 + "%s: bus error (%d/%d)\n", __func__, transferred, len); 215 + NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance); 216 + d = dst + transferred; 217 + n = len - transferred; 188 218 } 189 219 190 - CP_IO_TO_MEM(s, d, len); 191 - 192 - if (len != 0) { 193 - pr_notice("Bus error in macscsi_pread\n"); 194 - return -1; 195 - } 196 - 197 - return 0; 220 + scmd_printk(KERN_ERR, hostdata->connected, 221 + "%s: phase mismatch or !DRQ\n", __func__); 222 + NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance); 223 + return -1; 198 224 } 199 225 200 226 201 - #define CP_MEM_TO_IO(s,d,len) \ 227 + #define CP_MEM_TO_IO(s,d,n) \ 202 228 __asm__ __volatile__ \ 203 229 (" cmp.w #4,%2\n" \ 204 230 " bls 8f\n" \ ··· 247 249 " 9: \n" \ 248 
250 ".section .fixup,\"ax\"\n" \ 249 251 " .even\n" \ 250 - "90: moveq.l #1, %2\n" \ 252 + "91: moveq.l #1, %2\n" \ 253 + " jra 9b\n" \ 254 + "94: moveq.l #4, %2\n" \ 251 255 " jra 9b\n" \ 252 256 ".previous\n" \ 253 257 ".section __ex_table,\"a\"\n" \ 254 258 " .align 4\n" \ 255 - " .long 1b,90b\n" \ 256 - " .long 3b,90b\n" \ 257 - " .long 31b,90b\n" \ 258 - " .long 32b,90b\n" \ 259 - " .long 33b,90b\n" \ 260 - " .long 34b,90b\n" \ 261 - " .long 35b,90b\n" \ 262 - " .long 36b,90b\n" \ 263 - " .long 37b,90b\n" \ 264 - " .long 5b,90b\n" \ 265 - " .long 7b,90b\n" \ 259 + " .long 1b,91b\n" \ 260 + " .long 3b,94b\n" \ 261 + " .long 31b,94b\n" \ 262 + " .long 32b,94b\n" \ 263 + " .long 33b,94b\n" \ 264 + " .long 34b,94b\n" \ 265 + " .long 35b,94b\n" \ 266 + " .long 36b,94b\n" \ 267 + " .long 37b,94b\n" \ 268 + " .long 5b,94b\n" \ 269 + " .long 7b,91b\n" \ 266 270 ".previous" \ 267 - : "=a"(s), "=a"(d), "=d"(len) \ 268 - : "0"(s), "1"(d), "2"(len) \ 271 + : "=a"(s), "=a"(d), "=d"(n) \ 272 + : "0"(s), "1"(d), "2"(n) \ 269 273 : "d0") 270 274 271 275 static int macscsi_pwrite(struct Scsi_Host *instance, 272 276 unsigned char *src, int len) 273 277 { 274 278 struct NCR5380_hostdata *hostdata = shost_priv(instance); 275 - unsigned char *s; 276 - unsigned char *d; 279 + unsigned char *s = src; 280 + unsigned char *d = hostdata->pdma_base + (OUTPUT_DATA_REG << 4); 281 + int n = len; 282 + int transferred; 277 283 278 - s = src; 279 - d = hostdata->pdma_base + (OUTPUT_DATA_REG << 4); 284 + while (!NCR5380_poll_politely(instance, BUS_AND_STATUS_REG, 285 + BASR_DRQ | BASR_PHASE_MATCH, 286 + BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) { 287 + CP_MEM_TO_IO(s, d, n); 280 288 281 - /* These conditions are derived from MacOS */ 289 + transferred = s - src - n; 290 + hostdata->pdma_residual = len - transferred; 282 291 283 - while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) && 284 - (!(NCR5380_read(STATUS_REG) & SR_REQ) || 285 - (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))) 
286 - ; 292 + /* Target changed phase early? */ 293 + if (NCR5380_poll_politely2(instance, STATUS_REG, SR_REQ, SR_REQ, 294 + BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0) 295 + scmd_printk(KERN_ERR, hostdata->connected, 296 + "%s: !REQ and !ACK\n", __func__); 297 + if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) 298 + return 0; 287 299 288 - if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ)) { 289 - pr_err("Error in macscsi_pwrite\n"); 290 - return -1; 300 + /* No bus error. */ 301 + if (n == 0) { 302 + if (NCR5380_poll_politely(instance, TARGET_COMMAND_REG, 303 + TCR_LAST_BYTE_SENT, 304 + TCR_LAST_BYTE_SENT, HZ / 64) < 0) 305 + scmd_printk(KERN_ERR, hostdata->connected, 306 + "%s: Last Byte Sent timeout\n", __func__); 307 + return 0; 308 + } 309 + 310 + dsprintk(NDEBUG_PSEUDO_DMA, instance, 311 + "%s: bus error (%d/%d)\n", __func__, transferred, len); 312 + NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance); 313 + s = src + transferred; 314 + n = len - transferred; 291 315 } 292 316 293 - CP_MEM_TO_IO(s, d, len); 317 + scmd_printk(KERN_ERR, hostdata->connected, 318 + "%s: phase mismatch or !DRQ\n", __func__); 319 + NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance); 294 320 295 - if (len != 0) { 296 - pr_notice("Bus error in macscsi_pwrite\n"); 297 - return -1; 298 - } 299 - 300 - return 0; 321 + return -1; 301 322 } 302 - #endif 323 + 324 + static int macscsi_dma_xfer_len(struct Scsi_Host *instance, 325 + struct scsi_cmnd *cmd) 326 + { 327 + struct NCR5380_hostdata *hostdata = shost_priv(instance); 328 + 329 + if (hostdata->flags & FLAG_NO_PSEUDO_DMA || 330 + cmd->SCp.this_residual < 16) 331 + return 0; 332 + 333 + return cmd->SCp.this_residual; 334 + } 303 335 304 336 #include "NCR5380.c" 305 337 ··· 339 311 static struct scsi_host_template mac_scsi_template = { 340 312 .module = THIS_MODULE, 341 313 .proc_name = DRV_MODULE_NAME, 342 - .show_info = macscsi_show_info, 343 - .write_info = macscsi_write_info, 344 314 .name = "Macintosh NCR5380 SCSI", 345 315 
.info = macscsi_info, 346 316 .queuecommand = macscsi_queue_command, ··· 346 320 .eh_bus_reset_handler = macscsi_bus_reset, 347 321 .can_queue = 16, 348 322 .this_id = 7, 349 - .sg_tablesize = SG_ALL, 323 + .sg_tablesize = 1, 350 324 .cmd_per_lun = 2, 351 325 .use_clustering = DISABLE_CLUSTERING, 352 326 .cmd_size = NCR5380_CMD_SIZE, ··· 364 338 if (!pio_mem) 365 339 return -ENODEV; 366 340 367 - #ifdef PSEUDO_DMA 368 341 pdma_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); 369 - #endif 370 342 371 343 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 372 344 ··· 382 358 mac_scsi_template.sg_tablesize = setup_sg_tablesize; 383 359 if (setup_hostid >= 0) 384 360 mac_scsi_template.this_id = setup_hostid & 7; 385 - if (setup_use_pdma < 0) 386 - setup_use_pdma = 0; 387 361 388 362 instance = scsi_host_alloc(&mac_scsi_template, 389 363 sizeof(struct NCR5380_hostdata)); ··· 401 379 } else 402 380 host_flags |= FLAG_NO_PSEUDO_DMA; 403 381 404 - #ifdef SUPPORT_TAGS 405 - host_flags |= setup_use_tagged_queuing > 0 ? FLAG_TAGGED_QUEUING : 0; 406 - #endif 407 382 host_flags |= setup_toshiba_delay > 0 ? FLAG_TOSHIBA_DELAY : 0; 408 383 409 - error = NCR5380_init(instance, host_flags); 384 + error = NCR5380_init(instance, host_flags | FLAG_LATE_DMA_SETUP); 410 385 if (error) 411 386 goto fail_init; 412 387
+4 -2
drivers/scsi/megaraid/megaraid_sas.h
··· 35 35 /* 36 36 * MegaRAID SAS Driver meta data 37 37 */ 38 - #define MEGASAS_VERSION "06.810.09.00-rc1" 39 - #define MEGASAS_RELDATE "Jan. 28, 2016" 38 + #define MEGASAS_VERSION "06.811.02.00-rc1" 39 + #define MEGASAS_RELDATE "April 12, 2016" 40 40 41 41 /* 42 42 * Device IDs ··· 1343 1343 1344 1344 #define SCAN_PD_CHANNEL 0x1 1345 1345 #define SCAN_VD_CHANNEL 0x2 1346 + 1347 + #define MEGASAS_KDUMP_QUEUE_DEPTH 100 1346 1348 1347 1349 enum MR_SCSI_CMD_TYPE { 1348 1350 READ_WRITE_LDIO = 0,
+76 -41
drivers/scsi/megaraid/megaraid_sas_base.c
··· 2670 2670 } 2671 2671 2672 2672 /** 2673 - * megasas_reset_device - Device reset handler entry point 2674 - */ 2675 - static int megasas_reset_device(struct scsi_cmnd *scmd) 2676 - { 2677 - /* 2678 - * First wait for all commands to complete 2679 - */ 2680 - return megasas_generic_reset(scmd); 2681 - } 2682 - 2683 - /** 2684 2673 * megasas_reset_bus_host - Bus & host reset handler entry point 2685 2674 */ 2686 2675 static int megasas_reset_bus_host(struct scsi_cmnd *scmd) ··· 2686 2697 ret = megasas_reset_fusion(scmd->device->host, 1); 2687 2698 else 2688 2699 ret = megasas_generic_reset(scmd); 2700 + 2701 + return ret; 2702 + } 2703 + 2704 + /** 2705 + * megasas_task_abort - Issues task abort request to firmware 2706 + * (supported only for fusion adapters) 2707 + * @scmd: SCSI command pointer 2708 + */ 2709 + static int megasas_task_abort(struct scsi_cmnd *scmd) 2710 + { 2711 + int ret; 2712 + struct megasas_instance *instance; 2713 + 2714 + instance = (struct megasas_instance *)scmd->device->host->hostdata; 2715 + 2716 + if (instance->ctrl_context) 2717 + ret = megasas_task_abort_fusion(scmd); 2718 + else { 2719 + sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n"); 2720 + ret = FAILED; 2721 + } 2722 + 2723 + return ret; 2724 + } 2725 + 2726 + /** 2727 + * megasas_reset_target: Issues target reset request to firmware 2728 + * (supported only for fusion adapters) 2729 + * @scmd: SCSI command pointer 2730 + */ 2731 + static int megasas_reset_target(struct scsi_cmnd *scmd) 2732 + { 2733 + int ret; 2734 + struct megasas_instance *instance; 2735 + 2736 + instance = (struct megasas_instance *)scmd->device->host->hostdata; 2737 + 2738 + if (instance->ctrl_context) 2739 + ret = megasas_reset_target_fusion(scmd); 2740 + else { 2741 + sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n"); 2742 + ret = FAILED; 2743 + } 2689 2744 2690 2745 return ret; 2691 2746 } ··· 3002 2969 .slave_alloc = megasas_slave_alloc, 3003 2970 .slave_destroy 
= megasas_slave_destroy, 3004 2971 .queuecommand = megasas_queue_command, 3005 - .eh_device_reset_handler = megasas_reset_device, 3006 - .eh_bus_reset_handler = megasas_reset_bus_host, 2972 + .eh_target_reset_handler = megasas_reset_target, 2973 + .eh_abort_handler = megasas_task_abort, 3007 2974 .eh_host_reset_handler = megasas_reset_bus_host, 3008 2975 .eh_timed_out = megasas_reset_timer, 3009 2976 .shost_attrs = megaraid_host_attrs, ··· 5185 5152 5186 5153 instance->instancet->enable_intr(instance); 5187 5154 5188 - dev_err(&instance->pdev->dev, "INIT adapter done\n"); 5155 + dev_info(&instance->pdev->dev, "INIT adapter done\n"); 5189 5156 5190 5157 megasas_setup_jbod_map(instance); 5191 5158 ··· 5631 5598 host->max_lun = MEGASAS_MAX_LUN; 5632 5599 host->max_cmd_len = 16; 5633 5600 5634 - /* Fusion only supports host reset */ 5635 - if (instance->ctrl_context) { 5636 - host->hostt->eh_device_reset_handler = NULL; 5637 - host->hostt->eh_bus_reset_handler = NULL; 5638 - host->hostt->eh_target_reset_handler = megasas_reset_target_fusion; 5639 - host->hostt->eh_abort_handler = megasas_task_abort_fusion; 5640 - } 5641 - 5642 5601 /* 5643 5602 * Notify the mid-layer about the new controller 5644 5603 */ ··· 5786 5761 break; 5787 5762 } 5788 5763 5789 - instance->system_info_buf = pci_zalloc_consistent(pdev, 5790 - sizeof(struct MR_DRV_SYSTEM_INFO), 5791 - &instance->system_info_h); 5792 - 5793 - if (!instance->system_info_buf) 5794 - dev_info(&instance->pdev->dev, "Can't allocate system info buffer\n"); 5795 - 5796 5764 /* Crash dump feature related initialisation*/ 5797 5765 instance->drv_buf_index = 0; 5798 5766 instance->drv_buf_alloc = 0; ··· 5794 5776 instance->fw_crash_state = UNAVAILABLE; 5795 5777 spin_lock_init(&instance->crashdump_lock); 5796 5778 instance->crash_dump_buf = NULL; 5797 - 5798 - if (!reset_devices) 5799 - instance->crash_dump_buf = pci_alloc_consistent(pdev, 5800 - CRASH_DMA_BUF_SIZE, 5801 - &instance->crash_dump_h); 5802 - if 
(!instance->crash_dump_buf) 5803 - dev_err(&pdev->dev, "Can't allocate Firmware " 5804 - "crash dump DMA buffer\n"); 5805 5779 5806 5780 megasas_poll_wait_aen = 0; 5807 5781 instance->flag_ieee = 0; ··· 5813 5803 goto fail_alloc_dma_buf; 5814 5804 } 5815 5805 5816 - instance->pd_info = pci_alloc_consistent(pdev, 5817 - sizeof(struct MR_PD_INFO), &instance->pd_info_h); 5806 + if (!reset_devices) { 5807 + instance->system_info_buf = pci_zalloc_consistent(pdev, 5808 + sizeof(struct MR_DRV_SYSTEM_INFO), 5809 + &instance->system_info_h); 5810 + if (!instance->system_info_buf) 5811 + dev_info(&instance->pdev->dev, "Can't allocate system info buffer\n"); 5818 5812 5819 - if (!instance->pd_info) 5820 - dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n"); 5813 + instance->pd_info = pci_alloc_consistent(pdev, 5814 + sizeof(struct MR_PD_INFO), &instance->pd_info_h); 5815 + 5816 + if (!instance->pd_info) 5817 + dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n"); 5818 + 5819 + instance->crash_dump_buf = pci_alloc_consistent(pdev, 5820 + CRASH_DMA_BUF_SIZE, 5821 + &instance->crash_dump_h); 5822 + if (!instance->crash_dump_buf) 5823 + dev_err(&pdev->dev, "Can't allocate Firmware " 5824 + "crash dump DMA buffer\n"); 5825 + } 5821 5826 5822 5827 /* 5823 5828 * Initialize locks and queues ··· 7197 7172 static int __init megasas_init(void) 7198 7173 { 7199 7174 int rval; 7175 + 7176 + /* 7177 + * Booted in kdump kernel, minimize memory footprints by 7178 + * disabling few features 7179 + */ 7180 + if (reset_devices) { 7181 + msix_vectors = 1; 7182 + rdpq_enable = 0; 7183 + dual_qdepth_disable = 1; 7184 + } 7200 7185 7201 7186 /* 7202 7187 * Announce driver version and other information
+6 -1
drivers/scsi/megaraid/megaraid_sas_fusion.c
··· 257 257 if (!instance->is_rdpq) 258 258 instance->max_fw_cmds = min_t(u16, instance->max_fw_cmds, 1024); 259 259 260 + if (reset_devices) 261 + instance->max_fw_cmds = min(instance->max_fw_cmds, 262 + (u16)MEGASAS_KDUMP_QUEUE_DEPTH); 260 263 /* 261 264 * Reduce the max supported cmds by 1. This is to ensure that the 262 265 * reply_q_sz (1 more than the max cmd that driver may send) ··· 854 851 ret = 1; 855 852 goto fail_fw_init; 856 853 } 857 - dev_err(&instance->pdev->dev, "Init cmd success\n"); 854 + dev_info(&instance->pdev->dev, "Init cmd success\n"); 858 855 859 856 ret = 0; 860 857 ··· 2762 2759 dev_warn(&instance->pdev->dev, "Found FW in FAULT state," 2763 2760 " will reset adapter scsi%d.\n", 2764 2761 instance->host->host_no); 2762 + megasas_complete_cmd_dpc_fusion((unsigned long)instance); 2765 2763 retval = 1; 2766 2764 goto out; 2767 2765 } ··· 2770 2766 if (reason == MFI_IO_TIMEOUT_OCR) { 2771 2767 dev_info(&instance->pdev->dev, 2772 2768 "MFI IO is timed out, initiating OCR\n"); 2769 + megasas_complete_cmd_dpc_fusion((unsigned long)instance); 2773 2770 retval = 1; 2774 2771 goto out; 2775 2772 }
+5 -2
drivers/scsi/mpt3sas/mpi/mpi2.h
··· 8 8 * scatter/gather formats. 9 9 * Creation Date: June 21, 2006 10 10 * 11 - * mpi2.h Version: 02.00.39 11 + * mpi2.h Version: 02.00.42 12 12 * 13 13 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 14 14 * prefix are for use only on MPI v2.5 products, and must not be used ··· 100 100 * Added MPI2_DIAG_SBR_RELOAD. 101 101 * 03-19-15 02.00.38 Bumped MPI2_HEADER_VERSION_UNIT. 102 102 * 05-25-15 02.00.39 Bumped MPI2_HEADER_VERSION_UNIT. 103 + * 08-25-15 02.00.40 Bumped MPI2_HEADER_VERSION_UNIT. 104 + * 12-15-15 02.00.41 Bumped MPI_HEADER_VERSION_UNIT 105 + * 01-01-16 02.00.42 Bumped MPI_HEADER_VERSION_UNIT 103 106 * -------------------------------------------------------------------------- 104 107 */ 105 108 ··· 142 139 #define MPI2_VERSION_02_06 (0x0206) 143 140 144 141 /*Unit and Dev versioning for this MPI header set */ 145 - #define MPI2_HEADER_VERSION_UNIT (0x27) 142 + #define MPI2_HEADER_VERSION_UNIT (0x2A) 146 143 #define MPI2_HEADER_VERSION_DEV (0x00) 147 144 #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) 148 145 #define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
+13 -5
drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
··· 6 6 * Title: MPI Configuration messages and pages 7 7 * Creation Date: November 10, 2006 8 8 * 9 - * mpi2_cnfg.h Version: 02.00.33 9 + * mpi2_cnfg.h Version: 02.00.35 10 10 * 11 11 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 12 12 * prefix are for use only on MPI v2.5 products, and must not be used ··· 183 183 * Added MPI2_BIOSPAGE1_OPTIONS_ADVANCED_CONFIG. 184 184 * Added AdapterOrderAux fields to BIOS Page 3. 185 185 * 03-16-15 02.00.31 Updated for MPI v2.6. 186 + * Added Flags field to IO Unit Page 7. 186 187 * Added new SAS Phy Event codes 187 188 * 05-25-15 02.00.33 Added more defines for the BiosOptions field of 188 189 * MPI2_CONFIG_PAGE_BIOS_1. 190 + * 08-25-15 02.00.34 Bumped Header Version. 191 + * 12-18-15 02.00.35 Added SATADeviceWaitTime to SAS IO Unit Page 4. 189 192 * -------------------------------------------------------------------------- 190 193 */ 191 194 ··· 961 958 U8 Reserved3; /*0x17 */ 962 959 U32 BoardPowerRequirement; /*0x18 */ 963 960 U32 PCISlotPowerAllocation; /*0x1C */ 964 - U32 Reserved6; /* 0x20 */ 965 - U32 Reserved7; /* 0x24 */ 961 + /* reserved prior to MPI v2.6 */ 962 + U8 Flags; /* 0x20 */ 963 + U8 Reserved6; /* 0x21 */ 964 + U16 Reserved7; /* 0x22 */ 965 + U32 Reserved8; /* 0x24 */ 966 966 } MPI2_CONFIG_PAGE_IO_UNIT_7, 967 967 *PTR_MPI2_CONFIG_PAGE_IO_UNIT_7, 968 968 Mpi2IOUnitPage7_t, *pMpi2IOUnitPage7_t; 969 969 970 - #define MPI2_IOUNITPAGE7_PAGEVERSION (0x04) 970 + #define MPI2_IOUNITPAGE7_PAGEVERSION (0x05) 971 971 972 972 /*defines for IO Unit Page 7 CurrentPowerMode and PreviousPowerMode fields */ 973 973 #define MPI25_IOUNITPAGE7_PM_INIT_MASK (0xC0) ··· 1051 1045 #define MPI2_IOUNITPAGE7_BOARD_TEMP_FAHRENHEIT (0x01) 1052 1046 #define MPI2_IOUNITPAGE7_BOARD_TEMP_CELSIUS (0x02) 1053 1047 1048 + /* defines for IO Unit Page 7 Flags field */ 1049 + #define MPI2_IOUNITPAGE7_FLAG_CABLE_POWER_EXC (0x01) 1054 1050 1055 1051 /*IO Unit Page 8 */ 1056 1052 ··· 2279 2271 U8 2280 2272 
BootDeviceWaitTime; /*0x24 */ 2281 2273 U8 2282 - Reserved4; /*0x25 */ 2274 + SATADeviceWaitTime; /*0x25 */ 2283 2275 U16 2284 2276 Reserved5; /*0x26 */ 2285 2277 U8
+12 -3
drivers/scsi/mpt3sas/mpi/mpi2_init.h
··· 6 6 * Title: MPI SCSI initiator mode messages and structures 7 7 * Creation Date: June 23, 2006 8 8 * 9 - * mpi2_init.h Version: 02.00.17 9 + * mpi2_init.h Version: 02.00.20 10 10 * 11 11 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 12 12 * prefix are for use only on MPI v2.5 products, and must not be used ··· 51 51 * Added MPI26_SCSIIO_IOFLAGS_ESCAPE_PASSTHROUGH. 52 52 * Added MPI2_SEP_REQ_SLOTSTATUS_DEV_OFF and 53 53 * MPI2_SEP_REPLY_SLOTSTATUS_DEV_OFF. 54 + * 08-26-15 02.00.18 Added SCSITASKMGMT_MSGFLAGS for Target Reset. 55 + * 12-18-15 02.00.19 Added EEDPObservedValue added to SCSI IO Reply message. 56 + * 01-04-16 02.00.20 Modified EEDP reported values in SCSI IO Reply message. 54 57 * -------------------------------------------------------------------------- 55 58 */ 56 59 ··· 362 359 U16 TaskTag; /*0x20 */ 363 360 U16 SCSIStatusQualifier; /* 0x22 */ 364 361 U32 BidirectionalTransferCount; /*0x24 */ 365 - U32 EEDPErrorOffset; /*0x28 *//*MPI 2.5 only; Reserved in MPI 2.0*/ 366 - U32 Reserved6; /*0x2C */ 362 + /* MPI 2.5+ only; Reserved in MPI 2.0 */ 363 + U32 EEDPErrorOffset; /* 0x28 */ 364 + /* MPI 2.5+ only; Reserved in MPI 2.0 */ 365 + U16 EEDPObservedAppTag; /* 0x2C */ 366 + /* MPI 2.5+ only; Reserved in MPI 2.0 */ 367 + U16 EEDPObservedGuard; /* 0x2E */ 368 + /* MPI 2.5+ only; Reserved in MPI 2.0 */ 369 + U32 EEDPObservedRefTag; /* 0x30 */ 367 370 } MPI2_SCSI_IO_REPLY, *PTR_MPI2_SCSI_IO_REPLY, 368 371 Mpi2SCSIIOReply_t, *pMpi2SCSIIOReply_t; 369 372
+35 -5
drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
··· 6 6 * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages 7 7 * Creation Date: October 11, 2006 8 8 * 9 - * mpi2_ioc.h Version: 02.00.26 9 + * mpi2_ioc.h Version: 02.00.27 10 10 * 11 11 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 12 12 * prefix are for use only on MPI v2.5 products, and must not be used ··· 134 134 * Added Encrypted Hash Extended Image. 135 135 * 12-05-13 02.00.24 Added MPI25_HASH_IMAGE_TYPE_BIOS. 136 136 * 11-18-14 02.00.25 Updated copyright information. 137 - * 03-16-15 02.00.26 Added MPI26_FW_HEADER_PID_FAMILY_3324_SAS and 137 + * 03-16-15 02.00.26 Updated for MPI v2.6. 138 + * Added MPI2_EVENT_ACTIVE_CABLE_EXCEPTION and 139 + * MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT. 140 + * Added MPI26_FW_HEADER_PID_FAMILY_3324_SAS and 138 141 * MPI26_FW_HEADER_PID_FAMILY_3516_SAS. 139 142 * Added MPI26_CTRL_OP_SHUTDOWN. 143 + * 08-25-15 02.00.27 Added IC ARCH Class based signature defines 140 144 * -------------------------------------------------------------------------- 141 145 */ 142 146 ··· 172 168 U16 MsgVersion; /*0x0C */ 173 169 U16 HeaderVersion; /*0x0E */ 174 170 U32 Reserved5; /*0x10 */ 175 - U16 Reserved6; /*0x14 */ 171 + U16 ConfigurationFlags; /* 0x14 */ 176 172 U8 HostPageSize; /*0x16 */ 177 173 U8 HostMSIxVectors; /*0x17 */ 178 174 U16 Reserved8; /*0x18 */ ··· 520 516 #define MPI2_EVENT_TEMP_THRESHOLD (0x0027) 521 517 #define MPI2_EVENT_HOST_MESSAGE (0x0028) 522 518 #define MPI2_EVENT_POWER_PERFORMANCE_CHANGE (0x0029) 519 + #define MPI2_EVENT_ACTIVE_CABLE_EXCEPTION (0x0034) 523 520 #define MPI2_EVENT_MIN_PRODUCT_SPECIFIC (0x006E) 524 521 #define MPI2_EVENT_MAX_PRODUCT_SPECIFIC (0x007F) 525 522 ··· 585 580 } MPI2_EVENT_DATA_HOST_MESSAGE, *PTR_MPI2_EVENT_DATA_HOST_MESSAGE, 586 581 Mpi2EventDataHostMessage_t, *pMpi2EventDataHostMessage_t; 587 582 588 - /*Power Performance Change Event */ 583 + /*Power Performance Change Event data */ 589 584 590 585 typedef struct _MPI2_EVENT_DATA_POWER_PERF_CHANGE { 591 
586 U8 CurrentPowerMode; /*0x00 */ ··· 609 604 #define MPI2_EVENT_PM_MODE_FULL_POWER (0x04) 610 605 #define MPI2_EVENT_PM_MODE_REDUCED_POWER (0x05) 611 606 #define MPI2_EVENT_PM_MODE_STANDBY (0x06) 607 + 608 + /* Active Cable Exception Event data */ 609 + 610 + typedef struct _MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT { 611 + U32 ActiveCablePowerRequirement; /* 0x00 */ 612 + U8 ReasonCode; /* 0x04 */ 613 + U8 ReceptacleID; /* 0x05 */ 614 + U16 Reserved1; /* 0x06 */ 615 + } MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT, 616 + *PTR_MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT, 617 + Mpi26EventDataActiveCableExcept_t, 618 + *pMpi26EventDataActiveCableExcept_t; 619 + 620 + /* defines for ReasonCode field */ 621 + #define MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER (0x00) 612 622 613 623 /*Hard Reset Received Event data */ 614 624 ··· 1386 1366 /*Signature0 field */ 1387 1367 #define MPI2_FW_HEADER_SIGNATURE0_OFFSET (0x04) 1388 1368 #define MPI2_FW_HEADER_SIGNATURE0 (0x5AFAA55A) 1389 - #define MPI26_FW_HEADER_SIGNATURE0 (0x5AEAA55A) 1369 + /* Last byte is defined by architecture */ 1370 + #define MPI26_FW_HEADER_SIGNATURE0_BASE (0x5AEAA500) 1371 + #define MPI26_FW_HEADER_SIGNATURE0_ARC_0 (0x5A) 1372 + #define MPI26_FW_HEADER_SIGNATURE0_ARC_1 (0x00) 1373 + #define MPI26_FW_HEADER_SIGNATURE0_ARC_2 (0x01) 1374 + /* legacy (0x5AEAA55A) */ 1375 + #define MPI26_FW_HEADER_SIGNATURE0 \ 1376 + (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_0) 1377 + #define MPI26_FW_HEADER_SIGNATURE0_3516 \ 1378 + (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_1) 1390 1379 1391 1380 /*Signature1 field */ 1392 1381 #define MPI2_FW_HEADER_SIGNATURE1_OFFSET (0x08) ··· 1807 1778 #define MPI26_CTRL_OP_SAS_PHY_LINK_RESET (0x06) 1808 1779 #define MPI26_CTRL_OP_SAS_PHY_HARD_RESET (0x07) 1809 1780 #define MPI26_CTRL_OP_PHY_CLEAR_ERROR_LOG (0x08) 1781 + #define MPI26_CTRL_OP_LINK_CLEAR_ERROR_LOG (0x09) 1810 1782 #define MPI26_CTRL_OP_SAS_SEND_PRIMITIVE (0x0A) 1811 1783 #define 
MPI26_CTRL_OP_FORCE_FULL_DISCOVERY (0x0B) 1812 1784 #define MPI26_CTRL_OP_REMOVE_DEVICE (0x0D)
+18 -14
drivers/scsi/mpt3sas/mpt3sas_base.c
··· 57 57 #include <linux/dma-mapping.h> 58 58 #include <linux/io.h> 59 59 #include <linux/time.h> 60 + #include <linux/ktime.h> 60 61 #include <linux/kthread.h> 61 62 #include <linux/aer.h> 62 63 ··· 655 654 case MPI2_EVENT_TEMP_THRESHOLD: 656 655 desc = "Temperature Threshold"; 657 656 break; 657 + case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION: 658 + desc = "Active cable exception"; 659 + break; 658 660 } 659 661 660 662 if (!desc) ··· 1104 1100 } 1105 1101 1106 1102 /** 1107 - * mpt3sas_base_flush_reply_queues - flushing the MSIX reply queues 1103 + * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts 1108 1104 * @ioc: per adapter object 1109 - * Context: ISR conext 1105 + * Context: non ISR conext 1110 1106 * 1111 - * Called when a Task Management request has completed. We want 1112 - * to flush the other reply queues so all the outstanding IO has been 1113 - * completed back to OS before we process the TM completetion. 1107 + * Called when a Task Management request has completed. 1114 1108 * 1115 1109 * Return nothing. 
1116 1110 */ 1117 1111 void 1118 - mpt3sas_base_flush_reply_queues(struct MPT3SAS_ADAPTER *ioc) 1112 + mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc) 1119 1113 { 1120 1114 struct adapter_reply_queue *reply_q; 1121 1115 ··· 1124 1122 return; 1125 1123 1126 1124 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { 1127 - if (ioc->shost_recovery) 1125 + if (ioc->shost_recovery || ioc->remove_host || 1126 + ioc->pci_error_recovery) 1128 1127 return; 1129 1128 /* TMs are on msix_index == 0 */ 1130 1129 if (reply_q->msix_index == 0) 1131 1130 continue; 1132 - _base_interrupt(reply_q->vector, (void *)reply_q); 1131 + synchronize_irq(reply_q->vector); 1133 1132 } 1134 1133 } 1135 1134 ··· 3210 3207 sg_tablesize = MPT_MIN_PHYS_SEGMENTS; 3211 3208 else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) { 3212 3209 sg_tablesize = min_t(unsigned short, sg_tablesize, 3213 - SCSI_MAX_SG_CHAIN_SEGMENTS); 3210 + SG_MAX_SEGMENTS); 3214 3211 pr_warn(MPT3SAS_FMT 3215 3212 "sg_tablesize(%u) is bigger than kernel" 3216 - " defined SCSI_MAX_SG_SEGMENTS(%u)\n", ioc->name, 3213 + " defined SG_CHUNK_SIZE(%u)\n", ioc->name, 3217 3214 sg_tablesize, MPT_MAX_PHYS_SEGMENTS); 3218 3215 } 3219 3216 ioc->shost->sg_tablesize = sg_tablesize; ··· 4390 4387 Mpi2IOCInitRequest_t mpi_request; 4391 4388 Mpi2IOCInitReply_t mpi_reply; 4392 4389 int i, r = 0; 4393 - struct timeval current_time; 4390 + ktime_t current_time; 4394 4391 u16 ioc_status; 4395 4392 u32 reply_post_free_array_sz = 0; 4396 4393 Mpi2IOCInitRDPQArrayEntry *reply_post_free_array = NULL; ··· 4452 4449 /* This time stamp specifies number of milliseconds 4453 4450 * since epoch ~ midnight January 1, 1970. 
4454 4451 */ 4455 - do_gettimeofday(&current_time); 4456 - mpi_request.TimeStamp = cpu_to_le64((u64)current_time.tv_sec * 1000 + 4457 - (current_time.tv_usec / 1000)); 4452 + current_time = ktime_get_real(); 4453 + mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time)); 4458 4454 4459 4455 if (ioc->logging_level & MPT_DEBUG_INIT) { 4460 4456 __le32 *mfp; ··· 5426 5424 _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS); 5427 5425 _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED); 5428 5426 _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD); 5427 + if (ioc->hba_mpi_version_belonged == MPI26_VERSION) 5428 + _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION); 5429 5429 5430 5430 r = _base_make_ioc_operational(ioc, CAN_SLEEP); 5431 5431 if (r)
+7 -4
drivers/scsi/mpt3sas/mpt3sas_base.h
··· 73 73 #define MPT3SAS_DRIVER_NAME "mpt3sas" 74 74 #define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>" 75 75 #define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" 76 - #define MPT3SAS_DRIVER_VERSION "12.100.00.00" 77 - #define MPT3SAS_MAJOR_VERSION 12 76 + #define MPT3SAS_DRIVER_VERSION "13.100.00.00" 77 + #define MPT3SAS_MAJOR_VERSION 13 78 78 #define MPT3SAS_MINOR_VERSION 100 79 79 #define MPT3SAS_BUILD_VERSION 0 80 80 #define MPT3SAS_RELEASE_VERSION 00 ··· 90 90 /* 91 91 * Set MPT3SAS_SG_DEPTH value based on user input. 92 92 */ 93 - #define MPT_MAX_PHYS_SEGMENTS SCSI_MAX_SG_SEGMENTS 93 + #define MPT_MAX_PHYS_SEGMENTS SG_CHUNK_SIZE 94 94 #define MPT_MIN_PHYS_SEGMENTS 16 95 95 96 96 #ifdef CONFIG_SCSI_MPT3SAS_MAX_SGE ··· 111 111 #define MPT3SAS_SATA_QUEUE_DEPTH 32 112 112 #define MPT3SAS_SAS_QUEUE_DEPTH 254 113 113 #define MPT3SAS_RAID_QUEUE_DEPTH 128 114 + 115 + #define MPT3SAS_RAID_MAX_SECTORS 8192 114 116 115 117 #define MPT_NAME_LENGTH 32 /* generic length of strings */ 116 118 #define MPT_STRING_LENGTH 64 ··· 1236 1234 void *mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid); 1237 1235 __le32 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, 1238 1236 u16 smid); 1239 - void mpt3sas_base_flush_reply_queues(struct MPT3SAS_ADAPTER *ioc); 1237 + 1238 + void mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc); 1240 1239 1241 1240 /* hi-priority queue */ 1242 1241 u16 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
+29 -8
drivers/scsi/mpt3sas/mpt3sas_scsih.c
··· 174 174 * struct fw_event_work - firmware event struct 175 175 * @list: link list framework 176 176 * @work: work object (ioc->fault_reset_work_q) 177 - * @cancel_pending_work: flag set during reset handling 178 177 * @ioc: per adapter object 179 178 * @device_handle: device handle 180 179 * @VF_ID: virtual function id 181 180 * @VP_ID: virtual port id 182 181 * @ignore: flag meaning this event has been marked to ignore 183 - * @event: firmware event MPI2_EVENT_XXX defined in mpt2_ioc.h 182 + * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h 183 + * @refcount: kref for this event 184 184 * @event_data: reply event data payload follows 185 185 * 186 186 * This object stored on ioc->fw_event_list. ··· 188 188 struct fw_event_work { 189 189 struct list_head list; 190 190 struct work_struct work; 191 - u8 cancel_pending_work; 192 - struct delayed_work delayed_work; 193 191 194 192 struct MPT3SAS_ADAPTER *ioc; 195 193 u16 device_handle; ··· 1909 1911 (unsigned long long)raid_device->wwid, 1910 1912 raid_device->num_pds, ds); 1911 1913 1914 + if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) { 1915 + blk_queue_max_hw_sectors(sdev->request_queue, 1916 + MPT3SAS_RAID_MAX_SECTORS); 1917 + sdev_printk(KERN_INFO, sdev, 1918 + "Set queue's max_sector to: %u\n", 1919 + MPT3SAS_RAID_MAX_SECTORS); 1920 + } 1921 + 1912 1922 scsih_change_queue_depth(sdev, qdepth); 1913 1923 1914 1924 /* raid transport support */ ··· 2124 2118 return 1; 2125 2119 if (ioc->tm_cmds.smid != smid) 2126 2120 return 1; 2127 - mpt3sas_base_flush_reply_queues(ioc); 2128 2121 ioc->tm_cmds.status |= MPT3_CMD_COMPLETE; 2129 2122 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); 2130 2123 if (mpi_reply) { ··· 2307 2302 goto err_out; 2308 2303 } 2309 2304 } 2305 + 2306 + /* sync IRQs in case those were busy during flush. 
*/ 2307 + mpt3sas_base_sync_reply_irqs(ioc); 2310 2308 2311 2309 if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) { 2312 2310 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT); ··· 2812 2804 /* 2813 2805 * Wait on the fw_event to complete. If this returns 1, then 2814 2806 * the event was never executed, and we need a put for the 2815 - * reference the delayed_work had on the fw_event. 2807 + * reference the work had on the fw_event. 2816 2808 * 2817 2809 * If it did execute, we wait for it to finish, and the put will 2818 2810 * happen from _firmware_event_work() 2819 2811 */ 2820 - if (cancel_delayed_work_sync(&fw_event->delayed_work)) 2812 + if (cancel_work_sync(&fw_event->work)) 2821 2813 fw_event_work_put(fw_event); 2822 2814 2823 2815 fw_event_work_put(fw_event); ··· 3969 3961 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | 3970 3962 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; 3971 3963 mpi_request->CDB.EEDP32.PrimaryReferenceTag = 3972 - cpu_to_be32(scsi_get_lba(scmd)); 3964 + cpu_to_be32(scsi_prot_ref_tag(scmd)); 3973 3965 break; 3974 3966 3975 3967 case SCSI_PROT_DIF_TYPE3: ··· 7858 7850 Mpi2EventNotificationReply_t *mpi_reply; 7859 7851 u16 event; 7860 7852 u16 sz; 7853 + Mpi26EventDataActiveCableExcept_t *ActiveCableEventData; 7861 7854 7862 7855 /* events turned off due to host reset or driver unloading */ 7863 7856 if (ioc->remove_host || ioc->pci_error_recovery) ··· 7970 7961 _scsih_temp_threshold_events(ioc, 7971 7962 (Mpi2EventDataTemperature_t *) 7972 7963 mpi_reply->EventData); 7964 + break; 7965 + case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION: 7966 + ActiveCableEventData = 7967 + (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData; 7968 + if (ActiveCableEventData->ReasonCode == 7969 + MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER) 7970 + pr_info(MPT3SAS_FMT "Currently an active cable with ReceptacleID %d", 7971 + ioc->name, ActiveCableEventData->ReceptacleID); 7972 + pr_info("cannot be powered and devices connected to this active cable"); 7973 + pr_info("will 
not be seen. This active cable"); 7974 + pr_info("requires %d mW of power", 7975 + ActiveCableEventData->ActiveCablePowerRequirement); 7973 7976 break; 7974 7977 7975 7978 default: /* ignore the rest */
+1 -18
drivers/scsi/mvsas/mv_init.c
··· 704 704 .class_mask = 0, 705 705 .driver_data = chip_9445, 706 706 }, 707 - { 708 - .vendor = PCI_VENDOR_ID_MARVELL_EXT, 709 - .device = 0x9485, 710 - .subvendor = PCI_ANY_ID, 711 - .subdevice = 0x9480, 712 - .class = 0, 713 - .class_mask = 0, 714 - .driver_data = chip_9485, 715 - }, 716 - { 717 - .vendor = PCI_VENDOR_ID_MARVELL_EXT, 718 - .device = 0x9485, 719 - .subvendor = PCI_ANY_ID, 720 - .subdevice = 0x9485, 721 - .class = 0, 722 - .class_mask = 0, 723 - .driver_data = chip_9485, 724 - }, 707 + { PCI_VDEVICE(MARVELL_EXT, 0x9485), chip_9485 }, /* Marvell 9480/9485 (any vendor/model) */ 725 708 { PCI_VDEVICE(OCZ, 0x1021), chip_9485}, /* OCZ RevoDrive3 */ 726 709 { PCI_VDEVICE(OCZ, 0x1022), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ 727 710 { PCI_VDEVICE(OCZ, 0x1040), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
+9 -18
drivers/scsi/pas16.c
··· 1 - #define PSEUDO_DMA 2 - 3 1 /* 4 2 * This driver adapted from Drew Eckhardt's Trantor T128 driver 5 3 * ··· 75 77 76 78 #include <scsi/scsi_host.h> 77 79 #include "pas16.h" 78 - #define AUTOPROBE_IRQ 79 80 #include "NCR5380.h" 80 81 81 82 ··· 374 377 375 378 instance->io_port = io_port; 376 379 377 - if (NCR5380_init(instance, 0)) 380 + if (NCR5380_init(instance, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP)) 378 381 goto out_unregister; 379 382 380 383 NCR5380_maybe_reset_bus(instance); ··· 457 460 } 458 461 459 462 /* 460 - * Function : int NCR5380_pread (struct Scsi_Host *instance, 463 + * Function : int pas16_pread (struct Scsi_Host *instance, 461 464 * unsigned char *dst, int len) 462 465 * 463 466 * Purpose : Fast 5380 pseudo-dma read function, transfers len bytes to ··· 469 472 * timeout. 470 473 */ 471 474 472 - static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst, 473 - int len) { 475 + static inline int pas16_pread(struct Scsi_Host *instance, 476 + unsigned char *dst, int len) 477 + { 474 478 register unsigned char *d = dst; 475 479 register unsigned short reg = (unsigned short) (instance->io_port + 476 480 P_DATA_REG_OFFSET); 477 481 register int i = len; 478 482 int ii = 0; 479 - struct NCR5380_hostdata *hostdata = shost_priv(instance); 480 483 481 484 while ( !(inb(instance->io_port + P_STATUS_REG_OFFSET) & P_ST_RDY) ) 482 485 ++ii; ··· 489 492 instance->host_no); 490 493 return -1; 491 494 } 492 - if (ii > hostdata->spin_max_r) 493 - hostdata->spin_max_r = ii; 494 495 return 0; 495 496 } 496 497 497 498 /* 498 - * Function : int NCR5380_pwrite (struct Scsi_Host *instance, 499 + * Function : int pas16_pwrite (struct Scsi_Host *instance, 499 500 * unsigned char *src, int len) 500 501 * 501 502 * Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from ··· 505 510 * timeout. 
506 511 */ 507 512 508 - static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src, 509 - int len) { 513 + static inline int pas16_pwrite(struct Scsi_Host *instance, 514 + unsigned char *src, int len) 515 + { 510 516 register unsigned char *s = src; 511 517 register unsigned short reg = (instance->io_port + P_DATA_REG_OFFSET); 512 518 register int i = len; 513 519 int ii = 0; 514 - struct NCR5380_hostdata *hostdata = shost_priv(instance); 515 520 516 521 while ( !((inb(instance->io_port + P_STATUS_REG_OFFSET)) & P_ST_RDY) ) 517 522 ++ii; ··· 524 529 instance->host_no); 525 530 return -1; 526 531 } 527 - if (ii > hostdata->spin_max_w) 528 - hostdata->spin_max_w = ii; 529 532 return 0; 530 533 } 531 534 ··· 543 550 .detect = pas16_detect, 544 551 .release = pas16_release, 545 552 .proc_name = "pas16", 546 - .show_info = pas16_show_info, 547 - .write_info = pas16_write_info, 548 553 .info = pas16_info, 549 554 .queuecommand = pas16_queue_command, 550 555 .eh_abort_handler = pas16_abort,
+3 -2
drivers/scsi/pas16.h
··· 103 103 #define NCR5380_write(reg, value) ( outb((value),PAS16_io_port(reg)) ) 104 104 105 105 #define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize) 106 + #define NCR5380_dma_recv_setup pas16_pread 107 + #define NCR5380_dma_send_setup pas16_pwrite 108 + #define NCR5380_dma_residual(instance) (0) 106 109 107 110 #define NCR5380_intr pas16_intr 108 111 #define NCR5380_queue_command pas16_queue_command 109 112 #define NCR5380_abort pas16_abort 110 113 #define NCR5380_bus_reset pas16_bus_reset 111 114 #define NCR5380_info pas16_info 112 - #define NCR5380_show_info pas16_show_info 113 - #define NCR5380_write_info pas16_write_info 114 115 115 116 /* 15 14 12 10 7 5 3 116 117 1101 0100 1010 1000 */
-2
drivers/scsi/pm8001/pm8001_init.c
··· 418 418 if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) { 419 419 pm8001_ha->io_mem[logicalBar].membase = 420 420 pci_resource_start(pdev, bar); 421 - pm8001_ha->io_mem[logicalBar].membase &= 422 - (u32)PCI_BASE_ADDRESS_MEM_MASK; 423 421 pm8001_ha->io_mem[logicalBar].memsize = 424 422 pci_resource_len(pdev, bar); 425 423 pm8001_ha->io_mem[logicalBar].memvirtaddr =
+2 -3
drivers/scsi/qla2xxx/qla_mr.c
··· 6 6 */ 7 7 #include "qla_def.h" 8 8 #include <linux/delay.h> 9 + #include <linux/ktime.h> 9 10 #include <linux/pci.h> 10 11 #include <linux/ratelimit.h> 11 12 #include <linux/vmalloc.h> ··· 1813 1812 struct host_system_info *phost_info; 1814 1813 struct register_host_info *preg_hsi; 1815 1814 struct new_utsname *p_sysid = NULL; 1816 - struct timeval tv; 1817 1815 1818 1816 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 1819 1817 if (!sp) ··· 1886 1886 p_sysid->domainname, DOMNAME_LENGTH); 1887 1887 strncpy(phost_info->hostdriver, 1888 1888 QLA2XXX_VERSION, VERSION_LENGTH); 1889 - do_gettimeofday(&tv); 1890 - preg_hsi->utc = (uint64_t)tv.tv_sec; 1889 + preg_hsi->utc = (uint64_t)ktime_get_real_seconds(); 1891 1890 ql_dbg(ql_dbg_init, vha, 0x0149, 1892 1891 "ISP%04X: Host registration with firmware\n", 1893 1892 ha->pdev->device);
+1 -1
drivers/scsi/qla2xxx/qla_nx.c
··· 1229 1229 if (buf == NULL) { 1230 1230 ql_log(ql_log_fatal, vha, 0x010c, 1231 1231 "Unable to allocate memory.\n"); 1232 - return -1; 1232 + return -ENOMEM; 1233 1233 } 1234 1234 1235 1235 for (i = 0; i < n; i++) {
+1436 -1349
drivers/scsi/scsi_debug.c
··· 6 6 * anything out of the ordinary is seen. 7 7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 8 8 * 9 - * This version is more generic, simulating a variable number of disk 10 - * (or disk like devices) sharing a common amount of RAM. To be more 11 - * realistic, the simulated devices have the transport attributes of 12 - * SAS disks. 9 + * Copyright (C) 2001 - 2016 Douglas Gilbert 13 10 * 11 + * This program is free software; you can redistribute it and/or modify 12 + * it under the terms of the GNU General Public License as published by 13 + * the Free Software Foundation; either version 2, or (at your option) 14 + * any later version. 14 15 * 15 16 * For documentation see http://sg.danny.cz/sg/sdebug26.html 16 17 * 17 - * D. Gilbert (dpg) work for Magneto-Optical device test [20010421] 18 - * dpg: work for devfs large number of disks [20010809] 19 - * forked for lk 2.5 series [20011216, 20020101] 20 - * use vmalloc() more inquiry+mode_sense [20020302] 21 - * add timers for delayed responses [20020721] 22 - * Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031] 23 - * Mike Anderson <andmike@us.ibm.com> sysfs work [20021118] 24 - * dpg: change style of boot options to "scsi_debug.num_tgts=2" and 25 - * module options to "modprobe scsi_debug num_tgts=2" [20021221] 26 18 */ 27 19 28 20 ··· 24 32 25 33 #include <linux/kernel.h> 26 34 #include <linux/errno.h> 27 - #include <linux/timer.h> 35 + #include <linux/jiffies.h> 28 36 #include <linux/slab.h> 29 37 #include <linux/types.h> 30 38 #include <linux/string.h> ··· 41 49 #include <linux/interrupt.h> 42 50 #include <linux/atomic.h> 43 51 #include <linux/hrtimer.h> 52 + #include <linux/uuid.h> 44 53 45 54 #include <net/checksum.h> 46 55 ··· 59 66 #include "sd.h" 60 67 #include "scsi_logging.h" 61 68 62 - #define SCSI_DEBUG_VERSION "1.85" 63 - static const char *scsi_debug_version_date = "20141022"; 69 + /* make sure inq_product_rev string corresponds to this version */ 70 + 
#define SDEBUG_VERSION "1.86" 71 + static const char *sdebug_version_date = "20160430"; 64 72 65 73 #define MY_NAME "scsi_debug" 66 74 ··· 96 102 /* Additional Sense Code Qualifier (ASCQ) */ 97 103 #define ACK_NAK_TO 0x3 98 104 99 - 100 105 /* Default values for driver parameters */ 101 106 #define DEF_NUM_HOST 1 102 107 #define DEF_NUM_TGTS 1 ··· 104 111 * (id 0) containing 1 logical unit (lun 0). That is 1 device. 105 112 */ 106 113 #define DEF_ATO 1 107 - #define DEF_DELAY 1 /* if > 0 unit is a jiffy */ 114 + #define DEF_JDELAY 1 /* if > 0 unit is a jiffy */ 108 115 #define DEF_DEV_SIZE_MB 8 109 116 #define DEF_DIF 0 110 117 #define DEF_DIX 0 ··· 124 131 #define DEF_OPTS 0 125 132 #define DEF_OPT_BLKS 1024 126 133 #define DEF_PHYSBLK_EXP 0 127 - #define DEF_PTYPE 0 134 + #define DEF_PTYPE TYPE_DISK 128 135 #define DEF_REMOVABLE false 129 - #define DEF_SCSI_LEVEL 6 /* INQUIRY, byte2 [6->SPC-4] */ 136 + #define DEF_SCSI_LEVEL 7 /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */ 130 137 #define DEF_SECTOR_SIZE 512 131 138 #define DEF_UNMAP_ALIGNMENT 0 132 139 #define DEF_UNMAP_GRANULARITY 1 ··· 136 143 #define DEF_VPD_USE_HOSTNO 1 137 144 #define DEF_WRITESAME_LENGTH 0xFFFF 138 145 #define DEF_STRICT 0 139 - #define DELAY_OVERRIDDEN -9999 146 + #define DEF_STATISTICS false 147 + #define DEF_SUBMIT_QUEUES 1 148 + #define DEF_UUID_CTL 0 149 + #define JDELAY_OVERRIDDEN -9999 140 150 141 - /* bit mask values for scsi_debug_opts */ 142 - #define SCSI_DEBUG_OPT_NOISE 1 143 - #define SCSI_DEBUG_OPT_MEDIUM_ERR 2 144 - #define SCSI_DEBUG_OPT_TIMEOUT 4 145 - #define SCSI_DEBUG_OPT_RECOVERED_ERR 8 146 - #define SCSI_DEBUG_OPT_TRANSPORT_ERR 16 147 - #define SCSI_DEBUG_OPT_DIF_ERR 32 148 - #define SCSI_DEBUG_OPT_DIX_ERR 64 149 - #define SCSI_DEBUG_OPT_MAC_TIMEOUT 128 150 - #define SCSI_DEBUG_OPT_SHORT_TRANSFER 0x100 151 - #define SCSI_DEBUG_OPT_Q_NOISE 0x200 152 - #define SCSI_DEBUG_OPT_ALL_TSF 0x400 153 - #define SCSI_DEBUG_OPT_RARE_TSF 0x800 154 - #define SCSI_DEBUG_OPT_N_WCE 0x1000 
155 - #define SCSI_DEBUG_OPT_RESET_NOISE 0x2000 156 - #define SCSI_DEBUG_OPT_NO_CDB_NOISE 0x4000 157 - #define SCSI_DEBUG_OPT_ALL_NOISE (0x1 | 0x200 | 0x2000) 151 + #define SDEBUG_LUN_0_VAL 0 152 + 153 + /* bit mask values for sdebug_opts */ 154 + #define SDEBUG_OPT_NOISE 1 155 + #define SDEBUG_OPT_MEDIUM_ERR 2 156 + #define SDEBUG_OPT_TIMEOUT 4 157 + #define SDEBUG_OPT_RECOVERED_ERR 8 158 + #define SDEBUG_OPT_TRANSPORT_ERR 16 159 + #define SDEBUG_OPT_DIF_ERR 32 160 + #define SDEBUG_OPT_DIX_ERR 64 161 + #define SDEBUG_OPT_MAC_TIMEOUT 128 162 + #define SDEBUG_OPT_SHORT_TRANSFER 0x100 163 + #define SDEBUG_OPT_Q_NOISE 0x200 164 + #define SDEBUG_OPT_ALL_TSF 0x400 165 + #define SDEBUG_OPT_RARE_TSF 0x800 166 + #define SDEBUG_OPT_N_WCE 0x1000 167 + #define SDEBUG_OPT_RESET_NOISE 0x2000 168 + #define SDEBUG_OPT_NO_CDB_NOISE 0x4000 169 + #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \ 170 + SDEBUG_OPT_RESET_NOISE) 171 + #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \ 172 + SDEBUG_OPT_TRANSPORT_ERR | \ 173 + SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \ 174 + SDEBUG_OPT_SHORT_TRANSFER) 158 175 /* When "every_nth" > 0 then modulo "every_nth" commands: 159 - * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set 176 + * - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set 160 177 * - a RECOVERED_ERROR is simulated on successful read and write 161 - * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set. 178 + * commands if SDEBUG_OPT_RECOVERED_ERR is set. 162 179 * - a TRANSPORT_ERROR is simulated on successful read and write 163 - * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set. 180 + * commands if SDEBUG_OPT_TRANSPORT_ERR is set. 
 164 181 * 
 165 182 * When "every_nth" < 0 then after "- every_nth" commands: 
 166 - * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set 
 183 + * - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set 
 167 184 * - a RECOVERED_ERROR is simulated on successful read and write 
 168 - * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set. 
 185 + * commands if SDEBUG_OPT_RECOVERED_ERR is set. 
 169 186 * - a TRANSPORT_ERROR is simulated on successful read and write 
 170 - * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set. 
 187 + * commands if SDEBUG_OPT_TRANSPORT_ERR is set. 
 171 - * This will continue until some other action occurs (e.g. the user 
 172 - * writing a new value (other than -1 or 1) to every_nth via sysfs). 
 188 + * This will continue on every subsequent command until some other action 
 189 + * occurs (e.g. the user writing a new value (other than -1 or 1) to 
 190 + * every_nth via sysfs). 
 173 191 */ 
 174 192 
 175 - /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs)are returned in 
 193 + /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in 
 176 194 * priority order. In the subset implemented here lower numbers have higher 
 177 195 * priority. The UA numbers should be a sequence starting from 0 with 
 178 196 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. 
*/ ··· 196 192 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6 197 193 #define SDEBUG_NUM_UAS 7 198 194 199 - /* for check_readiness() */ 200 - #define UAS_ONLY 1 /* check for UAs only */ 201 - #define UAS_TUR 0 /* if no UAs then check if media access possible */ 202 - 203 - /* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this 195 + /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this 204 196 * sector on read commands: */ 205 197 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */ 206 198 #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */ ··· 205 205 * or "peripheral device" addressing (value 0) */ 206 206 #define SAM2_LUN_ADDRESS_METHOD 0 207 207 208 - /* SCSI_DEBUG_CANQUEUE is the maximum number of commands that can be queued 209 - * (for response) at one time. Can be reduced by max_queue option. Command 210 - * responses are not queued when delay=0 and ndelay=0. The per-device 211 - * DEF_CMD_PER_LUN can be changed via sysfs: 212 - * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth but cannot exceed 213 - * SCSI_DEBUG_CANQUEUE. */ 214 - #define SCSI_DEBUG_CANQUEUE_WORDS 9 /* a WORD is bits in a long */ 215 - #define SCSI_DEBUG_CANQUEUE (SCSI_DEBUG_CANQUEUE_WORDS * BITS_PER_LONG) 208 + /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued 209 + * (for response) per submit queue at one time. Can be reduced by max_queue 210 + * option. Command responses are not queued when jdelay=0 and ndelay=0. The 211 + * per-device DEF_CMD_PER_LUN can be changed via sysfs: 212 + * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth 213 + * but cannot exceed SDEBUG_CANQUEUE . 
214 + */ 215 + #define SDEBUG_CANQUEUE_WORDS 3 /* a WORD is bits in a long */ 216 + #define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG) 216 217 #define DEF_CMD_PER_LUN 255 217 218 218 - #if DEF_CMD_PER_LUN > SCSI_DEBUG_CANQUEUE 219 - #warning "Expect DEF_CMD_PER_LUN <= SCSI_DEBUG_CANQUEUE" 220 - #endif 219 + #define F_D_IN 1 220 + #define F_D_OUT 2 221 + #define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */ 222 + #define F_D_UNKN 8 223 + #define F_RL_WLUN_OK 0x10 224 + #define F_SKIP_UA 0x20 225 + #define F_DELAY_OVERR 0x40 226 + #define F_SA_LOW 0x80 /* cdb byte 1, bits 4 to 0 */ 227 + #define F_SA_HIGH 0x100 /* as used by variable length cdbs */ 228 + #define F_INV_OP 0x200 229 + #define F_FAKE_RW 0x400 230 + #define F_M_ACCESS 0x800 /* media access */ 221 231 222 - /* SCSI opcodes (first byte of cdb) mapped onto these indexes */ 232 + #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR) 233 + #define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW) 234 + #define FF_SA (F_SA_HIGH | F_SA_LOW) 235 + 236 + #define SDEBUG_MAX_PARTS 4 237 + 238 + #define SDEBUG_MAX_CMD_LEN 32 239 + 240 + 241 + struct sdebug_dev_info { 242 + struct list_head dev_list; 243 + unsigned int channel; 244 + unsigned int target; 245 + u64 lun; 246 + uuid_be lu_name; 247 + struct sdebug_host_info *sdbg_host; 248 + unsigned long uas_bm[1]; 249 + atomic_t num_in_q; 250 + atomic_t stopped; 251 + bool used; 252 + }; 253 + 254 + struct sdebug_host_info { 255 + struct list_head host_list; 256 + struct Scsi_Host *shost; 257 + struct device dev; 258 + struct list_head dev_info_list; 259 + }; 260 + 261 + #define to_sdebug_host(d) \ 262 + container_of(d, struct sdebug_host_info, dev) 263 + 264 + struct sdebug_defer { 265 + struct hrtimer hrt; 266 + struct execute_work ew; 267 + int sqa_idx; /* index of sdebug_queue array */ 268 + int qc_idx; /* index of sdebug_queued_cmd array within sqa_idx */ 269 + int issuing_cpu; 270 + }; 271 + 272 + struct sdebug_queued_cmd { 273 + /* corresponding bit set 
in in_use_bm[] in owning struct sdebug_queue 274 + * instance indicates this slot is in use. 275 + */ 276 + struct sdebug_defer *sd_dp; 277 + struct scsi_cmnd *a_cmnd; 278 + unsigned int inj_recovered:1; 279 + unsigned int inj_transport:1; 280 + unsigned int inj_dif:1; 281 + unsigned int inj_dix:1; 282 + unsigned int inj_short:1; 283 + }; 284 + 285 + struct sdebug_queue { 286 + struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE]; 287 + unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS]; 288 + spinlock_t qc_lock; 289 + atomic_t blocked; /* to temporarily stop more being queued */ 290 + }; 291 + 292 + static atomic_t sdebug_cmnd_count; /* number of incoming commands */ 293 + static atomic_t sdebug_completions; /* count of deferred completions */ 294 + static atomic_t sdebug_miss_cpus; /* submission + completion cpus differ */ 295 + static atomic_t sdebug_a_tsf; /* 'almost task set full' counter */ 296 + 297 + struct opcode_info_t { 298 + u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff */ 299 + /* for terminating element */ 300 + u8 opcode; /* if num_attached > 0, preferred */ 301 + u16 sa; /* service action */ 302 + u32 flags; /* OR-ed set of SDEB_F_* */ 303 + int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *); 304 + const struct opcode_info_t *arrp; /* num_attached elements or NULL */ 305 + u8 len_mask[16]; /* len=len_mask[0], then mask for cdb[1]... 
*/ 306 + /* ignore cdb bytes after position 15 */ 307 + }; 308 + 309 + /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */ 223 310 enum sdeb_opcode_index { 224 311 SDEB_I_INVALID_OPCODE = 0, 225 312 SDEB_I_INQUIRY = 1, ··· 341 254 SDEB_I_LAST_ELEMENT = 30, /* keep this last */ 342 255 }; 343 256 257 + 344 258 static const unsigned char opcode_ind_arr[256] = { 345 259 /* 0x0; 0x0->0x1f: 6 byte cdbs */ 346 260 SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE, ··· 362 274 0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE, 363 275 SDEB_I_RELEASE, 364 276 0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0, 365 - /* 0x60; 0x60->0x7d are reserved */ 277 + /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */ 366 278 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 367 279 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 368 280 0, SDEB_I_VARIABLE_LEN, ··· 385 297 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 386 298 }; 387 299 388 - #define F_D_IN 1 389 - #define F_D_OUT 2 390 - #define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */ 391 - #define F_D_UNKN 8 392 - #define F_RL_WLUN_OK 0x10 393 - #define F_SKIP_UA 0x20 394 - #define F_DELAY_OVERR 0x40 395 - #define F_SA_LOW 0x80 /* cdb byte 1, bits 4 to 0 */ 396 - #define F_SA_HIGH 0x100 /* as used by variable length cdbs */ 397 - #define F_INV_OP 0x200 398 - #define F_FAKE_RW 0x400 399 - #define F_M_ACCESS 0x800 /* media access */ 400 - 401 - #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR) 402 - #define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW) 403 - #define FF_SA (F_SA_HIGH | F_SA_LOW) 404 - 405 - struct sdebug_dev_info; 406 300 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *); 407 301 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *); 408 302 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *); ··· 406 336 static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *); 407 337 static int 
resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *); 408 338 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *); 409 - 410 - struct opcode_info_t { 411 - u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff 412 - * for terminating element */ 413 - u8 opcode; /* if num_attached > 0, preferred */ 414 - u16 sa; /* service action */ 415 - u32 flags; /* OR-ed set of SDEB_F_* */ 416 - int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *); 417 - const struct opcode_info_t *arrp; /* num_attached elements or NULL */ 418 - u8 len_mask[16]; /* len=len_mask[0], then mask for cdb[1]... */ 419 - /* ignore cdb bytes after position 15 */ 420 - }; 421 339 422 340 static const struct opcode_info_t msense_iarr[1] = { 423 341 {0, 0x1a, 0, F_D_IN, NULL, NULL, ··· 567 509 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, 568 510 }; 569 511 570 - struct sdebug_scmd_extra_t { 571 - bool inj_recovered; 572 - bool inj_transport; 573 - bool inj_dif; 574 - bool inj_dix; 575 - bool inj_short; 576 - }; 577 - 578 - static int scsi_debug_add_host = DEF_NUM_HOST; 579 - static int scsi_debug_ato = DEF_ATO; 580 - static int scsi_debug_delay = DEF_DELAY; 581 - static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB; 582 - static int scsi_debug_dif = DEF_DIF; 583 - static int scsi_debug_dix = DEF_DIX; 584 - static int scsi_debug_dsense = DEF_D_SENSE; 585 - static int scsi_debug_every_nth = DEF_EVERY_NTH; 586 - static int scsi_debug_fake_rw = DEF_FAKE_RW; 587 - static unsigned int scsi_debug_guard = DEF_GUARD; 588 - static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED; 589 - static int scsi_debug_max_luns = DEF_MAX_LUNS; 590 - static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE; 512 + static int sdebug_add_host = DEF_NUM_HOST; 513 + static int sdebug_ato = DEF_ATO; 514 + static int sdebug_jdelay = DEF_JDELAY; /* if > 0 then unit is jiffies */ 515 + static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB; 516 + static int sdebug_dif = DEF_DIF; 517 + static int 
sdebug_dix = DEF_DIX; 518 + static int sdebug_dsense = DEF_D_SENSE; 519 + static int sdebug_every_nth = DEF_EVERY_NTH; 520 + static int sdebug_fake_rw = DEF_FAKE_RW; 521 + static unsigned int sdebug_guard = DEF_GUARD; 522 + static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED; 523 + static int sdebug_max_luns = DEF_MAX_LUNS; 524 + static int sdebug_max_queue = SDEBUG_CANQUEUE; /* per submit queue */ 591 525 static atomic_t retired_max_queue; /* if > 0 then was prior max_queue */ 592 - static int scsi_debug_ndelay = DEF_NDELAY; 593 - static int scsi_debug_no_lun_0 = DEF_NO_LUN_0; 594 - static int scsi_debug_no_uld = 0; 595 - static int scsi_debug_num_parts = DEF_NUM_PARTS; 596 - static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */ 597 - static int scsi_debug_opt_blks = DEF_OPT_BLKS; 598 - static int scsi_debug_opts = DEF_OPTS; 599 - static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP; 600 - static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */ 601 - static int scsi_debug_scsi_level = DEF_SCSI_LEVEL; 602 - static int scsi_debug_sector_size = DEF_SECTOR_SIZE; 603 - static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB; 604 - static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO; 605 - static unsigned int scsi_debug_lbpu = DEF_LBPU; 606 - static unsigned int scsi_debug_lbpws = DEF_LBPWS; 607 - static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10; 608 - static unsigned int scsi_debug_lbprz = DEF_LBPRZ; 609 - static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT; 610 - static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY; 611 - static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS; 612 - static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC; 613 - static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH; 614 - static bool scsi_debug_removable = DEF_REMOVABLE; 615 - static bool scsi_debug_clustering; 616 - static bool scsi_debug_host_lock = 
DEF_HOST_LOCK; 617 - static bool scsi_debug_strict = DEF_STRICT; 526 + static int sdebug_ndelay = DEF_NDELAY; /* if > 0 then unit is nanoseconds */ 527 + static int sdebug_no_lun_0 = DEF_NO_LUN_0; 528 + static int sdebug_no_uld; 529 + static int sdebug_num_parts = DEF_NUM_PARTS; 530 + static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */ 531 + static int sdebug_opt_blks = DEF_OPT_BLKS; 532 + static int sdebug_opts = DEF_OPTS; 533 + static int sdebug_physblk_exp = DEF_PHYSBLK_EXP; 534 + static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */ 535 + static int sdebug_scsi_level = DEF_SCSI_LEVEL; 536 + static int sdebug_sector_size = DEF_SECTOR_SIZE; 537 + static int sdebug_virtual_gb = DEF_VIRTUAL_GB; 538 + static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO; 539 + static unsigned int sdebug_lbpu = DEF_LBPU; 540 + static unsigned int sdebug_lbpws = DEF_LBPWS; 541 + static unsigned int sdebug_lbpws10 = DEF_LBPWS10; 542 + static unsigned int sdebug_lbprz = DEF_LBPRZ; 543 + static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT; 544 + static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY; 545 + static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS; 546 + static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC; 547 + static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH; 548 + static int sdebug_uuid_ctl = DEF_UUID_CTL; 549 + static bool sdebug_removable = DEF_REMOVABLE; 550 + static bool sdebug_clustering; 551 + static bool sdebug_host_lock = DEF_HOST_LOCK; 552 + static bool sdebug_strict = DEF_STRICT; 618 553 static bool sdebug_any_injecting_opt; 619 - 620 - static atomic_t sdebug_cmnd_count; 621 - static atomic_t sdebug_completions; 622 - static atomic_t sdebug_a_tsf; /* counter of 'almost' TSFs */ 623 - 624 - #define DEV_READONLY(TGT) (0) 554 + static bool sdebug_verbose; 555 + static bool have_dif_prot; 556 + static bool sdebug_statistics = DEF_STATISTICS; 557 + static bool 
sdebug_mq_active; 625 558 626 559 static unsigned int sdebug_store_sectors; 627 560 static sector_t sdebug_capacity; /* in sectors */ ··· 623 574 static int sdebug_cylinders_per; /* cylinders per surface */ 624 575 static int sdebug_sectors_per; /* sectors per cylinder */ 625 576 626 - #define SDEBUG_MAX_PARTS 4 627 - 628 - #define SCSI_DEBUG_MAX_CMD_LEN 32 629 - 630 - static unsigned int scsi_debug_lbp(void) 631 - { 632 - return ((0 == scsi_debug_fake_rw) && 633 - (scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10)); 634 - } 635 - 636 - struct sdebug_dev_info { 637 - struct list_head dev_list; 638 - unsigned int channel; 639 - unsigned int target; 640 - u64 lun; 641 - struct sdebug_host_info *sdbg_host; 642 - unsigned long uas_bm[1]; 643 - atomic_t num_in_q; 644 - char stopped; /* TODO: should be atomic */ 645 - bool used; 646 - }; 647 - 648 - struct sdebug_host_info { 649 - struct list_head host_list; 650 - struct Scsi_Host *shost; 651 - struct device dev; 652 - struct list_head dev_info_list; 653 - }; 654 - 655 - #define to_sdebug_host(d) \ 656 - container_of(d, struct sdebug_host_info, dev) 657 - 658 577 static LIST_HEAD(sdebug_host_list); 659 578 static DEFINE_SPINLOCK(sdebug_host_list_lock); 660 579 661 - 662 - struct sdebug_hrtimer { /* ... 
is derived from hrtimer */ 663 - struct hrtimer hrt; /* must be first element */ 664 - int qa_indx; 665 - }; 666 - 667 - struct sdebug_queued_cmd { 668 - /* in_use flagged by a bit in queued_in_use_bm[] */ 669 - struct timer_list *cmnd_timerp; 670 - struct tasklet_struct *tletp; 671 - struct sdebug_hrtimer *sd_hrtp; 672 - struct scsi_cmnd * a_cmnd; 673 - }; 674 - static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE]; 675 - static unsigned long queued_in_use_bm[SCSI_DEBUG_CANQUEUE_WORDS]; 676 - 677 - 678 - static unsigned char * fake_storep; /* ramdisk storage */ 580 + static unsigned char *fake_storep; /* ramdisk storage */ 679 581 static struct sd_dif_tuple *dif_storep; /* protection info */ 680 582 static void *map_storep; /* provisioning map */ 681 583 ··· 640 640 static int dix_reads; 641 641 static int dif_errors; 642 642 643 - static DEFINE_SPINLOCK(queued_arr_lock); 643 + static int submit_queues = DEF_SUBMIT_QUEUES; /* > 1 for multi-queue (mq) */ 644 + static struct sdebug_queue *sdebug_q_arr; /* ptr to array of submit queues */ 645 + 644 646 static DEFINE_RWLOCK(atomic_rw); 645 647 646 648 static char sdebug_proc_name[] = MY_NAME; ··· 664 662 static const int device_qfull_result = 665 663 (DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL; 666 664 667 - static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0, 668 - 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 669 - 0, 0, 0, 0}; 670 - static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0, 671 - 0, 0, 0x2, 0x4b}; 672 - static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0, 673 - 0, 0, 0x0, 0x0}; 665 + 666 + /* Only do the extra work involved in logical block provisioning if one or 667 + * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing 668 + * real reads and writes (i.e. not skipping them for speed). 
669 + */ 670 + static inline bool scsi_debug_lbp(void) 671 + { 672 + return 0 == sdebug_fake_rw && 673 + (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10); 674 + } 674 675 675 676 static void *fake_store(unsigned long long lba) 676 677 { 677 678 lba = do_div(lba, sdebug_store_sectors); 678 679 679 - return fake_storep + lba * scsi_debug_sector_size; 680 + return fake_storep + lba * sdebug_sector_size; 680 681 } 681 682 682 683 static struct sd_dif_tuple *dif_store(sector_t sector) ··· 688 683 689 684 return dif_storep + sector; 690 685 } 691 - 692 - static int sdebug_add_adapter(void); 693 - static void sdebug_remove_adapter(void); 694 686 695 687 static void sdebug_max_tgts_luns(void) 696 688 { ··· 698 696 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) { 699 697 hpnt = sdbg_host->shost; 700 698 if ((hpnt->this_id >= 0) && 701 - (scsi_debug_num_tgts > hpnt->this_id)) 702 - hpnt->max_id = scsi_debug_num_tgts + 1; 699 + (sdebug_num_tgts > hpnt->this_id)) 700 + hpnt->max_id = sdebug_num_tgts + 1; 703 701 else 704 - hpnt->max_id = scsi_debug_num_tgts; 705 - /* scsi_debug_max_luns; */ 702 + hpnt->max_id = sdebug_num_tgts; 703 + /* sdebug_max_luns; */ 706 704 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1; 707 705 } 708 706 spin_unlock(&sdebug_host_list_lock); ··· 711 709 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1}; 712 710 713 711 /* Set in_bit to -1 to indicate no bit position of invalid field */ 714 - static void 715 - mk_sense_invalid_fld(struct scsi_cmnd *scp, enum sdeb_cmd_data c_d, 716 - int in_byte, int in_bit) 712 + static void mk_sense_invalid_fld(struct scsi_cmnd *scp, 713 + enum sdeb_cmd_data c_d, 714 + int in_byte, int in_bit) 717 715 { 718 716 unsigned char *sbuff; 719 717 u8 sks[4]; ··· 727 725 } 728 726 asc = c_d ? 
INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST; 729 727 memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE); 730 - scsi_build_sense_buffer(scsi_debug_dsense, sbuff, ILLEGAL_REQUEST, 731 - asc, 0); 728 + scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0); 732 729 memset(sks, 0, sizeof(sks)); 733 730 sks[0] = 0x80; 734 731 if (c_d) ··· 737 736 sks[0] |= 0x7 & in_bit; 738 737 } 739 738 put_unaligned_be16(in_byte, sks + 1); 740 - if (scsi_debug_dsense) { 739 + if (sdebug_dsense) { 741 740 sl = sbuff[7] + 8; 742 741 sbuff[7] = sl; 743 742 sbuff[sl] = 0x2; ··· 745 744 memcpy(sbuff + sl + 4, sks, 3); 746 745 } else 747 746 memcpy(sbuff + 15, sks, 3); 748 - if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) 747 + if (sdebug_verbose) 749 748 sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq" 750 749 "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n", 751 750 my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit); ··· 763 762 } 764 763 memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE); 765 764 766 - scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq); 765 + scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq); 767 766 768 - if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) 767 + if (sdebug_verbose) 769 768 sdev_printk(KERN_INFO, scp->device, 770 769 "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n", 771 770 my_name, key, asc, asq); 772 771 } 773 772 774 - static void 775 - mk_sense_invalid_opcode(struct scsi_cmnd *scp) 773 + static void mk_sense_invalid_opcode(struct scsi_cmnd *scp) 776 774 { 777 775 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0); 778 776 } 779 777 780 778 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg) 781 779 { 782 - if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) { 780 + if (sdebug_verbose) { 783 781 if (0x1261 == cmd) 784 782 sdev_printk(KERN_INFO, dev, 785 783 "%s: BLKFLSBUF [0x1261]\n", __func__); ··· 810 810 spin_unlock(&sdebug_host_list_lock); 811 811 } 812 812 813 - static int check_readiness(struct scsi_cmnd 
*SCpnt, int uas_only, 814 - struct sdebug_dev_info * devip) 813 + static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) 815 814 { 816 815 int k; 817 - bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts); 818 816 819 817 k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS); 820 818 if (k != SDEBUG_NUM_UAS) { ··· 820 822 821 823 switch (k) { 822 824 case SDEBUG_UA_POR: 823 - mk_sense_buffer(SCpnt, UNIT_ATTENTION, 824 - UA_RESET_ASC, POWER_ON_RESET_ASCQ); 825 - if (debug) 825 + mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC, 826 + POWER_ON_RESET_ASCQ); 827 + if (sdebug_verbose) 826 828 cp = "power on reset"; 827 829 break; 828 830 case SDEBUG_UA_BUS_RESET: 829 - mk_sense_buffer(SCpnt, UNIT_ATTENTION, 830 - UA_RESET_ASC, BUS_RESET_ASCQ); 831 - if (debug) 831 + mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC, 832 + BUS_RESET_ASCQ); 833 + if (sdebug_verbose) 832 834 cp = "bus reset"; 833 835 break; 834 836 case SDEBUG_UA_MODE_CHANGED: 835 - mk_sense_buffer(SCpnt, UNIT_ATTENTION, 836 - UA_CHANGED_ASC, MODE_CHANGED_ASCQ); 837 - if (debug) 837 + mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC, 838 + MODE_CHANGED_ASCQ); 839 + if (sdebug_verbose) 838 840 cp = "mode parameters changed"; 839 841 break; 840 842 case SDEBUG_UA_CAPACITY_CHANGED: 841 - mk_sense_buffer(SCpnt, UNIT_ATTENTION, 842 - UA_CHANGED_ASC, CAPACITY_CHANGED_ASCQ); 843 - if (debug) 843 + mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC, 844 + CAPACITY_CHANGED_ASCQ); 845 + if (sdebug_verbose) 844 846 cp = "capacity data changed"; 845 847 break; 846 848 case SDEBUG_UA_MICROCODE_CHANGED: 847 - mk_sense_buffer(SCpnt, UNIT_ATTENTION, 848 - TARGET_CHANGED_ASC, MICROCODE_CHANGED_ASCQ); 849 - if (debug) 849 + mk_sense_buffer(scp, UNIT_ATTENTION, 850 + TARGET_CHANGED_ASC, 851 + MICROCODE_CHANGED_ASCQ); 852 + if (sdebug_verbose) 850 853 cp = "microcode has been changed"; 851 854 break; 852 855 case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET: 853 - mk_sense_buffer(SCpnt, UNIT_ATTENTION, 856 + 
mk_sense_buffer(scp, UNIT_ATTENTION, 854 857 TARGET_CHANGED_ASC, 855 858 MICROCODE_CHANGED_WO_RESET_ASCQ); 856 - if (debug) 859 + if (sdebug_verbose) 857 860 cp = "microcode has been changed without reset"; 858 861 break; 859 862 case SDEBUG_UA_LUNS_CHANGED: ··· 863 864 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN 864 865 * on the target, until a REPORT LUNS command is 865 866 * received. SPC-4 behavior is to report it only once. 866 - * NOTE: scsi_debug_scsi_level does not use the same 867 + * NOTE: sdebug_scsi_level does not use the same 867 868 * values as struct scsi_device->scsi_level. 868 869 */ 869 - if (scsi_debug_scsi_level >= 6) /* SPC-4 and above */ 870 + if (sdebug_scsi_level >= 6) /* SPC-4 and above */ 870 871 clear_luns_changed_on_target(devip); 871 - mk_sense_buffer(SCpnt, UNIT_ATTENTION, 872 + mk_sense_buffer(scp, UNIT_ATTENTION, 872 873 TARGET_CHANGED_ASC, 873 874 LUNS_CHANGED_ASCQ); 874 - if (debug) 875 + if (sdebug_verbose) 875 876 cp = "reported luns data has changed"; 876 877 break; 877 878 default: 878 - pr_warn("%s: unexpected unit attention code=%d\n", 879 - __func__, k); 880 - if (debug) 879 + pr_warn("unexpected unit attention code=%d\n", k); 880 + if (sdebug_verbose) 881 881 cp = "unknown"; 882 882 break; 883 883 } 884 884 clear_bit(k, devip->uas_bm); 885 - if (debug) 886 - sdev_printk(KERN_INFO, SCpnt->device, 885 + if (sdebug_verbose) 886 + sdev_printk(KERN_INFO, scp->device, 887 887 "%s reports: Unit attention: %s\n", 888 888 my_name, cp); 889 - return check_condition_result; 890 - } 891 - if ((UAS_TUR == uas_only) && devip->stopped) { 892 - mk_sense_buffer(SCpnt, NOT_READY, LOGICAL_UNIT_NOT_READY, 893 - 0x2); 894 - if (debug) 895 - sdev_printk(KERN_INFO, SCpnt->device, 896 - "%s reports: Not ready: %s\n", my_name, 897 - "initializing command required"); 898 889 return check_condition_result; 899 890 } 900 891 return 0; ··· 900 911 if (!sdb->length) 901 912 return 0; 902 913 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction 
== DMA_FROM_DEVICE)) 903 - return (DID_ERROR << 16); 914 + return DID_ERROR << 16; 904 915 905 916 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents, 906 917 arr, arr_len); ··· 924 935 925 936 static const char * inq_vendor_id = "Linux "; 926 937 static const char * inq_product_id = "scsi_debug "; 927 - static const char *inq_product_rev = "0184"; /* version less '.' */ 938 + static const char *inq_product_rev = "0186"; /* version less '.' */ 939 + /* Use some locally assigned NAAs for SAS addresses. */ 940 + static const u64 naa3_comp_a = 0x3222222000000000ULL; 941 + static const u64 naa3_comp_b = 0x3333333000000000ULL; 942 + static const u64 naa3_comp_c = 0x3111111000000000ULL; 928 943 929 944 /* Device identification VPD page. Returns number of bytes placed in arr */ 930 - static int inquiry_evpd_83(unsigned char * arr, int port_group_id, 931 - int target_dev_id, int dev_id_num, 932 - const char * dev_id_str, 933 - int dev_id_str_len) 945 + static int inquiry_vpd_83(unsigned char *arr, int port_group_id, 946 + int target_dev_id, int dev_id_num, 947 + const char *dev_id_str, int dev_id_str_len, 948 + const uuid_be *lu_name) 934 949 { 935 950 int num, port_a; 936 951 char b[32]; ··· 951 958 arr[3] = num; 952 959 num += 4; 953 960 if (dev_id_num >= 0) { 954 - /* NAA-5, Logical unit identifier (binary) */ 955 - arr[num++] = 0x1; /* binary (not necessarily sas) */ 956 - arr[num++] = 0x3; /* PIV=0, lu, naa */ 957 - arr[num++] = 0x0; 958 - arr[num++] = 0x8; 959 - arr[num++] = 0x53; /* naa-5 ieee company id=0x333333 (fake) */ 960 - arr[num++] = 0x33; 961 - arr[num++] = 0x33; 962 - arr[num++] = 0x30; 963 - arr[num++] = (dev_id_num >> 24); 964 - arr[num++] = (dev_id_num >> 16) & 0xff; 965 - arr[num++] = (dev_id_num >> 8) & 0xff; 966 - arr[num++] = dev_id_num & 0xff; 961 + if (sdebug_uuid_ctl) { 962 + /* Locally assigned UUID */ 963 + arr[num++] = 0x1; /* binary (not necessarily sas) */ 964 + arr[num++] = 0xa; /* PIV=0, lu, naa */ 965 + arr[num++] = 0x0; 966 + 
arr[num++] = 0x12; 967 + arr[num++] = 0x10; /* uuid type=1, locally assigned */ 968 + arr[num++] = 0x0; 969 + memcpy(arr + num, lu_name, 16); 970 + num += 16; 971 + } else { 972 + /* NAA-3, Logical unit identifier (binary) */ 973 + arr[num++] = 0x1; /* binary (not necessarily sas) */ 974 + arr[num++] = 0x3; /* PIV=0, lu, naa */ 975 + arr[num++] = 0x0; 976 + arr[num++] = 0x8; 977 + put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num); 978 + num += 8; 979 + } 967 980 /* Target relative port number */ 968 981 arr[num++] = 0x61; /* proto=sas, binary */ 969 982 arr[num++] = 0x94; /* PIV=1, target port, rel port */ ··· 980 981 arr[num++] = 0x0; 981 982 arr[num++] = 0x1; /* relative port A */ 982 983 } 983 - /* NAA-5, Target port identifier */ 984 + /* NAA-3, Target port identifier */ 984 985 arr[num++] = 0x61; /* proto=sas, binary */ 985 986 arr[num++] = 0x93; /* piv=1, target port, naa */ 986 987 arr[num++] = 0x0; 987 988 arr[num++] = 0x8; 988 - arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */ 989 - arr[num++] = 0x22; 990 - arr[num++] = 0x22; 991 - arr[num++] = 0x20; 992 - arr[num++] = (port_a >> 24); 993 - arr[num++] = (port_a >> 16) & 0xff; 994 - arr[num++] = (port_a >> 8) & 0xff; 995 - arr[num++] = port_a & 0xff; 996 - /* NAA-5, Target port group identifier */ 989 + put_unaligned_be64(naa3_comp_a + port_a, arr + num); 990 + num += 8; 991 + /* NAA-3, Target port group identifier */ 997 992 arr[num++] = 0x61; /* proto=sas, binary */ 998 993 arr[num++] = 0x95; /* piv=1, target port group id */ 999 994 arr[num++] = 0x0; 1000 995 arr[num++] = 0x4; 1001 996 arr[num++] = 0; 1002 997 arr[num++] = 0; 1003 - arr[num++] = (port_group_id >> 8) & 0xff; 1004 - arr[num++] = port_group_id & 0xff; 1005 - /* NAA-5, Target device identifier */ 998 + put_unaligned_be16(port_group_id, arr + num); 999 + num += 2; 1000 + /* NAA-3, Target device identifier */ 1006 1001 arr[num++] = 0x61; /* proto=sas, binary */ 1007 1002 arr[num++] = 0xa3; /* piv=1, target device, naa */ 1008 
1003 arr[num++] = 0x0; 1009 1004 arr[num++] = 0x8; 1010 - arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */ 1011 - arr[num++] = 0x22; 1012 - arr[num++] = 0x22; 1013 - arr[num++] = 0x20; 1014 - arr[num++] = (target_dev_id >> 24); 1015 - arr[num++] = (target_dev_id >> 16) & 0xff; 1016 - arr[num++] = (target_dev_id >> 8) & 0xff; 1017 - arr[num++] = target_dev_id & 0xff; 1005 + put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num); 1006 + num += 8; 1018 1007 /* SCSI name string: Target device identifier */ 1019 1008 arr[num++] = 0x63; /* proto=sas, UTF-8 */ 1020 1009 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */ 1021 1010 arr[num++] = 0x0; 1022 1011 arr[num++] = 24; 1023 - memcpy(arr + num, "naa.52222220", 12); 1012 + memcpy(arr + num, "naa.32222220", 12); 1024 1013 num += 12; 1025 1014 snprintf(b, sizeof(b), "%08X", target_dev_id); 1026 1015 memcpy(arr + num, b, 8); ··· 1018 1031 return num; 1019 1032 } 1020 1033 1021 - 1022 1034 static unsigned char vpd84_data[] = { 1023 1035 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0, 1024 1036 0x22,0x22,0x22,0x0,0xbb,0x1, ··· 1025 1039 }; 1026 1040 1027 1041 /* Software interface identification VPD page */ 1028 - static int inquiry_evpd_84(unsigned char * arr) 1042 + static int inquiry_vpd_84(unsigned char *arr) 1029 1043 { 1030 1044 memcpy(arr, vpd84_data, sizeof(vpd84_data)); 1031 1045 return sizeof(vpd84_data); 1032 1046 } 1033 1047 1034 1048 /* Management network addresses VPD page */ 1035 - static int inquiry_evpd_85(unsigned char * arr) 1049 + static int inquiry_vpd_85(unsigned char *arr) 1036 1050 { 1037 1051 int num = 0; 1038 1052 const char * na1 = "https://www.kernel.org/config"; ··· 1067 1081 } 1068 1082 1069 1083 /* SCSI ports VPD page */ 1070 - static int inquiry_evpd_88(unsigned char * arr, int target_dev_id) 1084 + static int inquiry_vpd_88(unsigned char *arr, int target_dev_id) 1071 1085 { 1072 1086 int num = 0; 1073 1087 int port_a, port_b; ··· 1087 1101 arr[num++] = 0x93; /* 
PIV=1, target port, NAA */ 1088 1102 arr[num++] = 0x0; /* reserved */ 1089 1103 arr[num++] = 0x8; /* length */ 1090 - arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */ 1091 - arr[num++] = 0x22; 1092 - arr[num++] = 0x22; 1093 - arr[num++] = 0x20; 1094 - arr[num++] = (port_a >> 24); 1095 - arr[num++] = (port_a >> 16) & 0xff; 1096 - arr[num++] = (port_a >> 8) & 0xff; 1097 - arr[num++] = port_a & 0xff; 1098 - 1104 + put_unaligned_be64(naa3_comp_a + port_a, arr + num); 1105 + num += 8; 1099 1106 arr[num++] = 0x0; /* reserved */ 1100 1107 arr[num++] = 0x0; /* reserved */ 1101 1108 arr[num++] = 0x0; ··· 1102 1123 arr[num++] = 0x93; /* PIV=1, target port, NAA */ 1103 1124 arr[num++] = 0x0; /* reserved */ 1104 1125 arr[num++] = 0x8; /* length */ 1105 - arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */ 1106 - arr[num++] = 0x22; 1107 - arr[num++] = 0x22; 1108 - arr[num++] = 0x20; 1109 - arr[num++] = (port_b >> 24); 1110 - arr[num++] = (port_b >> 16) & 0xff; 1111 - arr[num++] = (port_b >> 8) & 0xff; 1112 - arr[num++] = port_b & 0xff; 1126 + put_unaligned_be64(naa3_comp_a + port_b, arr + num); 1127 + num += 8; 1113 1128 1114 1129 return num; 1115 1130 } ··· 1154 1181 }; 1155 1182 1156 1183 /* ATA Information VPD page */ 1157 - static int inquiry_evpd_89(unsigned char * arr) 1184 + static int inquiry_vpd_89(unsigned char *arr) 1158 1185 { 1159 1186 memcpy(arr, vpd89_data, sizeof(vpd89_data)); 1160 1187 return sizeof(vpd89_data); ··· 1169 1196 }; 1170 1197 1171 1198 /* Block limits VPD page (SBC-3) */ 1172 - static int inquiry_evpd_b0(unsigned char * arr) 1199 + static int inquiry_vpd_b0(unsigned char *arr) 1173 1200 { 1174 1201 unsigned int gran; 1175 1202 1176 1203 memcpy(arr, vpdb0_data, sizeof(vpdb0_data)); 1177 1204 1178 1205 /* Optimal transfer length granularity */ 1179 - gran = 1 << scsi_debug_physblk_exp; 1180 - arr[2] = (gran >> 8) & 0xff; 1181 - arr[3] = gran & 0xff; 1206 + gran = 1 << sdebug_physblk_exp; 1207 + put_unaligned_be16(gran, arr + 2); 1182 
1208 1183 1209 /* Maximum Transfer Length */ 1184 - if (sdebug_store_sectors > 0x400) { 1185 - arr[4] = (sdebug_store_sectors >> 24) & 0xff; 1186 - arr[5] = (sdebug_store_sectors >> 16) & 0xff; 1187 - arr[6] = (sdebug_store_sectors >> 8) & 0xff; 1188 - arr[7] = sdebug_store_sectors & 0xff; 1189 - } 1210 + if (sdebug_store_sectors > 0x400) 1211 + put_unaligned_be32(sdebug_store_sectors, arr + 4); 1190 1212 1191 1213 /* Optimal Transfer Length */ 1192 - put_unaligned_be32(scsi_debug_opt_blks, &arr[8]); 1214 + put_unaligned_be32(sdebug_opt_blks, &arr[8]); 1193 1215 1194 - if (scsi_debug_lbpu) { 1216 + if (sdebug_lbpu) { 1195 1217 /* Maximum Unmap LBA Count */ 1196 - put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]); 1218 + put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]); 1197 1219 1198 1220 /* Maximum Unmap Block Descriptor Count */ 1199 - put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]); 1221 + put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]); 1200 1222 } 1201 1223 1202 1224 /* Unmap Granularity Alignment */ 1203 - if (scsi_debug_unmap_alignment) { 1204 - put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]); 1225 + if (sdebug_unmap_alignment) { 1226 + put_unaligned_be32(sdebug_unmap_alignment, &arr[28]); 1205 1227 arr[28] |= 0x80; /* UGAVALID */ 1206 1228 } 1207 1229 1208 1230 /* Optimal Unmap Granularity */ 1209 - put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]); 1231 + put_unaligned_be32(sdebug_unmap_granularity, &arr[24]); 1210 1232 1211 1233 /* Maximum WRITE SAME Length */ 1212 - put_unaligned_be64(scsi_debug_write_same_length, &arr[32]); 1234 + put_unaligned_be64(sdebug_write_same_length, &arr[32]); 1213 1235 1214 1236 return 0x3c; /* Mandatory page length for Logical Block Provisioning */ 1215 1237 ··· 1212 1244 } 1213 1245 1214 1246 /* Block device characteristics VPD page (SBC-3) */ 1215 - static int inquiry_evpd_b1(unsigned char *arr) 1247 + static int inquiry_vpd_b1(unsigned char *arr) 1216 1248 { 1217 1249 memset(arr, 
0, 0x3c); 1218 1250 arr[0] = 0; ··· 1223 1255 return 0x3c; 1224 1256 } 1225 1257 1226 - /* Logical block provisioning VPD page (SBC-3) */ 1227 - static int inquiry_evpd_b2(unsigned char *arr) 1258 + /* Logical block provisioning VPD page (SBC-4) */ 1259 + static int inquiry_vpd_b2(unsigned char *arr) 1228 1260 { 1229 1261 memset(arr, 0, 0x4); 1230 1262 arr[0] = 0; /* threshold exponent */ 1231 - 1232 - if (scsi_debug_lbpu) 1263 + if (sdebug_lbpu) 1233 1264 arr[1] = 1 << 7; 1234 - 1235 - if (scsi_debug_lbpws) 1265 + if (sdebug_lbpws) 1236 1266 arr[1] |= 1 << 6; 1237 - 1238 - if (scsi_debug_lbpws10) 1267 + if (sdebug_lbpws10) 1239 1268 arr[1] |= 1 << 5; 1240 - 1241 - if (scsi_debug_lbprz) 1242 - arr[1] |= 1 << 2; 1243 - 1269 + if (sdebug_lbprz && scsi_debug_lbp()) 1270 + arr[1] |= (sdebug_lbprz & 0x7) << 2; /* sbc4r07 and later */ 1271 + /* anc_sup=0; dp=0 (no provisioning group descriptor) */ 1272 + /* minimum_percentage=0; provisioning_type=0 (unknown) */ 1273 + /* threshold_percentage=0 */ 1244 1274 return 0x4; 1245 1275 } 1246 1276 ··· 1251 1285 unsigned char * arr; 1252 1286 unsigned char *cmd = scp->cmnd; 1253 1287 int alloc_len, n, ret; 1254 - bool have_wlun; 1288 + bool have_wlun, is_disk; 1255 1289 1256 - alloc_len = (cmd[3] << 8) + cmd[4]; 1290 + alloc_len = get_unaligned_be16(cmd + 3); 1257 1291 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC); 1258 1292 if (! 
arr) 1259 1293 return DID_REQUEUE << 16; 1260 - have_wlun = (scp->device->lun == SCSI_W_LUN_REPORT_LUNS); 1294 + is_disk = (sdebug_ptype == TYPE_DISK); 1295 + have_wlun = scsi_is_wlun(scp->device->lun); 1261 1296 if (have_wlun) 1262 - pq_pdt = 0x1e; /* present, wlun */ 1263 - else if (scsi_debug_no_lun_0 && (0 == devip->lun)) 1264 - pq_pdt = 0x7f; /* not present, no device type */ 1297 + pq_pdt = TYPE_WLUN; /* present, wlun */ 1298 + else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL)) 1299 + pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */ 1265 1300 else 1266 - pq_pdt = (scsi_debug_ptype & 0x1f); 1301 + pq_pdt = (sdebug_ptype & 0x1f); 1267 1302 arr[0] = pq_pdt; 1268 1303 if (0x2 & cmd[1]) { /* CMDDT bit set */ 1269 1304 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1); ··· 1277 1310 1278 1311 port_group_id = (((host_no + 1) & 0x7f) << 8) + 1279 1312 (devip->channel & 0x7f); 1280 - if (0 == scsi_debug_vpd_use_hostno) 1313 + if (sdebug_vpd_use_hostno == 0) 1281 1314 host_no = 0; 1282 1315 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) + 1283 1316 (devip->target * 1000) + devip->lun); ··· 1295 1328 arr[n++] = 0x86; /* extended inquiry */ 1296 1329 arr[n++] = 0x87; /* mode page policy */ 1297 1330 arr[n++] = 0x88; /* SCSI ports */ 1298 - arr[n++] = 0x89; /* ATA information */ 1299 - arr[n++] = 0xb0; /* Block limits (SBC) */ 1300 - arr[n++] = 0xb1; /* Block characteristics (SBC) */ 1301 - if (scsi_debug_lbp()) /* Logical Block Prov. 
(SBC) */ 1302 - arr[n++] = 0xb2; 1331 + if (is_disk) { /* SBC only */ 1332 + arr[n++] = 0x89; /* ATA information */ 1333 + arr[n++] = 0xb0; /* Block limits */ 1334 + arr[n++] = 0xb1; /* Block characteristics */ 1335 + arr[n++] = 0xb2; /* Logical Block Prov */ 1336 + } 1303 1337 arr[3] = n - 4; /* number of supported VPD pages */ 1304 1338 } else if (0x80 == cmd[2]) { /* unit serial number */ 1305 1339 arr[1] = cmd[2]; /*sanity */ ··· 1308 1340 memcpy(&arr[4], lu_id_str, len); 1309 1341 } else if (0x83 == cmd[2]) { /* device identification */ 1310 1342 arr[1] = cmd[2]; /*sanity */ 1311 - arr[3] = inquiry_evpd_83(&arr[4], port_group_id, 1312 - target_dev_id, lu_id_num, 1313 - lu_id_str, len); 1343 + arr[3] = inquiry_vpd_83(&arr[4], port_group_id, 1344 + target_dev_id, lu_id_num, 1345 + lu_id_str, len, 1346 + &devip->lu_name); 1314 1347 } else if (0x84 == cmd[2]) { /* Software interface ident. */ 1315 1348 arr[1] = cmd[2]; /*sanity */ 1316 - arr[3] = inquiry_evpd_84(&arr[4]); 1349 + arr[3] = inquiry_vpd_84(&arr[4]); 1317 1350 } else if (0x85 == cmd[2]) { /* Management network addresses */ 1318 1351 arr[1] = cmd[2]; /*sanity */ 1319 - arr[3] = inquiry_evpd_85(&arr[4]); 1352 + arr[3] = inquiry_vpd_85(&arr[4]); 1320 1353 } else if (0x86 == cmd[2]) { /* extended inquiry */ 1321 1354 arr[1] = cmd[2]; /*sanity */ 1322 1355 arr[3] = 0x3c; /* number of following entries */ 1323 - if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) 1356 + if (sdebug_dif == SD_DIF_TYPE3_PROTECTION) 1324 1357 arr[4] = 0x4; /* SPT: GRD_CHK:1 */ 1325 - else if (scsi_debug_dif) 1358 + else if (have_dif_prot) 1326 1359 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */ 1327 1360 else 1328 1361 arr[4] = 0x0; /* no protection stuff */ ··· 1337 1368 arr[10] = 0x82; /* mlus, per initiator port */ 1338 1369 } else if (0x88 == cmd[2]) { /* SCSI Ports */ 1339 1370 arr[1] = cmd[2]; /*sanity */ 1340 - arr[3] = inquiry_evpd_88(&arr[4], target_dev_id); 1341 - } else if (0x89 == cmd[2]) { /* ATA information */ 1371 + 
arr[3] = inquiry_vpd_88(&arr[4], target_dev_id); 1372 + } else if (is_disk && 0x89 == cmd[2]) { /* ATA information */ 1342 1373 arr[1] = cmd[2]; /*sanity */ 1343 - n = inquiry_evpd_89(&arr[4]); 1344 - arr[2] = (n >> 8); 1345 - arr[3] = (n & 0xff); 1346 - } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */ 1374 + n = inquiry_vpd_89(&arr[4]); 1375 + put_unaligned_be16(n, arr + 2); 1376 + } else if (is_disk && 0xb0 == cmd[2]) { /* Block limits */ 1347 1377 arr[1] = cmd[2]; /*sanity */ 1348 - arr[3] = inquiry_evpd_b0(&arr[4]); 1349 - } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */ 1378 + arr[3] = inquiry_vpd_b0(&arr[4]); 1379 + } else if (is_disk && 0xb1 == cmd[2]) { /* Block char. */ 1350 1380 arr[1] = cmd[2]; /*sanity */ 1351 - arr[3] = inquiry_evpd_b1(&arr[4]); 1352 - } else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */ 1381 + arr[3] = inquiry_vpd_b1(&arr[4]); 1382 + } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */ 1353 1383 arr[1] = cmd[2]; /*sanity */ 1354 - arr[3] = inquiry_evpd_b2(&arr[4]); 1384 + arr[3] = inquiry_vpd_b2(&arr[4]); 1355 1385 } else { 1356 1386 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1); 1357 1387 kfree(arr); 1358 1388 return check_condition_result; 1359 1389 } 1360 - len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len); 1390 + len = min(get_unaligned_be16(arr + 2) + 4, alloc_len); 1361 1391 ret = fill_from_dev_buffer(scp, arr, 1362 1392 min(len, SDEBUG_MAX_INQ_ARR_SZ)); 1363 1393 kfree(arr); 1364 1394 return ret; 1365 1395 } 1366 1396 /* drops through here for a standard inquiry */ 1367 - arr[1] = scsi_debug_removable ? 0x80 : 0; /* Removable disk */ 1368 - arr[2] = scsi_debug_scsi_level; 1397 + arr[1] = sdebug_removable ? 0x80 : 0; /* Removable disk */ 1398 + arr[2] = sdebug_scsi_level; 1369 1399 arr[3] = 2; /* response_data_format==2 */ 1370 1400 arr[4] = SDEBUG_LONG_INQ_SZ - 5; 1371 - arr[5] = scsi_debug_dif ? 
1 : 0; /* PROTECT bit */ 1372 - if (0 == scsi_debug_vpd_use_hostno) 1401 + arr[5] = (int)have_dif_prot; /* PROTECT bit */ 1402 + if (sdebug_vpd_use_hostno == 0) 1373 1403 arr[5] = 0x10; /* claim: implicit TGPS */ 1374 1404 arr[6] = 0x10; /* claim: MultiP */ 1375 1405 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */ ··· 1377 1409 memcpy(&arr[16], inq_product_id, 16); 1378 1410 memcpy(&arr[32], inq_product_rev, 4); 1379 1411 /* version descriptors (2 bytes each) follow */ 1380 - arr[58] = 0x0; arr[59] = 0xa2; /* SAM-5 rev 4 */ 1381 - arr[60] = 0x4; arr[61] = 0x68; /* SPC-4 rev 37 */ 1412 + put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */ 1413 + put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */ 1382 1414 n = 62; 1383 - if (scsi_debug_ptype == 0) { 1384 - arr[n++] = 0x4; arr[n++] = 0xc5; /* SBC-4 rev 36 */ 1385 - } else if (scsi_debug_ptype == 1) { 1386 - arr[n++] = 0x5; arr[n++] = 0x25; /* SSC-4 rev 3 */ 1415 + if (is_disk) { /* SBC-4 no version claimed */ 1416 + put_unaligned_be16(0x600, arr + n); 1417 + n += 2; 1418 + } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */ 1419 + put_unaligned_be16(0x525, arr + n); 1420 + n += 2; 1387 1421 } 1388 - arr[n++] = 0x20; arr[n++] = 0xe6; /* SPL-3 rev 7 */ 1422 + put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */ 1389 1423 ret = fill_from_dev_buffer(scp, arr, 1390 1424 min(alloc_len, SDEBUG_LONG_INQ_SZ)); 1391 1425 kfree(arr); 1392 1426 return ret; 1393 1427 } 1428 + 1429 + static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0, 1430 + 0, 0, 0x0, 0x0}; 1394 1431 1395 1432 static int resp_requests(struct scsi_cmnd * scp, 1396 1433 struct sdebug_dev_info * devip) ··· 1425 1452 } 1426 1453 } else { 1427 1454 memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE); 1428 - if (arr[0] >= 0x70 && dsense == scsi_debug_dsense) 1455 + if (arr[0] >= 0x70 && dsense == sdebug_dsense) 1429 1456 ; /* have sense and formats match */ 1430 1457 else if (arr[0] <= 0x70) { 1431 
1458 if (dsense) { ··· 1462 1489 struct sdebug_dev_info * devip) 1463 1490 { 1464 1491 unsigned char *cmd = scp->cmnd; 1465 - int power_cond, start; 1492 + int power_cond, stop; 1466 1493 1467 1494 power_cond = (cmd[4] & 0xf0) >> 4; 1468 1495 if (power_cond) { 1469 1496 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7); 1470 1497 return check_condition_result; 1471 1498 } 1472 - start = cmd[4] & 1; 1473 - if (start == devip->stopped) 1474 - devip->stopped = !start; 1499 + stop = !(cmd[4] & 1); 1500 + atomic_xchg(&devip->stopped, stop); 1475 1501 return 0; 1476 1502 } 1477 1503 1478 1504 static sector_t get_sdebug_capacity(void) 1479 1505 { 1480 - if (scsi_debug_virtual_gb > 0) 1481 - return (sector_t)scsi_debug_virtual_gb * 1482 - (1073741824 / scsi_debug_sector_size); 1506 + static const unsigned int gibibyte = 1073741824; 1507 + 1508 + if (sdebug_virtual_gb > 0) 1509 + return (sector_t)sdebug_virtual_gb * 1510 + (gibibyte / sdebug_sector_size); 1483 1511 else 1484 1512 return sdebug_store_sectors; 1485 1513 } ··· 1497 1523 memset(arr, 0, SDEBUG_READCAP_ARR_SZ); 1498 1524 if (sdebug_capacity < 0xffffffff) { 1499 1525 capac = (unsigned int)sdebug_capacity - 1; 1500 - arr[0] = (capac >> 24); 1501 - arr[1] = (capac >> 16) & 0xff; 1502 - arr[2] = (capac >> 8) & 0xff; 1503 - arr[3] = capac & 0xff; 1504 - } else { 1505 - arr[0] = 0xff; 1506 - arr[1] = 0xff; 1507 - arr[2] = 0xff; 1508 - arr[3] = 0xff; 1509 - } 1510 - arr[6] = (scsi_debug_sector_size >> 8) & 0xff; 1511 - arr[7] = scsi_debug_sector_size & 0xff; 1526 + put_unaligned_be32(capac, arr + 0); 1527 + } else 1528 + put_unaligned_be32(0xffffffff, arr + 0); 1529 + put_unaligned_be16(sdebug_sector_size, arr + 6); 1512 1530 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ); 1513 1531 } 1514 1532 ··· 1510 1544 { 1511 1545 unsigned char *cmd = scp->cmnd; 1512 1546 unsigned char arr[SDEBUG_READCAP16_ARR_SZ]; 1513 - unsigned long long capac; 1514 - int k, alloc_len; 1547 + int alloc_len; 1515 1548 1516 - alloc_len = 
((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8) 1517 - + cmd[13]); 1549 + alloc_len = get_unaligned_be32(cmd + 10); 1518 1550 /* following just in case virtual_gb changed */ 1519 1551 sdebug_capacity = get_sdebug_capacity(); 1520 1552 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ); 1521 - capac = sdebug_capacity - 1; 1522 - for (k = 0; k < 8; ++k, capac >>= 8) 1523 - arr[7 - k] = capac & 0xff; 1524 - arr[8] = (scsi_debug_sector_size >> 24) & 0xff; 1525 - arr[9] = (scsi_debug_sector_size >> 16) & 0xff; 1526 - arr[10] = (scsi_debug_sector_size >> 8) & 0xff; 1527 - arr[11] = scsi_debug_sector_size & 0xff; 1528 - arr[13] = scsi_debug_physblk_exp & 0xf; 1529 - arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f; 1553 + put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0); 1554 + put_unaligned_be32(sdebug_sector_size, arr + 8); 1555 + arr[13] = sdebug_physblk_exp & 0xf; 1556 + arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f; 1530 1557 1531 1558 if (scsi_debug_lbp()) { 1532 1559 arr[14] |= 0x80; /* LBPME */ 1533 - if (scsi_debug_lbprz) 1534 - arr[14] |= 0x40; /* LBPRZ */ 1560 + /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in 1561 + * the LB Provisioning VPD page is 3 bits. Note that lbprz=2 1562 + * in the wider field maps to 0 in this field. 
1563 + */ 1564 + if (sdebug_lbprz & 1) /* precisely what the draft requires */ 1565 + arr[14] |= 0x40; 1535 1566 } 1536 1567 1537 - arr[15] = scsi_debug_lowest_aligned & 0xff; 1568 + arr[15] = sdebug_lowest_aligned & 0xff; 1538 1569 1539 - if (scsi_debug_dif) { 1540 - arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */ 1570 + if (have_dif_prot) { 1571 + arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */ 1541 1572 arr[12] |= 1; /* PROT_EN */ 1542 1573 } 1543 1574 ··· 1553 1590 int n, ret, alen, rlen; 1554 1591 int port_group_a, port_group_b, port_a, port_b; 1555 1592 1556 - alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8) 1557 - + cmd[9]); 1558 - 1593 + alen = get_unaligned_be32(cmd + 6); 1559 1594 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC); 1560 1595 if (! arr) 1561 1596 return DID_REQUEUE << 16; ··· 1566 1605 port_a = 0x1; /* relative port A */ 1567 1606 port_b = 0x2; /* relative port B */ 1568 1607 port_group_a = (((host_no + 1) & 0x7f) << 8) + 1569 - (devip->channel & 0x7f); 1608 + (devip->channel & 0x7f); 1570 1609 port_group_b = (((host_no + 1) & 0x7f) << 8) + 1571 - (devip->channel & 0x7f) + 0x80; 1610 + (devip->channel & 0x7f) + 0x80; 1572 1611 1573 1612 /* 1574 1613 * The asymmetric access state is cycled according to the host_id. 
1575 1614 */ 1576 1615 n = 4; 1577 - if (0 == scsi_debug_vpd_use_hostno) { 1578 - arr[n++] = host_no % 3; /* Asymm access state */ 1579 - arr[n++] = 0x0F; /* claim: all states are supported */ 1616 + if (sdebug_vpd_use_hostno == 0) { 1617 + arr[n++] = host_no % 3; /* Asymm access state */ 1618 + arr[n++] = 0x0F; /* claim: all states are supported */ 1580 1619 } else { 1581 - arr[n++] = 0x0; /* Active/Optimized path */ 1582 - arr[n++] = 0x01; /* claim: only support active/optimized paths */ 1620 + arr[n++] = 0x0; /* Active/Optimized path */ 1621 + arr[n++] = 0x01; /* only support active/optimized paths */ 1583 1622 } 1584 - arr[n++] = (port_group_a >> 8) & 0xff; 1585 - arr[n++] = port_group_a & 0xff; 1623 + put_unaligned_be16(port_group_a, arr + n); 1624 + n += 2; 1586 1625 arr[n++] = 0; /* Reserved */ 1587 1626 arr[n++] = 0; /* Status code */ 1588 1627 arr[n++] = 0; /* Vendor unique */ 1589 1628 arr[n++] = 0x1; /* One port per group */ 1590 1629 arr[n++] = 0; /* Reserved */ 1591 1630 arr[n++] = 0; /* Reserved */ 1592 - arr[n++] = (port_a >> 8) & 0xff; 1593 - arr[n++] = port_a & 0xff; 1631 + put_unaligned_be16(port_a, arr + n); 1632 + n += 2; 1594 1633 arr[n++] = 3; /* Port unavailable */ 1595 1634 arr[n++] = 0x08; /* claim: only unavailalbe paths are supported */ 1596 - arr[n++] = (port_group_b >> 8) & 0xff; 1597 - arr[n++] = port_group_b & 0xff; 1635 + put_unaligned_be16(port_group_b, arr + n); 1636 + n += 2; 1598 1637 arr[n++] = 0; /* Reserved */ 1599 1638 arr[n++] = 0; /* Status code */ 1600 1639 arr[n++] = 0; /* Vendor unique */ 1601 1640 arr[n++] = 0x1; /* One port per group */ 1602 1641 arr[n++] = 0; /* Reserved */ 1603 1642 arr[n++] = 0; /* Reserved */ 1604 - arr[n++] = (port_b >> 8) & 0xff; 1605 - arr[n++] = port_b & 0xff; 1643 + put_unaligned_be16(port_b, arr + n); 1644 + n += 2; 1606 1645 1607 1646 rlen = n - 4; 1608 - arr[0] = (rlen >> 24) & 0xff; 1609 - arr[1] = (rlen >> 16) & 0xff; 1610 - arr[2] = (rlen >> 8) & 0xff; 1611 - arr[3] = rlen & 0xff; 1647 + 
put_unaligned_be32(rlen, arr + 0); 1612 1648 1613 1649 /* 1614 1650 * Return the smallest value of either ··· 1620 1662 return ret; 1621 1663 } 1622 1664 1623 - static int 1624 - resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) 1665 + static int resp_rsup_opcodes(struct scsi_cmnd *scp, 1666 + struct sdebug_dev_info *devip) 1625 1667 { 1626 1668 bool rctd; 1627 1669 u8 reporting_opts, req_opcode, sdeb_i, supp; ··· 1771 1813 return errsts; 1772 1814 } 1773 1815 1774 - static int 1775 - resp_rsup_tmfs(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) 1816 + static int resp_rsup_tmfs(struct scsi_cmnd *scp, 1817 + struct sdebug_dev_info *devip) 1776 1818 { 1777 1819 bool repd; 1778 1820 u32 alloc_len, len; ··· 1829 1871 0, 0, 0, 0, 0x40, 0, 0, 0}; 1830 1872 1831 1873 memcpy(p, format_pg, sizeof(format_pg)); 1832 - p[10] = (sdebug_sectors_per >> 8) & 0xff; 1833 - p[11] = sdebug_sectors_per & 0xff; 1834 - p[12] = (scsi_debug_sector_size >> 8) & 0xff; 1835 - p[13] = scsi_debug_sector_size & 0xff; 1836 - if (scsi_debug_removable) 1874 + put_unaligned_be16(sdebug_sectors_per, p + 10); 1875 + put_unaligned_be16(sdebug_sector_size, p + 12); 1876 + if (sdebug_removable) 1837 1877 p[20] |= 0x20; /* should agree with INQUIRY */ 1838 1878 if (1 == pcontrol) 1839 1879 memset(p + 2, 0, sizeof(format_pg) - 2); 1840 1880 return sizeof(format_pg); 1841 1881 } 1882 + 1883 + static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0, 1884 + 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 1885 + 0, 0, 0, 0}; 1842 1886 1843 1887 static int resp_caching_pg(unsigned char * p, int pcontrol, int target) 1844 1888 { /* Caching page for mode_sense */ ··· 1849 1889 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0, 1850 1890 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0}; 1851 1891 1852 - if (SCSI_DEBUG_OPT_N_WCE & scsi_debug_opts) 1892 + if (SDEBUG_OPT_N_WCE & sdebug_opts) 1853 1893 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */ 
1854 1894 memcpy(p, caching_pg, sizeof(caching_pg)); 1855 1895 if (1 == pcontrol) ··· 1859 1899 return sizeof(caching_pg); 1860 1900 } 1861 1901 1902 + static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0, 1903 + 0, 0, 0x2, 0x4b}; 1904 + 1862 1905 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target) 1863 1906 { /* Control mode page for mode_sense */ 1864 1907 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0, ··· 1869 1906 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0, 1870 1907 0, 0, 0x2, 0x4b}; 1871 1908 1872 - if (scsi_debug_dsense) 1909 + if (sdebug_dsense) 1873 1910 ctrl_m_pg[2] |= 0x4; 1874 1911 else 1875 1912 ctrl_m_pg[2] &= ~0x4; 1876 1913 1877 - if (scsi_debug_ato) 1914 + if (sdebug_ato) 1878 1915 ctrl_m_pg[5] |= 0x80; /* ATO=1 */ 1879 1916 1880 1917 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg)); ··· 1918 1955 { /* SAS phy control and discover mode page for mode_sense */ 1919 1956 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2, 1920 1957 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0, 1921 - 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0, 1922 - 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1, 1958 + 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */ 1959 + 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */ 1923 1960 0x2, 0, 0, 0, 0, 0, 0, 0, 1924 1961 0x88, 0x99, 0, 0, 0, 0, 0, 0, 1925 1962 0, 0, 0, 0, 0, 0, 0, 0, 1926 1963 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0, 1927 - 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0, 1928 - 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1, 1964 + 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */ 1965 + 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */ 1929 1966 0x3, 0, 0, 0, 0, 0, 0, 0, 1930 1967 0x88, 0x99, 0, 0, 0, 0, 0, 0, 1931 1968 0, 0, 0, 0, 0, 0, 0, 0, 1932 1969 }; 1933 1970 int port_a, port_b; 1934 1971 1972 + put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16); 1973 + put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24); 1974 + put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64); 1975 + 
put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72); 1935 1976 port_a = target_dev_id + 1; 1936 1977 port_b = port_a + 1; 1937 1978 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg)); 1938 - p[20] = (port_a >> 24); 1939 - p[21] = (port_a >> 16) & 0xff; 1940 - p[22] = (port_a >> 8) & 0xff; 1941 - p[23] = port_a & 0xff; 1942 - p[48 + 20] = (port_b >> 24); 1943 - p[48 + 21] = (port_b >> 16) & 0xff; 1944 - p[48 + 22] = (port_b >> 8) & 0xff; 1945 - p[48 + 23] = port_b & 0xff; 1979 + put_unaligned_be32(port_a, p + 20); 1980 + put_unaligned_be32(port_b, p + 48 + 20); 1946 1981 if (1 == pcontrol) 1947 1982 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4); 1948 1983 return sizeof(sas_pcd_m_pg); ··· 1960 1999 1961 2000 #define SDEBUG_MAX_MSENSE_SZ 256 1962 2001 1963 - static int 1964 - resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) 2002 + static int resp_mode_sense(struct scsi_cmnd *scp, 2003 + struct sdebug_dev_info *devip) 1965 2004 { 1966 - unsigned char dbd, llbaa; 1967 2005 int pcontrol, pcode, subpcode, bd_len; 1968 2006 unsigned char dev_spec; 1969 - int k, alloc_len, msense_6, offset, len, target_dev_id; 2007 + int alloc_len, offset, len, target_dev_id; 1970 2008 int target = scp->device->id; 1971 2009 unsigned char * ap; 1972 2010 unsigned char arr[SDEBUG_MAX_MSENSE_SZ]; 1973 2011 unsigned char *cmd = scp->cmnd; 2012 + bool dbd, llbaa, msense_6, is_disk, bad_pcode; 1974 2013 1975 - dbd = !!(cmd[1] & 0x8); 2014 + dbd = !!(cmd[1] & 0x8); /* disable block descriptors */ 1976 2015 pcontrol = (cmd[2] & 0xc0) >> 6; 1977 2016 pcode = cmd[2] & 0x3f; 1978 2017 subpcode = cmd[3]; 1979 2018 msense_6 = (MODE_SENSE == cmd[0]); 1980 - llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10); 1981 - if ((0 == scsi_debug_ptype) && (0 == dbd)) 2019 + llbaa = msense_6 ? false : !!(cmd[1] & 0x10); 2020 + is_disk = (sdebug_ptype == TYPE_DISK); 2021 + if (is_disk && !dbd) 1982 2022 bd_len = llbaa ? 16 : 8; 1983 2023 else 1984 2024 bd_len = 0; 1985 - alloc_len = msense_6 ? 
cmd[4] : ((cmd[7] << 8) | cmd[8]); 2025 + alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7); 1986 2026 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ); 1987 2027 if (0x3 == pcontrol) { /* Saving values not supported */ 1988 2028 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0); ··· 1991 2029 } 1992 2030 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) + 1993 2031 (devip->target * 1000) - 3; 1994 - /* set DPOFUA bit for disks */ 1995 - if (0 == scsi_debug_ptype) 1996 - dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10; 2032 + /* for disks set DPOFUA bit and clear write protect (WP) bit */ 2033 + if (is_disk) 2034 + dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */ 1997 2035 else 1998 2036 dev_spec = 0x0; 1999 2037 if (msense_6) { ··· 2012 2050 sdebug_capacity = get_sdebug_capacity(); 2013 2051 2014 2052 if (8 == bd_len) { 2015 - if (sdebug_capacity > 0xfffffffe) { 2016 - ap[0] = 0xff; 2017 - ap[1] = 0xff; 2018 - ap[2] = 0xff; 2019 - ap[3] = 0xff; 2020 - } else { 2021 - ap[0] = (sdebug_capacity >> 24) & 0xff; 2022 - ap[1] = (sdebug_capacity >> 16) & 0xff; 2023 - ap[2] = (sdebug_capacity >> 8) & 0xff; 2024 - ap[3] = sdebug_capacity & 0xff; 2025 - } 2026 - ap[6] = (scsi_debug_sector_size >> 8) & 0xff; 2027 - ap[7] = scsi_debug_sector_size & 0xff; 2053 + if (sdebug_capacity > 0xfffffffe) 2054 + put_unaligned_be32(0xffffffff, ap + 0); 2055 + else 2056 + put_unaligned_be32(sdebug_capacity, ap + 0); 2057 + put_unaligned_be16(sdebug_sector_size, ap + 6); 2028 2058 offset += bd_len; 2029 2059 ap = arr + offset; 2030 2060 } else if (16 == bd_len) { 2031 - unsigned long long capac = sdebug_capacity; 2032 - 2033 - for (k = 0; k < 8; ++k, capac >>= 8) 2034 - ap[7 - k] = capac & 0xff; 2035 - ap[12] = (scsi_debug_sector_size >> 24) & 0xff; 2036 - ap[13] = (scsi_debug_sector_size >> 16) & 0xff; 2037 - ap[14] = (scsi_debug_sector_size >> 8) & 0xff; 2038 - ap[15] = scsi_debug_sector_size & 0xff; 2061 + put_unaligned_be64((u64)sdebug_capacity, ap + 
0); 2062 + put_unaligned_be32(sdebug_sector_size, ap + 12); 2039 2063 offset += bd_len; 2040 2064 ap = arr + offset; 2041 2065 } ··· 2031 2083 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); 2032 2084 return check_condition_result; 2033 2085 } 2086 + bad_pcode = false; 2087 + 2034 2088 switch (pcode) { 2035 2089 case 0x1: /* Read-Write error recovery page, direct access */ 2036 2090 len = resp_err_recov_pg(ap, pcontrol, target); ··· 2043 2093 offset += len; 2044 2094 break; 2045 2095 case 0x3: /* Format device page, direct access */ 2046 - len = resp_format_pg(ap, pcontrol, target); 2047 - offset += len; 2096 + if (is_disk) { 2097 + len = resp_format_pg(ap, pcontrol, target); 2098 + offset += len; 2099 + } else 2100 + bad_pcode = true; 2048 2101 break; 2049 2102 case 0x8: /* Caching page, direct access */ 2050 - len = resp_caching_pg(ap, pcontrol, target); 2051 - offset += len; 2103 + if (is_disk) { 2104 + len = resp_caching_pg(ap, pcontrol, target); 2105 + offset += len; 2106 + } else 2107 + bad_pcode = true; 2052 2108 break; 2053 2109 case 0xa: /* Control Mode page, all devices */ 2054 2110 len = resp_ctrl_m_pg(ap, pcontrol, target); ··· 2083 2127 if ((0 == subpcode) || (0xff == subpcode)) { 2084 2128 len = resp_err_recov_pg(ap, pcontrol, target); 2085 2129 len += resp_disconnect_pg(ap + len, pcontrol, target); 2086 - len += resp_format_pg(ap + len, pcontrol, target); 2087 - len += resp_caching_pg(ap + len, pcontrol, target); 2130 + if (is_disk) { 2131 + len += resp_format_pg(ap + len, pcontrol, 2132 + target); 2133 + len += resp_caching_pg(ap + len, pcontrol, 2134 + target); 2135 + } 2088 2136 len += resp_ctrl_m_pg(ap + len, pcontrol, target); 2089 2137 len += resp_sas_sf_m_pg(ap + len, pcontrol, target); 2090 2138 if (0xff == subpcode) { ··· 2097 2137 len += resp_sas_sha_m_spg(ap + len, pcontrol); 2098 2138 } 2099 2139 len += resp_iec_m_pg(ap + len, pcontrol, target); 2140 + offset += len; 2100 2141 } else { 2101 2142 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, 
-1); 2102 2143 return check_condition_result; 2103 2144 } 2104 - offset += len; 2105 2145 break; 2106 2146 default: 2147 + bad_pcode = true; 2148 + break; 2149 + } 2150 + if (bad_pcode) { 2107 2151 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5); 2108 2152 return check_condition_result; 2109 2153 } 2110 2154 if (msense_6) 2111 2155 arr[0] = offset - 1; 2112 - else { 2113 - arr[0] = ((offset - 2) >> 8) & 0xff; 2114 - arr[1] = (offset - 2) & 0xff; 2115 - } 2156 + else 2157 + put_unaligned_be16((offset - 2), arr + 0); 2116 2158 return fill_from_dev_buffer(scp, arr, min(alloc_len, offset)); 2117 2159 } 2118 2160 2119 2161 #define SDEBUG_MAX_MSELECT_SZ 512 2120 2162 2121 - static int 2122 - resp_mode_select(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) 2163 + static int resp_mode_select(struct scsi_cmnd *scp, 2164 + struct sdebug_dev_info *devip) 2123 2165 { 2124 2166 int pf, sp, ps, md_len, bd_len, off, spf, pg_len; 2125 2167 int param_len, res, mpage; ··· 2132 2170 memset(arr, 0, sizeof(arr)); 2133 2171 pf = cmd[1] & 0x10; 2134 2172 sp = cmd[1] & 0x1; 2135 - param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]); 2173 + param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7); 2136 2174 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) { 2137 2175 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1); 2138 2176 return check_condition_result; 2139 2177 } 2140 2178 res = fetch_to_dev_buffer(scp, arr, param_len); 2141 2179 if (-1 == res) 2142 - return (DID_ERROR << 16); 2143 - else if ((res < param_len) && 2144 - (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) 2180 + return DID_ERROR << 16; 2181 + else if (sdebug_verbose && (res < param_len)) 2145 2182 sdev_printk(KERN_INFO, scp->device, 2146 2183 "%s: cdb indicated=%d, IO sent=%d bytes\n", 2147 2184 __func__, param_len, res); 2148 - md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2); 2149 - bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]); 2185 + md_len = mselect6 ? 
(arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2); 2186 + bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6); 2150 2187 if (md_len > 2) { 2151 2188 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1); 2152 2189 return check_condition_result; ··· 2158 2197 return check_condition_result; 2159 2198 } 2160 2199 spf = !!(arr[off] & 0x40); 2161 - pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) : 2200 + pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) : 2162 2201 (arr[off + 1] + 2); 2163 2202 if ((pg_len + off) > param_len) { 2164 2203 mk_sense_buffer(scp, ILLEGAL_REQUEST, ··· 2177 2216 if (ctrl_m_pg[1] == arr[off + 1]) { 2178 2217 memcpy(ctrl_m_pg + 2, arr + off + 2, 2179 2218 sizeof(ctrl_m_pg) - 2); 2180 - scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4); 2219 + sdebug_dsense = !!(ctrl_m_pg[2] & 0x4); 2181 2220 goto set_mode_changed_ua; 2182 2221 } 2183 2222 break; ··· 2240 2279 pcontrol = (cmd[2] & 0xc0) >> 6; 2241 2280 pcode = cmd[2] & 0x3f; 2242 2281 subpcode = cmd[3] & 0xff; 2243 - alloc_len = (cmd[7] << 8) + cmd[8]; 2282 + alloc_len = get_unaligned_be16(cmd + 7); 2244 2283 arr[0] = pcode; 2245 2284 if (0 == subpcode) { 2246 2285 switch (pcode) { ··· 2297 2336 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); 2298 2337 return check_condition_result; 2299 2338 } 2300 - len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len); 2339 + len = min(get_unaligned_be16(arr + 2) + 4, alloc_len); 2301 2340 return fill_from_dev_buffer(scp, arr, 2302 2341 min(len, SDEBUG_MAX_INQ_ARR_SZ)); 2303 2342 } ··· 2319 2358 } 2320 2359 2321 2360 /* Returns number of bytes copied or -1 if error. 
*/ 2322 - static int 2323 - do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write) 2361 + static int do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, 2362 + bool do_write) 2324 2363 { 2325 2364 int ret; 2326 2365 u64 block, rest = 0; ··· 2345 2384 rest = block + num - sdebug_store_sectors; 2346 2385 2347 2386 ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents, 2348 - fake_storep + (block * scsi_debug_sector_size), 2349 - (num - rest) * scsi_debug_sector_size, 0, do_write); 2350 - if (ret != (num - rest) * scsi_debug_sector_size) 2387 + fake_storep + (block * sdebug_sector_size), 2388 + (num - rest) * sdebug_sector_size, 0, do_write); 2389 + if (ret != (num - rest) * sdebug_sector_size) 2351 2390 return ret; 2352 2391 2353 2392 if (rest) { 2354 2393 ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents, 2355 - fake_storep, rest * scsi_debug_sector_size, 2356 - (num - rest) * scsi_debug_sector_size, do_write); 2394 + fake_storep, rest * sdebug_sector_size, 2395 + (num - rest) * sdebug_sector_size, do_write); 2357 2396 } 2358 2397 2359 2398 return ret; ··· 2362 2401 /* If fake_store(lba,num) compares equal to arr(num), then copy top half of 2363 2402 * arr into fake_store(lba,num) and return true. If comparison fails then 2364 2403 * return false. 
*/ 2365 - static bool 2366 - comp_write_worker(u64 lba, u32 num, const u8 *arr) 2404 + static bool comp_write_worker(u64 lba, u32 num, const u8 *arr) 2367 2405 { 2368 2406 bool res; 2369 2407 u64 block, rest = 0; 2370 2408 u32 store_blks = sdebug_store_sectors; 2371 - u32 lb_size = scsi_debug_sector_size; 2409 + u32 lb_size = sdebug_sector_size; 2372 2410 2373 2411 block = do_div(lba, store_blks); 2374 2412 if (block + num > store_blks) ··· 2394 2434 { 2395 2435 __be16 csum; 2396 2436 2397 - if (scsi_debug_guard) 2437 + if (sdebug_guard) 2398 2438 csum = (__force __be16)ip_compute_csum(buf, len); 2399 2439 else 2400 2440 csum = cpu_to_be16(crc_t10dif(buf, len)); ··· 2405 2445 static int dif_verify(struct sd_dif_tuple *sdt, const void *data, 2406 2446 sector_t sector, u32 ei_lba) 2407 2447 { 2408 - __be16 csum = dif_compute_csum(data, scsi_debug_sector_size); 2448 + __be16 csum = dif_compute_csum(data, sdebug_sector_size); 2409 2449 2410 2450 if (sdt->guard_tag != csum) { 2411 2451 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n", ··· 2414 2454 be16_to_cpu(csum)); 2415 2455 return 0x01; 2416 2456 } 2417 - if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION && 2457 + if (sdebug_dif == SD_DIF_TYPE1_PROTECTION && 2418 2458 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { 2419 2459 pr_err("REF check failed on sector %lu\n", 2420 2460 (unsigned long)sector); 2421 2461 return 0x03; 2422 2462 } 2423 - if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION && 2463 + if (sdebug_dif == SD_DIF_TYPE2_PROTECTION && 2424 2464 be32_to_cpu(sdt->ref_tag) != ei_lba) { 2425 2465 pr_err("REF check failed on sector %lu\n", 2426 2466 (unsigned long)sector); ··· 2501 2541 return 0; 2502 2542 } 2503 2543 2504 - static int 2505 - resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) 2544 + static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) 2506 2545 { 2507 2546 u8 *cmd = scp->cmnd; 2547 + struct sdebug_queued_cmd *sqcp; 2508 2548 u64 lba; 
2509 2549 u32 num; 2510 2550 u32 ei_lba; ··· 2551 2591 check_prot = false; 2552 2592 break; 2553 2593 } 2554 - if (check_prot) { 2555 - if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION && 2594 + if (unlikely(have_dif_prot && check_prot)) { 2595 + if (sdebug_dif == SD_DIF_TYPE2_PROTECTION && 2556 2596 (cmd[1] & 0xe0)) { 2557 2597 mk_sense_invalid_opcode(scp); 2558 2598 return check_condition_result; 2559 2599 } 2560 - if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION || 2561 - scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) && 2600 + if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION || 2601 + sdebug_dif == SD_DIF_TYPE3_PROTECTION) && 2562 2602 (cmd[1] & 0xe0) == 0) 2563 2603 sdev_printk(KERN_ERR, scp->device, "Unprotected RD " 2564 2604 "to DIF device\n"); 2565 2605 } 2566 - if (sdebug_any_injecting_opt) { 2567 - struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp); 2606 + if (unlikely(sdebug_any_injecting_opt)) { 2607 + sqcp = (struct sdebug_queued_cmd *)scp->host_scribble; 2568 2608 2569 - if (ep->inj_short) 2570 - num /= 2; 2571 - } 2609 + if (sqcp) { 2610 + if (sqcp->inj_short) 2611 + num /= 2; 2612 + } 2613 + } else 2614 + sqcp = NULL; 2572 2615 2573 2616 /* inline check_device_access_params() */ 2574 - if (lba + num > sdebug_capacity) { 2617 + if (unlikely(lba + num > sdebug_capacity)) { 2575 2618 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0); 2576 2619 return check_condition_result; 2577 2620 } 2578 2621 /* transfer length excessive (tie in to block limits VPD page) */ 2579 - if (num > sdebug_store_sectors) { 2622 + if (unlikely(num > sdebug_store_sectors)) { 2580 2623 /* needs work to find which cdb byte 'num' comes from */ 2581 2624 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); 2582 2625 return check_condition_result; 2583 2626 } 2584 2627 2585 - if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) && 2586 - (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) && 2587 - ((lba + num) > OPT_MEDIUM_ERR_ADDR)) { 2628 + if 
(unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) && 2629 + (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) && 2630 + ((lba + num) > OPT_MEDIUM_ERR_ADDR))) { 2588 2631 /* claim unrecoverable read error */ 2589 2632 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0); 2590 2633 /* set info field and valid bit for fixed descriptor */ ··· 2604 2641 read_lock_irqsave(&atomic_rw, iflags); 2605 2642 2606 2643 /* DIX + T10 DIF */ 2607 - if (scsi_debug_dix && scsi_prot_sg_count(scp)) { 2644 + if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) { 2608 2645 int prot_ret = prot_verify_read(scp, lba, num, ei_lba); 2609 2646 2610 2647 if (prot_ret) { ··· 2616 2653 2617 2654 ret = do_device_access(scp, lba, num, false); 2618 2655 read_unlock_irqrestore(&atomic_rw, iflags); 2619 - if (ret == -1) 2656 + if (unlikely(ret == -1)) 2620 2657 return DID_ERROR << 16; 2621 2658 2622 2659 scsi_in(scp)->resid = scsi_bufflen(scp) - ret; 2623 2660 2624 - if (sdebug_any_injecting_opt) { 2625 - struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp); 2626 - 2627 - if (ep->inj_recovered) { 2661 + if (unlikely(sqcp)) { 2662 + if (sqcp->inj_recovered) { 2628 2663 mk_sense_buffer(scp, RECOVERED_ERROR, 2629 2664 THRESHOLD_EXCEEDED, 0); 2630 2665 return check_condition_result; 2631 - } else if (ep->inj_transport) { 2666 + } else if (sqcp->inj_transport) { 2632 2667 mk_sense_buffer(scp, ABORTED_COMMAND, 2633 2668 TRANSPORT_PROBLEM, ACK_NAK_TO); 2634 2669 return check_condition_result; 2635 - } else if (ep->inj_dif) { 2670 + } else if (sqcp->inj_dif) { 2636 2671 /* Logical block guard check failed */ 2637 2672 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1); 2638 2673 return illegal_condition_result; 2639 - } else if (ep->inj_dix) { 2674 + } else if (sqcp->inj_dix) { 2640 2675 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1); 2641 2676 return illegal_condition_result; 2642 2677 } ··· 2711 2750 2712 2751 ret = dif_verify(sdt, daddr, sector, ei_lba); 2713 2752 if (ret) { 2714 - 
dump_sector(daddr, scsi_debug_sector_size); 2753 + dump_sector(daddr, sdebug_sector_size); 2715 2754 goto out; 2716 2755 } 2717 2756 2718 2757 sector++; 2719 2758 ei_lba++; 2720 - dpage_offset += scsi_debug_sector_size; 2759 + dpage_offset += sdebug_sector_size; 2721 2760 } 2722 2761 diter.consumed = dpage_offset; 2723 2762 sg_miter_stop(&diter); ··· 2738 2777 2739 2778 static unsigned long lba_to_map_index(sector_t lba) 2740 2779 { 2741 - if (scsi_debug_unmap_alignment) { 2742 - lba += scsi_debug_unmap_granularity - 2743 - scsi_debug_unmap_alignment; 2744 - } 2745 - sector_div(lba, scsi_debug_unmap_granularity); 2746 - 2780 + if (sdebug_unmap_alignment) 2781 + lba += sdebug_unmap_granularity - sdebug_unmap_alignment; 2782 + sector_div(lba, sdebug_unmap_granularity); 2747 2783 return lba; 2748 2784 } 2749 2785 2750 2786 static sector_t map_index_to_lba(unsigned long index) 2751 2787 { 2752 - sector_t lba = index * scsi_debug_unmap_granularity; 2788 + sector_t lba = index * sdebug_unmap_granularity; 2753 2789 2754 - if (scsi_debug_unmap_alignment) { 2755 - lba -= scsi_debug_unmap_granularity - 2756 - scsi_debug_unmap_alignment; 2757 - } 2758 - 2790 + if (sdebug_unmap_alignment) 2791 + lba -= sdebug_unmap_granularity - sdebug_unmap_alignment; 2759 2792 return lba; 2760 2793 } 2761 2794 ··· 2770 2815 2771 2816 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next)); 2772 2817 *num = end - lba; 2773 - 2774 2818 return mapped; 2775 2819 } 2776 2820 ··· 2795 2841 unsigned long index = lba_to_map_index(lba); 2796 2842 2797 2843 if (lba == map_index_to_lba(index) && 2798 - lba + scsi_debug_unmap_granularity <= end && 2844 + lba + sdebug_unmap_granularity <= end && 2799 2845 index < map_size) { 2800 2846 clear_bit(index, map_storep); 2801 - if (scsi_debug_lbprz) { 2847 + if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff_s */ 2802 2848 memset(fake_storep + 2803 - lba * scsi_debug_sector_size, 0, 2804 - scsi_debug_sector_size * 2805 - scsi_debug_unmap_granularity); 
2849 + lba * sdebug_sector_size, 2850 + (sdebug_lbprz & 1) ? 0 : 0xff, 2851 + sdebug_sector_size * 2852 + sdebug_unmap_granularity); 2806 2853 } 2807 2854 if (dif_storep) { 2808 2855 memset(dif_storep + lba, 0xff, 2809 2856 sizeof(*dif_storep) * 2810 - scsi_debug_unmap_granularity); 2857 + sdebug_unmap_granularity); 2811 2858 } 2812 2859 } 2813 2860 lba = map_index_to_lba(index + 1); 2814 2861 } 2815 2862 } 2816 2863 2817 - static int 2818 - resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) 2864 + static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) 2819 2865 { 2820 2866 u8 *cmd = scp->cmnd; 2821 2867 u64 lba; ··· 2864 2910 check_prot = false; 2865 2911 break; 2866 2912 } 2867 - if (check_prot) { 2868 - if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION && 2913 + if (unlikely(have_dif_prot && check_prot)) { 2914 + if (sdebug_dif == SD_DIF_TYPE2_PROTECTION && 2869 2915 (cmd[1] & 0xe0)) { 2870 2916 mk_sense_invalid_opcode(scp); 2871 2917 return check_condition_result; 2872 2918 } 2873 - if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION || 2874 - scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) && 2919 + if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION || 2920 + sdebug_dif == SD_DIF_TYPE3_PROTECTION) && 2875 2921 (cmd[1] & 0xe0) == 0) 2876 2922 sdev_printk(KERN_ERR, scp->device, "Unprotected WR " 2877 2923 "to DIF device\n"); 2878 2924 } 2879 2925 2880 2926 /* inline check_device_access_params() */ 2881 - if (lba + num > sdebug_capacity) { 2927 + if (unlikely(lba + num > sdebug_capacity)) { 2882 2928 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0); 2883 2929 return check_condition_result; 2884 2930 } 2885 2931 /* transfer length excessive (tie in to block limits VPD page) */ 2886 - if (num > sdebug_store_sectors) { 2932 + if (unlikely(num > sdebug_store_sectors)) { 2887 2933 /* needs work to find which cdb byte 'num' comes from */ 2888 2934 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); 2889 2935 return 
check_condition_result; ··· 2892 2938 write_lock_irqsave(&atomic_rw, iflags); 2893 2939 2894 2940 /* DIX + T10 DIF */ 2895 - if (scsi_debug_dix && scsi_prot_sg_count(scp)) { 2941 + if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) { 2896 2942 int prot_ret = prot_verify_write(scp, lba, num, ei_lba); 2897 2943 2898 2944 if (prot_ret) { ··· 2903 2949 } 2904 2950 2905 2951 ret = do_device_access(scp, lba, num, true); 2906 - if (scsi_debug_lbp()) 2952 + if (unlikely(scsi_debug_lbp())) 2907 2953 map_region(lba, num); 2908 2954 write_unlock_irqrestore(&atomic_rw, iflags); 2909 - if (-1 == ret) 2910 - return (DID_ERROR << 16); 2911 - else if ((ret < (num * scsi_debug_sector_size)) && 2912 - (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) 2955 + if (unlikely(-1 == ret)) 2956 + return DID_ERROR << 16; 2957 + else if (unlikely(sdebug_verbose && 2958 + (ret < (num * sdebug_sector_size)))) 2913 2959 sdev_printk(KERN_INFO, scp->device, 2914 2960 "%s: write: cdb indicated=%u, IO sent=%d bytes\n", 2915 - my_name, num * scsi_debug_sector_size, ret); 2961 + my_name, num * sdebug_sector_size, ret); 2916 2962 2917 - if (sdebug_any_injecting_opt) { 2918 - struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp); 2963 + if (unlikely(sdebug_any_injecting_opt)) { 2964 + struct sdebug_queued_cmd *sqcp = 2965 + (struct sdebug_queued_cmd *)scp->host_scribble; 2919 2966 2920 - if (ep->inj_recovered) { 2921 - mk_sense_buffer(scp, RECOVERED_ERROR, 2922 - THRESHOLD_EXCEEDED, 0); 2923 - return check_condition_result; 2924 - } else if (ep->inj_dif) { 2925 - /* Logical block guard check failed */ 2926 - mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1); 2927 - return illegal_condition_result; 2928 - } else if (ep->inj_dix) { 2929 - mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1); 2930 - return illegal_condition_result; 2967 + if (sqcp) { 2968 + if (sqcp->inj_recovered) { 2969 + mk_sense_buffer(scp, RECOVERED_ERROR, 2970 + THRESHOLD_EXCEEDED, 0); 2971 + return check_condition_result; 2972 + } else if 
(sqcp->inj_dif) { 2973 + /* Logical block guard check failed */ 2974 + mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1); 2975 + return illegal_condition_result; 2976 + } else if (sqcp->inj_dix) { 2977 + mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1); 2978 + return illegal_condition_result; 2979 + } 2931 2980 } 2932 2981 } 2933 2982 return 0; 2934 2983 } 2935 2984 2936 - static int 2937 - resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, u32 ei_lba, 2938 - bool unmap, bool ndob) 2985 + static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, 2986 + u32 ei_lba, bool unmap, bool ndob) 2939 2987 { 2940 2988 unsigned long iflags; 2941 2989 unsigned long long i; 2942 2990 int ret; 2991 + u64 lba_off; 2943 2992 2944 2993 ret = check_device_access_params(scp, lba, num); 2945 2994 if (ret) ··· 2955 2998 goto out; 2956 2999 } 2957 3000 3001 + lba_off = lba * sdebug_sector_size; 2958 3002 /* if ndob then zero 1 logical block, else fetch 1 logical block */ 2959 3003 if (ndob) { 2960 - memset(fake_storep + (lba * scsi_debug_sector_size), 0, 2961 - scsi_debug_sector_size); 3004 + memset(fake_storep + lba_off, 0, sdebug_sector_size); 2962 3005 ret = 0; 2963 3006 } else 2964 - ret = fetch_to_dev_buffer(scp, fake_storep + 2965 - (lba * scsi_debug_sector_size), 2966 - scsi_debug_sector_size); 3007 + ret = fetch_to_dev_buffer(scp, fake_storep + lba_off, 3008 + sdebug_sector_size); 2967 3009 2968 3010 if (-1 == ret) { 2969 3011 write_unlock_irqrestore(&atomic_rw, iflags); 2970 - return (DID_ERROR << 16); 2971 - } else if ((ret < (num * scsi_debug_sector_size)) && 2972 - (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) 3012 + return DID_ERROR << 16; 3013 + } else if (sdebug_verbose && (ret < (num * sdebug_sector_size))) 2973 3014 sdev_printk(KERN_INFO, scp->device, 2974 3015 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n", 2975 3016 my_name, "write same", 2976 - num * scsi_debug_sector_size, ret); 3017 + num * sdebug_sector_size, ret); 2977 3018 2978 3019 /* Copy first sector 
to remaining blocks */ 2979 3020 for (i = 1 ; i < num ; i++) 2980 - memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size), 2981 - fake_storep + (lba * scsi_debug_sector_size), 2982 - scsi_debug_sector_size); 3021 + memcpy(fake_storep + ((lba + i) * sdebug_sector_size), 3022 + fake_storep + lba_off, 3023 + sdebug_sector_size); 2983 3024 2984 3025 if (scsi_debug_lbp()) 2985 3026 map_region(lba, num); ··· 2987 3032 return 0; 2988 3033 } 2989 3034 2990 - static int 2991 - resp_write_same_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) 3035 + static int resp_write_same_10(struct scsi_cmnd *scp, 3036 + struct sdebug_dev_info *devip) 2992 3037 { 2993 3038 u8 *cmd = scp->cmnd; 2994 3039 u32 lba; ··· 2997 3042 bool unmap = false; 2998 3043 2999 3044 if (cmd[1] & 0x8) { 3000 - if (scsi_debug_lbpws10 == 0) { 3045 + if (sdebug_lbpws10 == 0) { 3001 3046 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3); 3002 3047 return check_condition_result; 3003 3048 } else ··· 3005 3050 } 3006 3051 lba = get_unaligned_be32(cmd + 2); 3007 3052 num = get_unaligned_be16(cmd + 7); 3008 - if (num > scsi_debug_write_same_length) { 3053 + if (num > sdebug_write_same_length) { 3009 3054 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1); 3010 3055 return check_condition_result; 3011 3056 } 3012 3057 return resp_write_same(scp, lba, num, ei_lba, unmap, false); 3013 3058 } 3014 3059 3015 - static int 3016 - resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) 3060 + static int resp_write_same_16(struct scsi_cmnd *scp, 3061 + struct sdebug_dev_info *devip) 3017 3062 { 3018 3063 u8 *cmd = scp->cmnd; 3019 3064 u64 lba; ··· 3023 3068 bool ndob = false; 3024 3069 3025 3070 if (cmd[1] & 0x8) { /* UNMAP */ 3026 - if (scsi_debug_lbpws == 0) { 3071 + if (sdebug_lbpws == 0) { 3027 3072 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3); 3028 3073 return check_condition_result; 3029 3074 } else ··· 3033 3078 ndob = true; 3034 3079 lba = get_unaligned_be64(cmd + 2); 3035 3080 num = 
get_unaligned_be32(cmd + 10); 3036 - if (num > scsi_debug_write_same_length) { 3081 + if (num > sdebug_write_same_length) { 3037 3082 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1); 3038 3083 return check_condition_result; 3039 3084 } ··· 3043 3088 /* Note the mode field is in the same position as the (lower) service action 3044 3089 * field. For the Report supported operation codes command, SPC-4 suggests 3045 3090 * each mode of this command should be reported separately; for future. */ 3046 - static int 3047 - resp_write_buffer(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) 3091 + static int resp_write_buffer(struct scsi_cmnd *scp, 3092 + struct sdebug_dev_info *devip) 3048 3093 { 3049 3094 u8 *cmd = scp->cmnd; 3050 3095 struct scsi_device *sdp = scp->device; ··· 3089 3134 return 0; 3090 3135 } 3091 3136 3092 - static int 3093 - resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) 3137 + static int resp_comp_write(struct scsi_cmnd *scp, 3138 + struct sdebug_dev_info *devip) 3094 3139 { 3095 3140 u8 *cmd = scp->cmnd; 3096 3141 u8 *arr; 3097 3142 u8 *fake_storep_hold; 3098 3143 u64 lba; 3099 3144 u32 dnum; 3100 - u32 lb_size = scsi_debug_sector_size; 3145 + u32 lb_size = sdebug_sector_size; 3101 3146 u8 num; 3102 3147 unsigned long iflags; 3103 3148 int ret; ··· 3107 3152 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */ 3108 3153 if (0 == num) 3109 3154 return 0; /* degenerate case, not an error */ 3110 - if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION && 3155 + if (sdebug_dif == SD_DIF_TYPE2_PROTECTION && 3111 3156 (cmd[1] & 0xe0)) { 3112 3157 mk_sense_invalid_opcode(scp); 3113 3158 return check_condition_result; 3114 3159 } 3115 - if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION || 3116 - scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) && 3160 + if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION || 3161 + sdebug_dif == SD_DIF_TYPE3_PROTECTION) && 3117 3162 (cmd[1] & 0xe0) == 0) 3118 3163 sdev_printk(KERN_ERR, scp->device, "Unprotected WR " 3119 
3164 "to DIF device\n"); ··· 3148 3193 if (ret == -1) { 3149 3194 retval = DID_ERROR << 16; 3150 3195 goto cleanup; 3151 - } else if ((ret < (dnum * lb_size)) && 3152 - (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) 3196 + } else if (sdebug_verbose && (ret < (dnum * lb_size))) 3153 3197 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb " 3154 3198 "indicated=%u, IO sent=%d bytes\n", my_name, 3155 3199 dnum * lb_size, ret); ··· 3171 3217 __be32 __reserved; 3172 3218 }; 3173 3219 3174 - static int 3175 - resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) 3220 + static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) 3176 3221 { 3177 3222 unsigned char *buf; 3178 3223 struct unmap_block_desc *desc; ··· 3186 3233 BUG_ON(scsi_bufflen(scp) != payload_len); 3187 3234 3188 3235 descriptors = (payload_len - 8) / 16; 3189 - if (descriptors > scsi_debug_unmap_max_desc) { 3236 + if (descriptors > sdebug_unmap_max_desc) { 3190 3237 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1); 3191 3238 return check_condition_result; 3192 3239 } 3193 3240 3194 - buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC); 3241 + buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC); 3195 3242 if (!buf) { 3196 3243 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, 3197 3244 INSUFF_RES_ASCQ); ··· 3229 3276 3230 3277 #define SDEBUG_GET_LBA_STATUS_LEN 32 3231 3278 3232 - static int 3233 - resp_get_lba_status(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) 3279 + static int resp_get_lba_status(struct scsi_cmnd *scp, 3280 + struct sdebug_dev_info *devip) 3234 3281 { 3235 3282 u8 *cmd = scp->cmnd; 3236 3283 u64 lba; ··· 3269 3316 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN); 3270 3317 } 3271 3318 3272 - #define SDEBUG_RLUN_ARR_SZ 256 3273 - 3274 - static int resp_report_luns(struct scsi_cmnd * scp, 3275 - struct sdebug_dev_info * devip) 3319 + /* Even though each pseudo target has a REPORT LUNS "well known logical unit" 3320 + * (W-LUN), the normal Linux 
scanning logic does not associate it with a 3321 + * device (e.g. /dev/sg7). The following magic will make that association: 3322 + * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan" 3323 + * where <n> is a host number. If there are multiple targets in a host then 3324 + * the above will associate a W-LUN to each target. To only get a W-LUN 3325 + * for target 2, then use "echo '- 2 49409' > scan" . 3326 + */ 3327 + static int resp_report_luns(struct scsi_cmnd *scp, 3328 + struct sdebug_dev_info *devip) 3276 3329 { 3277 - unsigned int alloc_len; 3278 - int lun_cnt, i, upper, num, n, want_wlun, shortish; 3279 - u64 lun; 3280 3330 unsigned char *cmd = scp->cmnd; 3281 - int select_report = (int)cmd[2]; 3282 - struct scsi_lun *one_lun; 3283 - unsigned char arr[SDEBUG_RLUN_ARR_SZ]; 3284 - unsigned char * max_addr; 3331 + unsigned int alloc_len; 3332 + unsigned char select_report; 3333 + u64 lun; 3334 + struct scsi_lun *lun_p; 3335 + u8 *arr; 3336 + unsigned int lun_cnt; /* normal LUN count (max: 256) */ 3337 + unsigned int wlun_cnt; /* report luns W-LUN count */ 3338 + unsigned int tlun_cnt; /* total LUN count */ 3339 + unsigned int rlen; /* response length (in bytes) */ 3340 + int i, res; 3285 3341 3286 3342 clear_luns_changed_on_target(devip); 3287 - alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24); 3288 - shortish = (alloc_len < 4); 3289 - if (shortish || (select_report > 2)) { 3290 - mk_sense_invalid_fld(scp, SDEB_IN_CDB, shortish ? 
6 : 2, -1); 3343 + 3344 + select_report = cmd[2]; 3345 + alloc_len = get_unaligned_be32(cmd + 6); 3346 + 3347 + if (alloc_len < 4) { 3348 + pr_err("alloc len too small %d\n", alloc_len); 3349 + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1); 3291 3350 return check_condition_result; 3292 3351 } 3293 - /* can produce response with up to 16k luns (lun 0 to lun 16383) */ 3294 - memset(arr, 0, SDEBUG_RLUN_ARR_SZ); 3295 - lun_cnt = scsi_debug_max_luns; 3296 - if (1 == select_report) 3352 + 3353 + switch (select_report) { 3354 + case 0: /* all LUNs apart from W-LUNs */ 3355 + lun_cnt = sdebug_max_luns; 3356 + wlun_cnt = 0; 3357 + break; 3358 + case 1: /* only W-LUNs */ 3297 3359 lun_cnt = 0; 3298 - else if (scsi_debug_no_lun_0 && (lun_cnt > 0)) 3360 + wlun_cnt = 1; 3361 + break; 3362 + case 2: /* all LUNs */ 3363 + lun_cnt = sdebug_max_luns; 3364 + wlun_cnt = 1; 3365 + break; 3366 + case 0x10: /* only administrative LUs */ 3367 + case 0x11: /* see SPC-5 */ 3368 + case 0x12: /* only subsiduary LUs owned by referenced LU */ 3369 + default: 3370 + pr_debug("select report invalid %d\n", select_report); 3371 + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1); 3372 + return check_condition_result; 3373 + } 3374 + 3375 + if (sdebug_no_lun_0 && (lun_cnt > 0)) 3299 3376 --lun_cnt; 3300 - want_wlun = (select_report > 0) ? 
1 : 0; 3301 - num = lun_cnt + want_wlun; 3302 - arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff; 3303 - arr[3] = (sizeof(struct scsi_lun) * num) & 0xff; 3304 - n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) / 3305 - sizeof(struct scsi_lun)), num); 3306 - if (n < num) { 3307 - want_wlun = 0; 3308 - lun_cnt = n; 3377 + 3378 + tlun_cnt = lun_cnt + wlun_cnt; 3379 + 3380 + rlen = (tlun_cnt * sizeof(struct scsi_lun)) + 8; 3381 + arr = vmalloc(rlen); 3382 + if (!arr) { 3383 + mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, 3384 + INSUFF_RES_ASCQ); 3385 + return check_condition_result; 3309 3386 } 3310 - one_lun = (struct scsi_lun *) &arr[8]; 3311 - max_addr = arr + SDEBUG_RLUN_ARR_SZ; 3312 - for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0); 3313 - ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr)); 3314 - i++, lun++) { 3315 - upper = (lun >> 8) & 0x3f; 3316 - if (upper) 3317 - one_lun[i].scsi_lun[0] = 3318 - (upper | (SAM2_LUN_ADDRESS_METHOD << 6)); 3319 - one_lun[i].scsi_lun[1] = lun & 0xff; 3320 - } 3321 - if (want_wlun) { 3322 - one_lun[i].scsi_lun[0] = (SCSI_W_LUN_REPORT_LUNS >> 8) & 0xff; 3323 - one_lun[i].scsi_lun[1] = SCSI_W_LUN_REPORT_LUNS & 0xff; 3324 - i++; 3325 - } 3326 - alloc_len = (unsigned char *)(one_lun + i) - arr; 3327 - return fill_from_dev_buffer(scp, arr, 3328 - min((int)alloc_len, SDEBUG_RLUN_ARR_SZ)); 3387 + memset(arr, 0, rlen); 3388 + pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n", 3389 + select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0); 3390 + 3391 + /* luns start at byte 8 in response following the header */ 3392 + lun_p = (struct scsi_lun *)&arr[8]; 3393 + 3394 + /* LUNs use single level peripheral device addressing method */ 3395 + lun = sdebug_no_lun_0 ? 
1 : 0; 3396 + for (i = 0; i < lun_cnt; i++) 3397 + int_to_scsilun(lun++, lun_p++); 3398 + 3399 + if (wlun_cnt) 3400 + int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p++); 3401 + 3402 + put_unaligned_be32(rlen - 8, &arr[0]); 3403 + 3404 + res = fill_from_dev_buffer(scp, arr, rlen); 3405 + vfree(arr); 3406 + return res; 3329 3407 } 3330 3408 3331 3409 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba, ··· 3369 3385 struct sg_mapping_iter miter; 3370 3386 3371 3387 /* better not to use temporary buffer. */ 3372 - buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC); 3388 + buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC); 3373 3389 if (!buf) { 3374 3390 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, 3375 3391 INSUFF_RES_ASCQ); ··· 3395 3411 return 0; 3396 3412 } 3397 3413 3398 - static int 3399 - resp_xdwriteread_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) 3414 + static int resp_xdwriteread_10(struct scsi_cmnd *scp, 3415 + struct sdebug_dev_info *devip) 3400 3416 { 3401 3417 u8 *cmd = scp->cmnd; 3402 3418 u64 lba; ··· 3421 3437 return resp_xdwriteread(scp, lba, num, devip); 3422 3438 } 3423 3439 3424 - /* When timer or tasklet goes off this function is called. */ 3425 - static void sdebug_q_cmd_complete(unsigned long indx) 3440 + static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd) 3426 3441 { 3427 - int qa_indx; 3442 + struct sdebug_queue *sqp = sdebug_q_arr; 3443 + 3444 + if (sdebug_mq_active) { 3445 + u32 tag = blk_mq_unique_tag(cmnd->request); 3446 + u16 hwq = blk_mq_unique_tag_to_hwq(tag); 3447 + 3448 + if (unlikely(hwq >= submit_queues)) { 3449 + pr_warn("Unexpected hwq=%d, apply modulo\n", hwq); 3450 + hwq %= submit_queues; 3451 + } 3452 + pr_debug("tag=%u, hwq=%d\n", tag, hwq); 3453 + return sqp + hwq; 3454 + } else 3455 + return sqp; 3456 + } 3457 + 3458 + /* Queued (deferred) command completions converge here. 
*/ 3459 + static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp) 3460 + { 3461 + int qc_idx; 3428 3462 int retiring = 0; 3429 3463 unsigned long iflags; 3464 + struct sdebug_queue *sqp; 3430 3465 struct sdebug_queued_cmd *sqcp; 3431 3466 struct scsi_cmnd *scp; 3432 3467 struct sdebug_dev_info *devip; 3433 3468 3434 - atomic_inc(&sdebug_completions); 3435 - qa_indx = indx; 3436 - if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) { 3437 - pr_err("wild qa_indx=%d\n", qa_indx); 3469 + qc_idx = sd_dp->qc_idx; 3470 + sqp = sdebug_q_arr + sd_dp->sqa_idx; 3471 + if (sdebug_statistics) { 3472 + atomic_inc(&sdebug_completions); 3473 + if (raw_smp_processor_id() != sd_dp->issuing_cpu) 3474 + atomic_inc(&sdebug_miss_cpus); 3475 + } 3476 + if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) { 3477 + pr_err("wild qc_idx=%d\n", qc_idx); 3438 3478 return; 3439 3479 } 3440 - spin_lock_irqsave(&queued_arr_lock, iflags); 3441 - sqcp = &queued_arr[qa_indx]; 3480 + spin_lock_irqsave(&sqp->qc_lock, iflags); 3481 + sqcp = &sqp->qc_arr[qc_idx]; 3442 3482 scp = sqcp->a_cmnd; 3443 - if (NULL == scp) { 3444 - spin_unlock_irqrestore(&queued_arr_lock, iflags); 3445 - pr_err("scp is NULL\n"); 3483 + if (unlikely(scp == NULL)) { 3484 + spin_unlock_irqrestore(&sqp->qc_lock, iflags); 3485 + pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n", 3486 + sd_dp->sqa_idx, qc_idx); 3446 3487 return; 3447 3488 } 3448 3489 devip = (struct sdebug_dev_info *)scp->device->hostdata; 3449 - if (devip) 3490 + if (likely(devip)) 3450 3491 atomic_dec(&devip->num_in_q); 3451 3492 else 3452 3493 pr_err("devip=NULL\n"); 3453 - if (atomic_read(&retired_max_queue) > 0) 3494 + if (unlikely(atomic_read(&retired_max_queue) > 0)) 3454 3495 retiring = 1; 3455 3496 3456 3497 sqcp->a_cmnd = NULL; 3457 - if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) { 3458 - spin_unlock_irqrestore(&queued_arr_lock, iflags); 3498 + if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) { 3499 + 
spin_unlock_irqrestore(&sqp->qc_lock, iflags); 3459 3500 pr_err("Unexpected completion\n"); 3460 3501 return; 3461 3502 } ··· 3489 3480 int k, retval; 3490 3481 3491 3482 retval = atomic_read(&retired_max_queue); 3492 - if (qa_indx >= retval) { 3493 - spin_unlock_irqrestore(&queued_arr_lock, iflags); 3483 + if (qc_idx >= retval) { 3484 + spin_unlock_irqrestore(&sqp->qc_lock, iflags); 3494 3485 pr_err("index %d too large\n", retval); 3495 3486 return; 3496 3487 } 3497 - k = find_last_bit(queued_in_use_bm, retval); 3498 - if ((k < scsi_debug_max_queue) || (k == retval)) 3488 + k = find_last_bit(sqp->in_use_bm, retval); 3489 + if ((k < sdebug_max_queue) || (k == retval)) 3499 3490 atomic_set(&retired_max_queue, 0); 3500 3491 else 3501 3492 atomic_set(&retired_max_queue, k + 1); 3502 3493 } 3503 - spin_unlock_irqrestore(&queued_arr_lock, iflags); 3494 + spin_unlock_irqrestore(&sqp->qc_lock, iflags); 3504 3495 scp->scsi_done(scp); /* callback to mid level */ 3505 3496 } 3506 3497 3507 3498 /* When high resolution timer goes off this function is called. 
*/ 3508 - static enum hrtimer_restart 3509 - sdebug_q_cmd_hrt_complete(struct hrtimer *timer) 3499 + static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer) 3510 3500 { 3511 - int qa_indx; 3512 - int retiring = 0; 3513 - unsigned long iflags; 3514 - struct sdebug_hrtimer *sd_hrtp = (struct sdebug_hrtimer *)timer; 3515 - struct sdebug_queued_cmd *sqcp; 3516 - struct scsi_cmnd *scp; 3517 - struct sdebug_dev_info *devip; 3518 - 3519 - atomic_inc(&sdebug_completions); 3520 - qa_indx = sd_hrtp->qa_indx; 3521 - if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) { 3522 - pr_err("wild qa_indx=%d\n", qa_indx); 3523 - goto the_end; 3524 - } 3525 - spin_lock_irqsave(&queued_arr_lock, iflags); 3526 - sqcp = &queued_arr[qa_indx]; 3527 - scp = sqcp->a_cmnd; 3528 - if (NULL == scp) { 3529 - spin_unlock_irqrestore(&queued_arr_lock, iflags); 3530 - pr_err("scp is NULL\n"); 3531 - goto the_end; 3532 - } 3533 - devip = (struct sdebug_dev_info *)scp->device->hostdata; 3534 - if (devip) 3535 - atomic_dec(&devip->num_in_q); 3536 - else 3537 - pr_err("devip=NULL\n"); 3538 - if (atomic_read(&retired_max_queue) > 0) 3539 - retiring = 1; 3540 - 3541 - sqcp->a_cmnd = NULL; 3542 - if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) { 3543 - spin_unlock_irqrestore(&queued_arr_lock, iflags); 3544 - pr_err("Unexpected completion\n"); 3545 - goto the_end; 3546 - } 3547 - 3548 - if (unlikely(retiring)) { /* user has reduced max_queue */ 3549 - int k, retval; 3550 - 3551 - retval = atomic_read(&retired_max_queue); 3552 - if (qa_indx >= retval) { 3553 - spin_unlock_irqrestore(&queued_arr_lock, iflags); 3554 - pr_err("index %d too large\n", retval); 3555 - goto the_end; 3556 - } 3557 - k = find_last_bit(queued_in_use_bm, retval); 3558 - if ((k < scsi_debug_max_queue) || (k == retval)) 3559 - atomic_set(&retired_max_queue, 0); 3560 - else 3561 - atomic_set(&retired_max_queue, k + 1); 3562 - } 3563 - spin_unlock_irqrestore(&queued_arr_lock, iflags); 3564 - scp->scsi_done(scp); 
/* callback to mid level */ 3565 - the_end: 3501 + struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer, 3502 + hrt); 3503 + sdebug_q_cmd_complete(sd_dp); 3566 3504 return HRTIMER_NORESTART; 3567 3505 } 3568 3506 3569 - static struct sdebug_dev_info * 3570 - sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags) 3507 + /* When work queue schedules work, it calls this function. */ 3508 + static void sdebug_q_cmd_wq_complete(struct work_struct *work) 3509 + { 3510 + struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer, 3511 + ew.work); 3512 + sdebug_q_cmd_complete(sd_dp); 3513 + } 3514 + 3515 + static bool got_shared_uuid; 3516 + static uuid_be shared_uuid; 3517 + 3518 + static struct sdebug_dev_info *sdebug_device_create( 3519 + struct sdebug_host_info *sdbg_host, gfp_t flags) 3571 3520 { 3572 3521 struct sdebug_dev_info *devip; 3573 3522 3574 3523 devip = kzalloc(sizeof(*devip), flags); 3575 3524 if (devip) { 3525 + if (sdebug_uuid_ctl == 1) 3526 + uuid_be_gen(&devip->lu_name); 3527 + else if (sdebug_uuid_ctl == 2) { 3528 + if (got_shared_uuid) 3529 + devip->lu_name = shared_uuid; 3530 + else { 3531 + uuid_be_gen(&shared_uuid); 3532 + got_shared_uuid = true; 3533 + devip->lu_name = shared_uuid; 3534 + } 3535 + } 3576 3536 devip->sdbg_host = sdbg_host; 3577 3537 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list); 3578 3538 } 3579 3539 return devip; 3580 3540 } 3581 3541 3582 - static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev) 3542 + static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev) 3583 3543 { 3584 - struct sdebug_host_info * sdbg_host; 3585 - struct sdebug_dev_info * open_devip = NULL; 3586 - struct sdebug_dev_info * devip = 3587 - (struct sdebug_dev_info *)sdev->hostdata; 3544 + struct sdebug_host_info *sdbg_host; 3545 + struct sdebug_dev_info *open_devip = NULL; 3546 + struct sdebug_dev_info *devip; 3588 3547 3589 - if (devip) 3590 - return devip; 3591 3548 
sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host); 3592 3549 if (!sdbg_host) { 3593 3550 pr_err("Host info NULL\n"); ··· 3589 3614 3590 3615 static int scsi_debug_slave_alloc(struct scsi_device *sdp) 3591 3616 { 3592 - if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) 3617 + if (sdebug_verbose) 3593 3618 pr_info("slave_alloc <%u %u %u %llu>\n", 3594 3619 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); 3595 3620 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue); ··· 3598 3623 3599 3624 static int scsi_debug_slave_configure(struct scsi_device *sdp) 3600 3625 { 3601 - struct sdebug_dev_info *devip; 3626 + struct sdebug_dev_info *devip = 3627 + (struct sdebug_dev_info *)sdp->hostdata; 3602 3628 3603 - if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) 3629 + if (sdebug_verbose) 3604 3630 pr_info("slave_configure <%u %u %u %llu>\n", 3605 3631 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); 3606 - if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN) 3607 - sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN; 3608 - devip = devInfoReg(sdp); 3609 - if (NULL == devip) 3610 - return 1; /* no resources, will be marked offline */ 3632 + if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN) 3633 + sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN; 3634 + if (devip == NULL) { 3635 + devip = find_build_dev_info(sdp); 3636 + if (devip == NULL) 3637 + return 1; /* no resources, will be marked offline */ 3638 + } 3611 3639 sdp->hostdata = devip; 3612 3640 blk_queue_max_segment_size(sdp->request_queue, -1U); 3613 - if (scsi_debug_no_uld) 3641 + if (sdebug_no_uld) 3614 3642 sdp->no_uld_attach = 1; 3615 3643 return 0; 3616 3644 } ··· 3623 3645 struct sdebug_dev_info *devip = 3624 3646 (struct sdebug_dev_info *)sdp->hostdata; 3625 3647 3626 - if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) 3648 + if (sdebug_verbose) 3627 3649 pr_info("slave_destroy <%u %u %u %llu>\n", 3628 3650 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); 3629 3651 if (devip) { ··· 3633 3655 } 3634 3656 
} 3635 3657 3636 - /* Returns 1 if cmnd found (deletes its timer or tasklet), else returns 0 */ 3637 - static int stop_queued_cmnd(struct scsi_cmnd *cmnd) 3658 + static void stop_qc_helper(struct sdebug_defer *sd_dp) 3659 + { 3660 + if (!sd_dp) 3661 + return; 3662 + if ((sdebug_jdelay > 0) || (sdebug_ndelay > 0)) 3663 + hrtimer_cancel(&sd_dp->hrt); 3664 + else if (sdebug_jdelay < 0) 3665 + cancel_work_sync(&sd_dp->ew.work); 3666 + } 3667 + 3668 + /* If @cmnd found deletes its timer or work queue and returns true; else 3669 + returns false */ 3670 + static bool stop_queued_cmnd(struct scsi_cmnd *cmnd) 3638 3671 { 3639 3672 unsigned long iflags; 3640 - int k, qmax, r_qmax; 3673 + int j, k, qmax, r_qmax; 3674 + struct sdebug_queue *sqp; 3641 3675 struct sdebug_queued_cmd *sqcp; 3642 3676 struct sdebug_dev_info *devip; 3677 + struct sdebug_defer *sd_dp; 3643 3678 3644 - spin_lock_irqsave(&queued_arr_lock, iflags); 3645 - qmax = scsi_debug_max_queue; 3646 - r_qmax = atomic_read(&retired_max_queue); 3647 - if (r_qmax > qmax) 3648 - qmax = r_qmax; 3649 - for (k = 0; k < qmax; ++k) { 3650 - if (test_bit(k, queued_in_use_bm)) { 3651 - sqcp = &queued_arr[k]; 3652 - if (cmnd == sqcp->a_cmnd) { 3679 + for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) { 3680 + spin_lock_irqsave(&sqp->qc_lock, iflags); 3681 + qmax = sdebug_max_queue; 3682 + r_qmax = atomic_read(&retired_max_queue); 3683 + if (r_qmax > qmax) 3684 + qmax = r_qmax; 3685 + for (k = 0; k < qmax; ++k) { 3686 + if (test_bit(k, sqp->in_use_bm)) { 3687 + sqcp = &sqp->qc_arr[k]; 3688 + if (cmnd != sqcp->a_cmnd) 3689 + continue; 3690 + /* found */ 3653 3691 devip = (struct sdebug_dev_info *) 3654 - cmnd->device->hostdata; 3692 + cmnd->device->hostdata; 3655 3693 if (devip) 3656 3694 atomic_dec(&devip->num_in_q); 3657 3695 sqcp->a_cmnd = NULL; 3658 - spin_unlock_irqrestore(&queued_arr_lock, 3659 - iflags); 3660 - if (scsi_debug_ndelay > 0) { 3661 - if (sqcp->sd_hrtp) 3662 - hrtimer_cancel( 3663 - 
&sqcp->sd_hrtp->hrt); 3664 - } else if (scsi_debug_delay > 0) { 3665 - if (sqcp->cmnd_timerp) 3666 - del_timer_sync( 3667 - sqcp->cmnd_timerp); 3668 - } else if (scsi_debug_delay < 0) { 3669 - if (sqcp->tletp) 3670 - tasklet_kill(sqcp->tletp); 3671 - } 3672 - clear_bit(k, queued_in_use_bm); 3673 - return 1; 3696 + sd_dp = sqcp->sd_dp; 3697 + spin_unlock_irqrestore(&sqp->qc_lock, iflags); 3698 + stop_qc_helper(sd_dp); 3699 + clear_bit(k, sqp->in_use_bm); 3700 + return true; 3674 3701 } 3675 3702 } 3703 + spin_unlock_irqrestore(&sqp->qc_lock, iflags); 3676 3704 } 3677 - spin_unlock_irqrestore(&queued_arr_lock, iflags); 3678 - return 0; 3705 + return false; 3679 3706 } 3680 3707 3681 - /* Deletes (stops) timers or tasklets of all queued commands */ 3708 + /* Deletes (stops) timers or work queues of all queued commands */ 3682 3709 static void stop_all_queued(void) 3683 3710 { 3684 3711 unsigned long iflags; 3685 - int k; 3712 + int j, k; 3713 + struct sdebug_queue *sqp; 3686 3714 struct sdebug_queued_cmd *sqcp; 3687 3715 struct sdebug_dev_info *devip; 3716 + struct sdebug_defer *sd_dp; 3688 3717 3689 - spin_lock_irqsave(&queued_arr_lock, iflags); 3690 - for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) { 3691 - if (test_bit(k, queued_in_use_bm)) { 3692 - sqcp = &queued_arr[k]; 3693 - if (sqcp->a_cmnd) { 3718 + for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) { 3719 + spin_lock_irqsave(&sqp->qc_lock, iflags); 3720 + for (k = 0; k < SDEBUG_CANQUEUE; ++k) { 3721 + if (test_bit(k, sqp->in_use_bm)) { 3722 + sqcp = &sqp->qc_arr[k]; 3723 + if (sqcp->a_cmnd == NULL) 3724 + continue; 3694 3725 devip = (struct sdebug_dev_info *) 3695 3726 sqcp->a_cmnd->device->hostdata; 3696 3727 if (devip) 3697 3728 atomic_dec(&devip->num_in_q); 3698 3729 sqcp->a_cmnd = NULL; 3699 - spin_unlock_irqrestore(&queued_arr_lock, 3700 - iflags); 3701 - if (scsi_debug_ndelay > 0) { 3702 - if (sqcp->sd_hrtp) 3703 - hrtimer_cancel( 3704 - &sqcp->sd_hrtp->hrt); 3705 - } else if (scsi_debug_delay > 
0) { 3706 - if (sqcp->cmnd_timerp) 3707 - del_timer_sync( 3708 - sqcp->cmnd_timerp); 3709 - } else if (scsi_debug_delay < 0) { 3710 - if (sqcp->tletp) 3711 - tasklet_kill(sqcp->tletp); 3712 - } 3713 - clear_bit(k, queued_in_use_bm); 3714 - spin_lock_irqsave(&queued_arr_lock, iflags); 3730 + sd_dp = sqcp->sd_dp; 3731 + spin_unlock_irqrestore(&sqp->qc_lock, iflags); 3732 + stop_qc_helper(sd_dp); 3733 + clear_bit(k, sqp->in_use_bm); 3734 + spin_lock_irqsave(&sqp->qc_lock, iflags); 3715 3735 } 3716 3736 } 3737 + spin_unlock_irqrestore(&sqp->qc_lock, iflags); 3717 3738 } 3718 - spin_unlock_irqrestore(&queued_arr_lock, iflags); 3719 3739 } 3720 3740 3721 3741 /* Free queued command memory on heap */ 3722 3742 static void free_all_queued(void) 3723 3743 { 3724 - unsigned long iflags; 3725 - int k; 3744 + int j, k; 3745 + struct sdebug_queue *sqp; 3726 3746 struct sdebug_queued_cmd *sqcp; 3727 3747 3728 - spin_lock_irqsave(&queued_arr_lock, iflags); 3729 - for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) { 3730 - sqcp = &queued_arr[k]; 3731 - kfree(sqcp->cmnd_timerp); 3732 - sqcp->cmnd_timerp = NULL; 3733 - kfree(sqcp->tletp); 3734 - sqcp->tletp = NULL; 3735 - kfree(sqcp->sd_hrtp); 3736 - sqcp->sd_hrtp = NULL; 3748 + for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) { 3749 + for (k = 0; k < SDEBUG_CANQUEUE; ++k) { 3750 + sqcp = &sqp->qc_arr[k]; 3751 + kfree(sqcp->sd_dp); 3752 + sqcp->sd_dp = NULL; 3753 + } 3737 3754 } 3738 - spin_unlock_irqrestore(&queued_arr_lock, iflags); 3739 3755 } 3740 3756 3741 3757 static int scsi_debug_abort(struct scsi_cmnd *SCpnt) 3742 3758 { 3759 + bool ok; 3760 + 3743 3761 ++num_aborts; 3744 3762 if (SCpnt) { 3745 - if (SCpnt->device && 3746 - (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)) 3747 - sdev_printk(KERN_INFO, SCpnt->device, "%s\n", 3748 - __func__); 3749 - stop_queued_cmnd(SCpnt); 3763 + ok = stop_queued_cmnd(SCpnt); 3764 + if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts)) 3765 + sdev_printk(KERN_INFO, SCpnt->device, 
3766 + "%s: command%s found\n", __func__, 3767 + ok ? "" : " not"); 3750 3768 } 3751 3769 return SUCCESS; 3752 3770 } 3753 3771 3754 3772 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt) 3755 3773 { 3756 - struct sdebug_dev_info * devip; 3757 - 3758 3774 ++num_dev_resets; 3759 3775 if (SCpnt && SCpnt->device) { 3760 3776 struct scsi_device *sdp = SCpnt->device; 3777 + struct sdebug_dev_info *devip = 3778 + (struct sdebug_dev_info *)sdp->hostdata; 3761 3779 3762 - if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts) 3780 + if (SDEBUG_OPT_ALL_NOISE & sdebug_opts) 3763 3781 sdev_printk(KERN_INFO, sdp, "%s\n", __func__); 3764 - devip = devInfoReg(sdp); 3765 3782 if (devip) 3766 3783 set_bit(SDEBUG_UA_POR, devip->uas_bm); 3767 3784 } ··· 3777 3804 sdp = SCpnt->device; 3778 3805 if (!sdp) 3779 3806 goto lie; 3780 - if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts) 3807 + if (SDEBUG_OPT_ALL_NOISE & sdebug_opts) 3781 3808 sdev_printk(KERN_INFO, sdp, "%s\n", __func__); 3782 3809 hp = sdp->host; 3783 3810 if (!hp) ··· 3792 3819 ++k; 3793 3820 } 3794 3821 } 3795 - if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts) 3822 + if (SDEBUG_OPT_RESET_NOISE & sdebug_opts) 3796 3823 sdev_printk(KERN_INFO, sdp, 3797 3824 "%s: %d device(s) found in target\n", __func__, k); 3798 3825 lie: ··· 3811 3838 if (!(SCpnt && SCpnt->device)) 3812 3839 goto lie; 3813 3840 sdp = SCpnt->device; 3814 - if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts) 3841 + if (SDEBUG_OPT_ALL_NOISE & sdebug_opts) 3815 3842 sdev_printk(KERN_INFO, sdp, "%s\n", __func__); 3816 3843 hp = sdp->host; 3817 3844 if (hp) { ··· 3825 3852 } 3826 3853 } 3827 3854 } 3828 - if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts) 3855 + if (SDEBUG_OPT_RESET_NOISE & sdebug_opts) 3829 3856 sdev_printk(KERN_INFO, sdp, 3830 3857 "%s: %d device(s) found in host\n", __func__, k); 3831 3858 lie: ··· 3839 3866 int k = 0; 3840 3867 3841 3868 ++num_host_resets; 3842 - if ((SCpnt->device) && (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)) 3869 + if 
((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts)) 3843 3870 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__); 3844 3871 spin_lock(&sdebug_host_list_lock); 3845 3872 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) { ··· 3851 3878 } 3852 3879 spin_unlock(&sdebug_host_list_lock); 3853 3880 stop_all_queued(); 3854 - if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts) 3881 + if (SDEBUG_OPT_RESET_NOISE & sdebug_opts) 3855 3882 sdev_printk(KERN_INFO, SCpnt->device, 3856 3883 "%s: %d device(s) found\n", __func__, k); 3857 3884 return SUCCESS; ··· 3866 3893 int heads_by_sects, start_sec, end_sec; 3867 3894 3868 3895 /* assume partition table already zeroed */ 3869 - if ((scsi_debug_num_parts < 1) || (store_size < 1048576)) 3896 + if ((sdebug_num_parts < 1) || (store_size < 1048576)) 3870 3897 return; 3871 - if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) { 3872 - scsi_debug_num_parts = SDEBUG_MAX_PARTS; 3898 + if (sdebug_num_parts > SDEBUG_MAX_PARTS) { 3899 + sdebug_num_parts = SDEBUG_MAX_PARTS; 3873 3900 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS); 3874 3901 } 3875 3902 num_sectors = (int)sdebug_store_sectors; 3876 3903 sectors_per_part = (num_sectors - sdebug_sectors_per) 3877 - / scsi_debug_num_parts; 3904 + / sdebug_num_parts; 3878 3905 heads_by_sects = sdebug_heads * sdebug_sectors_per; 3879 3906 starts[0] = sdebug_sectors_per; 3880 - for (k = 1; k < scsi_debug_num_parts; ++k) 3907 + for (k = 1; k < sdebug_num_parts; ++k) 3881 3908 starts[k] = ((k * sectors_per_part) / heads_by_sects) 3882 3909 * heads_by_sects; 3883 - starts[scsi_debug_num_parts] = num_sectors; 3884 - starts[scsi_debug_num_parts + 1] = 0; 3910 + starts[sdebug_num_parts] = num_sectors; 3911 + starts[sdebug_num_parts + 1] = 0; 3885 3912 3886 3913 ramp[510] = 0x55; /* magic partition markings */ 3887 3914 ramp[511] = 0xAA; ··· 3907 3934 } 3908 3935 } 3909 3936 3910 - static int 3911 - schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, 3912 - int 
scsi_result, int delta_jiff) 3937 + static void block_unblock_all_queues(bool block) 3938 + { 3939 + int j; 3940 + struct sdebug_queue *sqp; 3941 + 3942 + for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) 3943 + atomic_set(&sqp->blocked, (int)block); 3944 + } 3945 + 3946 + /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1 3947 + * commands will be processed normally before triggers occur. 3948 + */ 3949 + static void tweak_cmnd_count(void) 3950 + { 3951 + int count, modulo; 3952 + 3953 + modulo = abs(sdebug_every_nth); 3954 + if (modulo < 2) 3955 + return; 3956 + block_unblock_all_queues(true); 3957 + count = atomic_read(&sdebug_cmnd_count); 3958 + atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo); 3959 + block_unblock_all_queues(false); 3960 + } 3961 + 3962 + static void clear_queue_stats(void) 3963 + { 3964 + atomic_set(&sdebug_cmnd_count, 0); 3965 + atomic_set(&sdebug_completions, 0); 3966 + atomic_set(&sdebug_miss_cpus, 0); 3967 + atomic_set(&sdebug_a_tsf, 0); 3968 + } 3969 + 3970 + static void setup_inject(struct sdebug_queue *sqp, 3971 + struct sdebug_queued_cmd *sqcp) 3972 + { 3973 + if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0) 3974 + return; 3975 + sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts); 3976 + sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts); 3977 + sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts); 3978 + sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts); 3979 + sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts); 3980 + } 3981 + 3982 + /* Complete the processing of the thread that queued a SCSI command to this 3983 + * driver. It either completes the command by calling cmnd_done() or 3984 + * schedules a hr timer or work queue then returns 0. Returns 3985 + * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources. 
3986 + */ 3987 + static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, 3988 + int scsi_result, int delta_jiff) 3913 3989 { 3914 3990 unsigned long iflags; 3915 3991 int k, num_in_q, qdepth, inject; 3916 - struct sdebug_queued_cmd *sqcp = NULL; 3992 + struct sdebug_queue *sqp; 3993 + struct sdebug_queued_cmd *sqcp; 3917 3994 struct scsi_device *sdp; 3995 + struct sdebug_defer *sd_dp; 3918 3996 3919 - /* this should never happen */ 3920 - if (WARN_ON(!cmnd)) 3921 - return SCSI_MLQUEUE_HOST_BUSY; 3922 - 3923 - if (NULL == devip) { 3924 - pr_warn("called devip == NULL\n"); 3925 - /* no particularly good error to report back */ 3926 - return SCSI_MLQUEUE_HOST_BUSY; 3997 + if (unlikely(devip == NULL)) { 3998 + if (scsi_result == 0) 3999 + scsi_result = DID_NO_CONNECT << 16; 4000 + goto respond_in_thread; 3927 4001 } 3928 - 3929 4002 sdp = cmnd->device; 3930 4003 3931 - if ((scsi_result) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) 4004 + if (unlikely(sdebug_verbose && scsi_result)) 3932 4005 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n", 3933 4006 __func__, scsi_result); 3934 4007 if (delta_jiff == 0) 3935 4008 goto respond_in_thread; 3936 4009 3937 4010 /* schedule the response at a later time if resources permit */ 3938 - spin_lock_irqsave(&queued_arr_lock, iflags); 4011 + sqp = get_queue(cmnd); 4012 + spin_lock_irqsave(&sqp->qc_lock, iflags); 4013 + if (unlikely(atomic_read(&sqp->blocked))) { 4014 + spin_unlock_irqrestore(&sqp->qc_lock, iflags); 4015 + return SCSI_MLQUEUE_HOST_BUSY; 4016 + } 3939 4017 num_in_q = atomic_read(&devip->num_in_q); 3940 4018 qdepth = cmnd->device->queue_depth; 3941 4019 inject = 0; 3942 - if ((qdepth > 0) && (num_in_q >= qdepth)) { 4020 + if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) { 3943 4021 if (scsi_result) { 3944 - spin_unlock_irqrestore(&queued_arr_lock, iflags); 4022 + spin_unlock_irqrestore(&sqp->qc_lock, iflags); 3945 4023 goto respond_in_thread; 3946 4024 } else 3947 4025 scsi_result = 
device_qfull_result; 3948 - } else if ((scsi_debug_every_nth != 0) && 3949 - (SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) && 3950 - (scsi_result == 0)) { 4026 + } else if (unlikely(sdebug_every_nth && 4027 + (SDEBUG_OPT_RARE_TSF & sdebug_opts) && 4028 + (scsi_result == 0))) { 3951 4029 if ((num_in_q == (qdepth - 1)) && 3952 4030 (atomic_inc_return(&sdebug_a_tsf) >= 3953 - abs(scsi_debug_every_nth))) { 4031 + abs(sdebug_every_nth))) { 3954 4032 atomic_set(&sdebug_a_tsf, 0); 3955 4033 inject = 1; 3956 4034 scsi_result = device_qfull_result; 3957 4035 } 3958 4036 } 3959 4037 3960 - k = find_first_zero_bit(queued_in_use_bm, scsi_debug_max_queue); 3961 - if (k >= scsi_debug_max_queue) { 3962 - spin_unlock_irqrestore(&queued_arr_lock, iflags); 4038 + k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue); 4039 + if (unlikely(k >= sdebug_max_queue)) { 4040 + spin_unlock_irqrestore(&sqp->qc_lock, iflags); 3963 4041 if (scsi_result) 3964 4042 goto respond_in_thread; 3965 - else if (SCSI_DEBUG_OPT_ALL_TSF & scsi_debug_opts) 4043 + else if (SDEBUG_OPT_ALL_TSF & sdebug_opts) 3966 4044 scsi_result = device_qfull_result; 3967 - if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) 4045 + if (SDEBUG_OPT_Q_NOISE & sdebug_opts) 3968 4046 sdev_printk(KERN_INFO, sdp, 3969 4047 "%s: max_queue=%d exceeded, %s\n", 3970 - __func__, scsi_debug_max_queue, 4048 + __func__, sdebug_max_queue, 3971 4049 (scsi_result ? 
"status: TASK SET FULL" : 3972 4050 "report: host busy")); 3973 4051 if (scsi_result) ··· 4026 4002 else 4027 4003 return SCSI_MLQUEUE_HOST_BUSY; 4028 4004 } 4029 - __set_bit(k, queued_in_use_bm); 4005 + __set_bit(k, sqp->in_use_bm); 4030 4006 atomic_inc(&devip->num_in_q); 4031 - sqcp = &queued_arr[k]; 4007 + sqcp = &sqp->qc_arr[k]; 4032 4008 sqcp->a_cmnd = cmnd; 4009 + cmnd->host_scribble = (unsigned char *)sqcp; 4033 4010 cmnd->result = scsi_result; 4034 - spin_unlock_irqrestore(&queued_arr_lock, iflags); 4035 - if (delta_jiff > 0) { 4036 - if (NULL == sqcp->cmnd_timerp) { 4037 - sqcp->cmnd_timerp = kmalloc(sizeof(struct timer_list), 4038 - GFP_ATOMIC); 4039 - if (NULL == sqcp->cmnd_timerp) 4040 - return SCSI_MLQUEUE_HOST_BUSY; 4041 - init_timer(sqcp->cmnd_timerp); 4042 - } 4043 - sqcp->cmnd_timerp->function = sdebug_q_cmd_complete; 4044 - sqcp->cmnd_timerp->data = k; 4045 - sqcp->cmnd_timerp->expires = get_jiffies_64() + delta_jiff; 4046 - add_timer(sqcp->cmnd_timerp); 4047 - } else if (scsi_debug_ndelay > 0) { 4048 - ktime_t kt = ktime_set(0, scsi_debug_ndelay); 4049 - struct sdebug_hrtimer *sd_hp = sqcp->sd_hrtp; 4011 + sd_dp = sqcp->sd_dp; 4012 + spin_unlock_irqrestore(&sqp->qc_lock, iflags); 4013 + if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt)) 4014 + setup_inject(sqp, sqcp); 4015 + if (delta_jiff > 0 || sdebug_ndelay > 0) { 4016 + ktime_t kt; 4050 4017 4051 - if (NULL == sd_hp) { 4052 - sd_hp = kmalloc(sizeof(*sd_hp), GFP_ATOMIC); 4053 - if (NULL == sd_hp) 4018 + if (delta_jiff > 0) { 4019 + struct timespec ts; 4020 + 4021 + jiffies_to_timespec(delta_jiff, &ts); 4022 + kt = ktime_set(ts.tv_sec, ts.tv_nsec); 4023 + } else 4024 + kt = ktime_set(0, sdebug_ndelay); 4025 + if (NULL == sd_dp) { 4026 + sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC); 4027 + if (NULL == sd_dp) 4054 4028 return SCSI_MLQUEUE_HOST_BUSY; 4055 - sqcp->sd_hrtp = sd_hp; 4056 - hrtimer_init(&sd_hp->hrt, CLOCK_MONOTONIC, 4057 - HRTIMER_MODE_REL); 4058 - sd_hp->hrt.function = 
sdebug_q_cmd_hrt_complete; 4059 - sd_hp->qa_indx = k; 4029 + sqcp->sd_dp = sd_dp; 4030 + hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, 4031 + HRTIMER_MODE_REL_PINNED); 4032 + sd_dp->hrt.function = sdebug_q_cmd_hrt_complete; 4033 + sd_dp->sqa_idx = sqp - sdebug_q_arr; 4034 + sd_dp->qc_idx = k; 4060 4035 } 4061 - hrtimer_start(&sd_hp->hrt, kt, HRTIMER_MODE_REL); 4062 - } else { /* delay < 0 */ 4063 - if (NULL == sqcp->tletp) { 4064 - sqcp->tletp = kmalloc(sizeof(*sqcp->tletp), 4065 - GFP_ATOMIC); 4066 - if (NULL == sqcp->tletp) 4036 + if (sdebug_statistics) 4037 + sd_dp->issuing_cpu = raw_smp_processor_id(); 4038 + hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED); 4039 + } else { /* jdelay < 0, use work queue */ 4040 + if (NULL == sd_dp) { 4041 + sd_dp = kzalloc(sizeof(*sqcp->sd_dp), GFP_ATOMIC); 4042 + if (NULL == sd_dp) 4067 4043 return SCSI_MLQUEUE_HOST_BUSY; 4068 - tasklet_init(sqcp->tletp, 4069 - sdebug_q_cmd_complete, k); 4044 + sqcp->sd_dp = sd_dp; 4045 + sd_dp->sqa_idx = sqp - sdebug_q_arr; 4046 + sd_dp->qc_idx = k; 4047 + INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete); 4070 4048 } 4071 - if (-1 == delta_jiff) 4072 - tasklet_hi_schedule(sqcp->tletp); 4073 - else 4074 - tasklet_schedule(sqcp->tletp); 4049 + if (sdebug_statistics) 4050 + sd_dp->issuing_cpu = raw_smp_processor_id(); 4051 + schedule_work(&sd_dp->ew.work); 4075 4052 } 4076 - if ((SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) && 4077 - (scsi_result == device_qfull_result)) 4053 + if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && 4054 + (scsi_result == device_qfull_result))) 4078 4055 sdev_printk(KERN_INFO, sdp, 4079 4056 "%s: num_in_q=%d +1, %s%s\n", __func__, 4080 4057 num_in_q, (inject ? "<inject> " : ""), ··· 4094 4069 as it can when the corresponding attribute in the 4095 4070 /sys/bus/pseudo/drivers/scsi_debug directory is changed. 
4096 4071 */ 4097 - module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR); 4098 - module_param_named(ato, scsi_debug_ato, int, S_IRUGO); 4099 - module_param_named(clustering, scsi_debug_clustering, bool, S_IRUGO | S_IWUSR); 4100 - module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR); 4101 - module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO); 4102 - module_param_named(dif, scsi_debug_dif, int, S_IRUGO); 4103 - module_param_named(dix, scsi_debug_dix, int, S_IRUGO); 4104 - module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR); 4105 - module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR); 4106 - module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR); 4107 - module_param_named(guard, scsi_debug_guard, uint, S_IRUGO); 4108 - module_param_named(host_lock, scsi_debug_host_lock, bool, S_IRUGO | S_IWUSR); 4109 - module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO); 4110 - module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO); 4111 - module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO); 4112 - module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO); 4113 - module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO); 4114 - module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR); 4115 - module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR); 4116 - module_param_named(ndelay, scsi_debug_ndelay, int, S_IRUGO | S_IWUSR); 4117 - module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR); 4118 - module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO); 4119 - module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO); 4120 - module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR); 4121 - module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO); 4122 - module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR); 4123 - 
module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO); 4124 - module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR); 4125 - module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR); 4126 - module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO); 4127 - module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO); 4128 - module_param_named(strict, scsi_debug_strict, bool, S_IRUGO | S_IWUSR); 4129 - module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO); 4130 - module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO); 4131 - module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO); 4132 - module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO); 4133 - module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR); 4134 - module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int, 4072 + module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR); 4073 + module_param_named(ato, sdebug_ato, int, S_IRUGO); 4074 + module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR); 4075 + module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR); 4076 + module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO); 4077 + module_param_named(dif, sdebug_dif, int, S_IRUGO); 4078 + module_param_named(dix, sdebug_dix, int, S_IRUGO); 4079 + module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR); 4080 + module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR); 4081 + module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR); 4082 + module_param_named(guard, sdebug_guard, uint, S_IRUGO); 4083 + module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR); 4084 + module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO); 4085 + module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO); 4086 + module_param_named(lbpws10, 
sdebug_lbpws10, int, S_IRUGO); 4087 + module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO); 4088 + module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO); 4089 + module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR); 4090 + module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR); 4091 + module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR); 4092 + module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR); 4093 + module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO); 4094 + module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO); 4095 + module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR); 4096 + module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO); 4097 + module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR); 4098 + module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO); 4099 + module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR); 4100 + module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR); 4101 + module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO); 4102 + module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO); 4103 + module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR); 4104 + module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR); 4105 + module_param_named(submit_queues, submit_queues, int, S_IRUGO); 4106 + module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO); 4107 + module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO); 4108 + module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO); 4109 + module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO); 4110 + module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR); 4111 + module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO); 4112 + module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int, 4135 4113 
S_IRUGO | S_IWUSR); 4136 - module_param_named(write_same_length, scsi_debug_write_same_length, int, 4114 + module_param_named(write_same_length, sdebug_write_same_length, int, 4137 4115 S_IRUGO | S_IWUSR); 4138 4116 4139 4117 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert"); 4140 4118 MODULE_DESCRIPTION("SCSI debug adapter driver"); 4141 4119 MODULE_LICENSE("GPL"); 4142 - MODULE_VERSION(SCSI_DEBUG_VERSION); 4120 + MODULE_VERSION(SDEBUG_VERSION); 4143 4121 4144 4122 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)"); 4145 4123 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)"); ··· 4155 4127 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)"); 4156 4128 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)"); 4157 4129 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)"); 4158 - MODULE_PARM_DESC(host_lock, "use host_lock around all commands (def=0)"); 4130 + MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)"); 4159 4131 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)"); 4160 4132 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)"); 4161 4133 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)"); 4162 - MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)"); 4134 + MODULE_PARM_DESC(lbprz, 4135 + "on read unmapped LBs return 0 when 1 (def), return 0xff when 2"); 4163 4136 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)"); 4164 4137 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)"); 4165 4138 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))"); ··· 4174 4145 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)"); 4175 4146 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])"); 4176 4147 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)"); 4177 - MODULE_PARM_DESC(scsi_level, "SCSI level to 
simulate(def=6[SPC-4])"); 4148 + MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])"); 4178 4149 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)"); 4150 + MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)"); 4179 4151 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)"); 4152 + MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)"); 4180 4153 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)"); 4181 4154 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)"); 4182 4155 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)"); 4183 4156 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)"); 4157 + MODULE_PARM_DESC(uuid_ctl, 4158 + "1->use uuid for lu name, 0->don't, 2->all use same (def=0)"); 4184 4159 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)"); 4185 4160 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)"); 4186 4161 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)"); 4187 4162 4188 - static char sdebug_info[256]; 4163 + #define SDEBUG_INFO_LEN 256 4164 + static char sdebug_info[SDEBUG_INFO_LEN]; 4189 4165 4190 4166 static const char * scsi_debug_info(struct Scsi_Host * shp) 4191 4167 { 4192 - sprintf(sdebug_info, "scsi_debug, version %s [%s], " 4193 - "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION, 4194 - scsi_debug_version_date, scsi_debug_dev_size_mb, 4195 - scsi_debug_opts); 4168 + int k; 4169 + 4170 + k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n", 4171 + my_name, SDEBUG_VERSION, sdebug_version_date); 4172 + if (k >= (SDEBUG_INFO_LEN - 1)) 4173 + return sdebug_info; 4174 + scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k, 4175 + " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d", 
4176 + sdebug_dev_size_mb, sdebug_opts, submit_queues, 4177 + "statistics", (int)sdebug_statistics); 4196 4178 return sdebug_info; 4197 4179 } 4198 4180 4199 4181 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */ 4200 - static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length) 4182 + static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, 4183 + int length) 4201 4184 { 4202 4185 char arr[16]; 4203 4186 int opts; ··· 4221 4180 arr[minLen] = '\0'; 4222 4181 if (1 != sscanf(arr, "%d", &opts)) 4223 4182 return -EINVAL; 4224 - scsi_debug_opts = opts; 4225 - if (scsi_debug_every_nth != 0) 4226 - atomic_set(&sdebug_cmnd_count, 0); 4183 + sdebug_opts = opts; 4184 + sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts); 4185 + sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts); 4186 + if (sdebug_every_nth != 0) 4187 + tweak_cmnd_count(); 4227 4188 return length; 4228 4189 } 4229 4190 ··· 4234 4191 * output are not atomics so might be inaccurate in a busy system. */ 4235 4192 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host) 4236 4193 { 4237 - int f, l; 4238 - char b[32]; 4194 + int f, j, l; 4195 + struct sdebug_queue *sqp; 4239 4196 4240 - if (scsi_debug_every_nth > 0) 4241 - snprintf(b, sizeof(b), " (curr:%d)", 4242 - ((SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) ? 
4243 - atomic_read(&sdebug_a_tsf) : 4244 - atomic_read(&sdebug_cmnd_count))); 4245 - else 4246 - b[0] = '\0'; 4197 + seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n", 4198 + SDEBUG_VERSION, sdebug_version_date); 4199 + seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n", 4200 + sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb, 4201 + sdebug_opts, sdebug_every_nth); 4202 + seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n", 4203 + sdebug_jdelay, sdebug_ndelay, sdebug_max_luns, 4204 + sdebug_sector_size, "bytes"); 4205 + seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n", 4206 + sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per, 4207 + num_aborts); 4208 + seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n", 4209 + num_dev_resets, num_target_resets, num_bus_resets, 4210 + num_host_resets); 4211 + seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n", 4212 + dix_reads, dix_writes, dif_errors); 4213 + seq_printf(m, "usec_in_jiffy=%lu, %s=%d, mq_active=%d\n", 4214 + TICK_NSEC / 1000, "statistics", sdebug_statistics, 4215 + sdebug_mq_active); 4216 + seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n", 4217 + atomic_read(&sdebug_cmnd_count), 4218 + atomic_read(&sdebug_completions), 4219 + "miss_cpus", atomic_read(&sdebug_miss_cpus), 4220 + atomic_read(&sdebug_a_tsf)); 4247 4221 4248 - seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n" 4249 - "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, " 4250 - "every_nth=%d%s\n" 4251 - "delay=%d, ndelay=%d, max_luns=%d, q_completions=%d\n" 4252 - "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n" 4253 - "command aborts=%d; RESETs: device=%d, target=%d, bus=%d, " 4254 - "host=%d\ndix_reads=%d dix_writes=%d dif_errors=%d " 4255 - "usec_in_jiffy=%lu\n", 4256 - SCSI_DEBUG_VERSION, scsi_debug_version_date, 4257 - scsi_debug_num_tgts, scsi_debug_dev_size_mb, scsi_debug_opts, 4258 - scsi_debug_every_nth, b, 
scsi_debug_delay, scsi_debug_ndelay, 4259 - scsi_debug_max_luns, atomic_read(&sdebug_completions), 4260 - scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads, 4261 - sdebug_sectors_per, num_aborts, num_dev_resets, 4262 - num_target_resets, num_bus_resets, num_host_resets, 4263 - dix_reads, dix_writes, dif_errors, TICK_NSEC / 1000); 4264 - 4265 - f = find_first_bit(queued_in_use_bm, scsi_debug_max_queue); 4266 - if (f != scsi_debug_max_queue) { 4267 - l = find_last_bit(queued_in_use_bm, scsi_debug_max_queue); 4268 - seq_printf(m, " %s BUSY: first,last bits set: %d,%d\n", 4269 - "queued_in_use_bm", f, l); 4222 + seq_printf(m, "submit_queues=%d\n", submit_queues); 4223 + for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) { 4224 + seq_printf(m, " queue %d:\n", j); 4225 + f = find_first_bit(sqp->in_use_bm, sdebug_max_queue); 4226 + if (f != sdebug_max_queue) { 4227 + l = find_last_bit(sqp->in_use_bm, sdebug_max_queue); 4228 + seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n", 4229 + "first,last bits", f, l); 4230 + } 4270 4231 } 4271 4232 return 0; 4272 4233 } 4273 4234 4274 4235 static ssize_t delay_show(struct device_driver *ddp, char *buf) 4275 4236 { 4276 - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay); 4237 + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay); 4277 4238 } 4278 - /* Returns -EBUSY if delay is being changed and commands are queued */ 4239 + /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit 4240 + * of delay is jiffies. 
4241 + */ 4279 4242 static ssize_t delay_store(struct device_driver *ddp, const char *buf, 4280 4243 size_t count) 4281 4244 { 4282 - int delay, res; 4245 + int jdelay, res; 4283 4246 4284 - if ((count > 0) && (1 == sscanf(buf, "%d", &delay))) { 4247 + if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) { 4285 4248 res = count; 4286 - if (scsi_debug_delay != delay) { 4287 - unsigned long iflags; 4288 - int k; 4249 + if (sdebug_jdelay != jdelay) { 4250 + int j, k; 4251 + struct sdebug_queue *sqp; 4289 4252 4290 - spin_lock_irqsave(&queued_arr_lock, iflags); 4291 - k = find_first_bit(queued_in_use_bm, 4292 - scsi_debug_max_queue); 4293 - if (k != scsi_debug_max_queue) 4294 - res = -EBUSY; /* have queued commands */ 4295 - else { 4296 - scsi_debug_delay = delay; 4297 - scsi_debug_ndelay = 0; 4253 + block_unblock_all_queues(true); 4254 + for (j = 0, sqp = sdebug_q_arr; j < submit_queues; 4255 + ++j, ++sqp) { 4256 + k = find_first_bit(sqp->in_use_bm, 4257 + sdebug_max_queue); 4258 + if (k != sdebug_max_queue) { 4259 + res = -EBUSY; /* queued commands */ 4260 + break; 4261 + } 4298 4262 } 4299 - spin_unlock_irqrestore(&queued_arr_lock, iflags); 4263 + if (res > 0) { 4264 + /* make sure sdebug_defer instances get 4265 + * re-allocated for new delay variant */ 4266 + free_all_queued(); 4267 + sdebug_jdelay = jdelay; 4268 + sdebug_ndelay = 0; 4269 + } 4270 + block_unblock_all_queues(false); 4300 4271 } 4301 4272 return res; 4302 4273 } ··· 4320 4263 4321 4264 static ssize_t ndelay_show(struct device_driver *ddp, char *buf) 4322 4265 { 4323 - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ndelay); 4266 + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay); 4324 4267 } 4325 4268 /* Returns -EBUSY if ndelay is being changed and commands are queued */ 4326 - /* If > 0 and accepted then scsi_debug_delay is set to DELAY_OVERRIDDEN */ 4269 + /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */ 4327 4270 static ssize_t ndelay_store(struct device_driver 
*ddp, const char *buf, 4328 - size_t count) 4271 + size_t count) 4329 4272 { 4330 - unsigned long iflags; 4331 - int ndelay, res, k; 4273 + int ndelay, res; 4332 4274 4333 4275 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) && 4334 - (ndelay >= 0) && (ndelay < 1000000000)) { 4276 + (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) { 4335 4277 res = count; 4336 - if (scsi_debug_ndelay != ndelay) { 4337 - spin_lock_irqsave(&queued_arr_lock, iflags); 4338 - k = find_first_bit(queued_in_use_bm, 4339 - scsi_debug_max_queue); 4340 - if (k != scsi_debug_max_queue) 4341 - res = -EBUSY; /* have queued commands */ 4342 - else { 4343 - scsi_debug_ndelay = ndelay; 4344 - scsi_debug_delay = ndelay ? DELAY_OVERRIDDEN 4345 - : DEF_DELAY; 4278 + if (sdebug_ndelay != ndelay) { 4279 + int j, k; 4280 + struct sdebug_queue *sqp; 4281 + 4282 + block_unblock_all_queues(true); 4283 + for (j = 0, sqp = sdebug_q_arr; j < submit_queues; 4284 + ++j, ++sqp) { 4285 + k = find_first_bit(sqp->in_use_bm, 4286 + sdebug_max_queue); 4287 + if (k != sdebug_max_queue) { 4288 + res = -EBUSY; /* queued commands */ 4289 + break; 4290 + } 4346 4291 } 4347 - spin_unlock_irqrestore(&queued_arr_lock, iflags); 4292 + if (res > 0) { 4293 + /* make sure sdebug_defer instances get 4294 + * re-allocated for new delay variant */ 4295 + free_all_queued(); 4296 + sdebug_ndelay = ndelay; 4297 + sdebug_jdelay = ndelay ? 
JDELAY_OVERRIDDEN 4298 + : DEF_JDELAY; 4299 + } 4300 + block_unblock_all_queues(false); 4348 4301 } 4349 4302 return res; 4350 4303 } ··· 4364 4297 4365 4298 static ssize_t opts_show(struct device_driver *ddp, char *buf) 4366 4299 { 4367 - return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts); 4300 + return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts); 4368 4301 } 4369 4302 4370 4303 static ssize_t opts_store(struct device_driver *ddp, const char *buf, ··· 4384 4317 } 4385 4318 return -EINVAL; 4386 4319 opts_done: 4387 - scsi_debug_opts = opts; 4388 - if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts) 4389 - sdebug_any_injecting_opt = true; 4390 - else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts) 4391 - sdebug_any_injecting_opt = true; 4392 - else if (SCSI_DEBUG_OPT_DIF_ERR & opts) 4393 - sdebug_any_injecting_opt = true; 4394 - else if (SCSI_DEBUG_OPT_DIX_ERR & opts) 4395 - sdebug_any_injecting_opt = true; 4396 - else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts) 4397 - sdebug_any_injecting_opt = true; 4398 - atomic_set(&sdebug_cmnd_count, 0); 4399 - atomic_set(&sdebug_a_tsf, 0); 4320 + sdebug_opts = opts; 4321 + sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts); 4322 + sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts); 4323 + tweak_cmnd_count(); 4400 4324 return count; 4401 4325 } 4402 4326 static DRIVER_ATTR_RW(opts); 4403 4327 4404 4328 static ssize_t ptype_show(struct device_driver *ddp, char *buf) 4405 4329 { 4406 - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype); 4330 + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype); 4407 4331 } 4408 4332 static ssize_t ptype_store(struct device_driver *ddp, const char *buf, 4409 4333 size_t count) ··· 4402 4344 int n; 4403 4345 4404 4346 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { 4405 - scsi_debug_ptype = n; 4347 + sdebug_ptype = n; 4406 4348 return count; 4407 4349 } 4408 4350 return -EINVAL; ··· 4411 4353 4412 4354 static ssize_t dsense_show(struct device_driver *ddp, char *buf) 4413 
4355 { 4414 - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense); 4356 + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense); 4415 4357 } 4416 4358 static ssize_t dsense_store(struct device_driver *ddp, const char *buf, 4417 4359 size_t count) ··· 4419 4361 int n; 4420 4362 4421 4363 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { 4422 - scsi_debug_dsense = n; 4364 + sdebug_dsense = n; 4423 4365 return count; 4424 4366 } 4425 4367 return -EINVAL; ··· 4428 4370 4429 4371 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf) 4430 4372 { 4431 - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw); 4373 + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw); 4432 4374 } 4433 4375 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf, 4434 4376 size_t count) ··· 4437 4379 4438 4380 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { 4439 4381 n = (n > 0); 4440 - scsi_debug_fake_rw = (scsi_debug_fake_rw > 0); 4441 - if (scsi_debug_fake_rw != n) { 4382 + sdebug_fake_rw = (sdebug_fake_rw > 0); 4383 + if (sdebug_fake_rw != n) { 4442 4384 if ((0 == n) && (NULL == fake_storep)) { 4443 4385 unsigned long sz = 4444 - (unsigned long)scsi_debug_dev_size_mb * 4386 + (unsigned long)sdebug_dev_size_mb * 4445 4387 1048576; 4446 4388 4447 4389 fake_storep = vmalloc(sz); ··· 4451 4393 } 4452 4394 memset(fake_storep, 0, sz); 4453 4395 } 4454 - scsi_debug_fake_rw = n; 4396 + sdebug_fake_rw = n; 4455 4397 } 4456 4398 return count; 4457 4399 } ··· 4461 4403 4462 4404 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf) 4463 4405 { 4464 - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0); 4406 + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0); 4465 4407 } 4466 4408 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf, 4467 4409 size_t count) ··· 4469 4411 int n; 4470 4412 4471 4413 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { 4472 - 
scsi_debug_no_lun_0 = n; 4414 + sdebug_no_lun_0 = n; 4473 4415 return count; 4474 4416 } 4475 4417 return -EINVAL; ··· 4478 4420 4479 4421 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf) 4480 4422 { 4481 - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts); 4423 + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts); 4482 4424 } 4483 4425 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf, 4484 4426 size_t count) ··· 4486 4428 int n; 4487 4429 4488 4430 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { 4489 - scsi_debug_num_tgts = n; 4431 + sdebug_num_tgts = n; 4490 4432 sdebug_max_tgts_luns(); 4491 4433 return count; 4492 4434 } ··· 4496 4438 4497 4439 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf) 4498 4440 { 4499 - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb); 4441 + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb); 4500 4442 } 4501 4443 static DRIVER_ATTR_RO(dev_size_mb); 4502 4444 4503 4445 static ssize_t num_parts_show(struct device_driver *ddp, char *buf) 4504 4446 { 4505 - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts); 4447 + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts); 4506 4448 } 4507 4449 static DRIVER_ATTR_RO(num_parts); 4508 4450 4509 4451 static ssize_t every_nth_show(struct device_driver *ddp, char *buf) 4510 4452 { 4511 - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth); 4453 + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth); 4512 4454 } 4513 4455 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf, 4514 4456 size_t count) ··· 4516 4458 int nth; 4517 4459 4518 4460 if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) { 4519 - scsi_debug_every_nth = nth; 4520 - atomic_set(&sdebug_cmnd_count, 0); 4461 + sdebug_every_nth = nth; 4462 + if (nth && !sdebug_statistics) { 4463 + pr_info("every_nth needs statistics=1, set it\n"); 4464 + sdebug_statistics = 
true; 4465 + } 4466 + tweak_cmnd_count(); 4521 4467 return count; 4522 4468 } 4523 4469 return -EINVAL; ··· 4530 4468 4531 4469 static ssize_t max_luns_show(struct device_driver *ddp, char *buf) 4532 4470 { 4533 - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns); 4471 + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns); 4534 4472 } 4535 4473 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf, 4536 4474 size_t count) ··· 4539 4477 bool changed; 4540 4478 4541 4479 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { 4542 - changed = (scsi_debug_max_luns != n); 4543 - scsi_debug_max_luns = n; 4480 + if (n > 256) { 4481 + pr_warn("max_luns can be no more than 256\n"); 4482 + return -EINVAL; 4483 + } 4484 + changed = (sdebug_max_luns != n); 4485 + sdebug_max_luns = n; 4544 4486 sdebug_max_tgts_luns(); 4545 - if (changed && (scsi_debug_scsi_level >= 5)) { /* >= SPC-3 */ 4487 + if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */ 4546 4488 struct sdebug_host_info *sdhp; 4547 4489 struct sdebug_dev_info *dp; 4548 4490 ··· 4569 4503 4570 4504 static ssize_t max_queue_show(struct device_driver *ddp, char *buf) 4571 4505 { 4572 - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue); 4506 + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue); 4573 4507 } 4574 4508 /* N.B. max_queue can be changed while there are queued commands. In flight 4575 4509 * commands beyond the new max_queue will be completed. 
*/ 4576 4510 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf, 4577 4511 size_t count) 4578 4512 { 4579 - unsigned long iflags; 4580 - int n, k; 4513 + int j, n, k, a; 4514 + struct sdebug_queue *sqp; 4581 4515 4582 4516 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) && 4583 - (n <= SCSI_DEBUG_CANQUEUE)) { 4584 - spin_lock_irqsave(&queued_arr_lock, iflags); 4585 - k = find_last_bit(queued_in_use_bm, SCSI_DEBUG_CANQUEUE); 4586 - scsi_debug_max_queue = n; 4587 - if (SCSI_DEBUG_CANQUEUE == k) 4517 + (n <= SDEBUG_CANQUEUE)) { 4518 + block_unblock_all_queues(true); 4519 + k = 0; 4520 + for (j = 0, sqp = sdebug_q_arr; j < submit_queues; 4521 + ++j, ++sqp) { 4522 + a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE); 4523 + if (a > k) 4524 + k = a; 4525 + } 4526 + sdebug_max_queue = n; 4527 + if (k == SDEBUG_CANQUEUE) 4588 4528 atomic_set(&retired_max_queue, 0); 4589 4529 else if (k >= n) 4590 4530 atomic_set(&retired_max_queue, k + 1); 4591 4531 else 4592 4532 atomic_set(&retired_max_queue, 0); 4593 - spin_unlock_irqrestore(&queued_arr_lock, iflags); 4533 + block_unblock_all_queues(false); 4594 4534 return count; 4595 4535 } 4596 4536 return -EINVAL; ··· 4605 4533 4606 4534 static ssize_t no_uld_show(struct device_driver *ddp, char *buf) 4607 4535 { 4608 - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld); 4536 + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld); 4609 4537 } 4610 4538 static DRIVER_ATTR_RO(no_uld); 4611 4539 4612 4540 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf) 4613 4541 { 4614 - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level); 4542 + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level); 4615 4543 } 4616 4544 static DRIVER_ATTR_RO(scsi_level); 4617 4545 4618 4546 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf) 4619 4547 { 4620 - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb); 4548 + return scnprintf(buf, PAGE_SIZE, 
"%d\n", sdebug_virtual_gb); 4621 4549 } 4622 4550 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf, 4623 4551 size_t count) ··· 4626 4554 bool changed; 4627 4555 4628 4556 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { 4629 - changed = (scsi_debug_virtual_gb != n); 4630 - scsi_debug_virtual_gb = n; 4557 + changed = (sdebug_virtual_gb != n); 4558 + sdebug_virtual_gb = n; 4631 4559 sdebug_capacity = get_sdebug_capacity(); 4632 4560 if (changed) { 4633 4561 struct sdebug_host_info *sdhp; ··· 4652 4580 4653 4581 static ssize_t add_host_show(struct device_driver *ddp, char *buf) 4654 4582 { 4655 - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host); 4583 + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host); 4656 4584 } 4585 + 4586 + static int sdebug_add_adapter(void); 4587 + static void sdebug_remove_adapter(void); 4657 4588 4658 4589 static ssize_t add_host_store(struct device_driver *ddp, const char *buf, 4659 4590 size_t count) ··· 4680 4605 4681 4606 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf) 4682 4607 { 4683 - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno); 4608 + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno); 4684 4609 } 4685 4610 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf, 4686 4611 size_t count) ··· 4688 4613 int n; 4689 4614 4690 4615 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { 4691 - scsi_debug_vpd_use_hostno = n; 4616 + sdebug_vpd_use_hostno = n; 4692 4617 return count; 4693 4618 } 4694 4619 return -EINVAL; 4695 4620 } 4696 4621 static DRIVER_ATTR_RW(vpd_use_hostno); 4697 4622 4623 + static ssize_t statistics_show(struct device_driver *ddp, char *buf) 4624 + { 4625 + return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics); 4626 + } 4627 + static ssize_t statistics_store(struct device_driver *ddp, const char *buf, 4628 + size_t count) 4629 + { 4630 + int n; 4631 + 4632 + 
if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) { 4633 + if (n > 0) 4634 + sdebug_statistics = true; 4635 + else { 4636 + clear_queue_stats(); 4637 + sdebug_statistics = false; 4638 + } 4639 + return count; 4640 + } 4641 + return -EINVAL; 4642 + } 4643 + static DRIVER_ATTR_RW(statistics); 4644 + 4698 4645 static ssize_t sector_size_show(struct device_driver *ddp, char *buf) 4699 4646 { 4700 - return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size); 4647 + return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size); 4701 4648 } 4702 4649 static DRIVER_ATTR_RO(sector_size); 4703 4650 4651 + static ssize_t submit_queues_show(struct device_driver *ddp, char *buf) 4652 + { 4653 + return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues); 4654 + } 4655 + static DRIVER_ATTR_RO(submit_queues); 4656 + 4704 4657 static ssize_t dix_show(struct device_driver *ddp, char *buf) 4705 4658 { 4706 - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix); 4659 + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix); 4707 4660 } 4708 4661 static DRIVER_ATTR_RO(dix); 4709 4662 4710 4663 static ssize_t dif_show(struct device_driver *ddp, char *buf) 4711 4664 { 4712 - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif); 4665 + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif); 4713 4666 } 4714 4667 static DRIVER_ATTR_RO(dif); 4715 4668 4716 4669 static ssize_t guard_show(struct device_driver *ddp, char *buf) 4717 4670 { 4718 - return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard); 4671 + return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard); 4719 4672 } 4720 4673 static DRIVER_ATTR_RO(guard); 4721 4674 4722 4675 static ssize_t ato_show(struct device_driver *ddp, char *buf) 4723 4676 { 4724 - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato); 4677 + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato); 4725 4678 } 4726 4679 static DRIVER_ATTR_RO(ato); 4727 4680 ··· 4772 4669 4773 4670 static ssize_t removable_show(struct device_driver *ddp, char 
*buf) 4774 4671 { 4775 - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0); 4672 + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0); 4776 4673 } 4777 4674 static ssize_t removable_store(struct device_driver *ddp, const char *buf, 4778 4675 size_t count) ··· 4780 4677 int n; 4781 4678 4782 4679 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { 4783 - scsi_debug_removable = (n > 0); 4680 + sdebug_removable = (n > 0); 4784 4681 return count; 4785 4682 } 4786 4683 return -EINVAL; ··· 4789 4686 4790 4687 static ssize_t host_lock_show(struct device_driver *ddp, char *buf) 4791 4688 { 4792 - return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_host_lock); 4689 + return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock); 4793 4690 } 4794 - /* Returns -EBUSY if host_lock is being changed and commands are queued */ 4691 + /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */ 4795 4692 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf, 4796 4693 size_t count) 4797 4694 { 4798 - int n, res; 4695 + int n; 4799 4696 4800 4697 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { 4801 - bool new_host_lock = (n > 0); 4802 - 4803 - res = count; 4804 - if (new_host_lock != scsi_debug_host_lock) { 4805 - unsigned long iflags; 4806 - int k; 4807 - 4808 - spin_lock_irqsave(&queued_arr_lock, iflags); 4809 - k = find_first_bit(queued_in_use_bm, 4810 - scsi_debug_max_queue); 4811 - if (k != scsi_debug_max_queue) 4812 - res = -EBUSY; /* have queued commands */ 4813 - else 4814 - scsi_debug_host_lock = new_host_lock; 4815 - spin_unlock_irqrestore(&queued_arr_lock, iflags); 4816 - } 4817 - return res; 4698 + sdebug_host_lock = (n > 0); 4699 + return count; 4818 4700 } 4819 4701 return -EINVAL; 4820 4702 } ··· 4807 4719 4808 4720 static ssize_t strict_show(struct device_driver *ddp, char *buf) 4809 4721 { 4810 - return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_strict); 4722 + 
return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict); 4811 4723 } 4812 4724 static ssize_t strict_store(struct device_driver *ddp, const char *buf, 4813 4725 size_t count) ··· 4815 4727 int n; 4816 4728 4817 4729 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { 4818 - scsi_debug_strict = (n > 0); 4730 + sdebug_strict = (n > 0); 4819 4731 return count; 4820 4732 } 4821 4733 return -EINVAL; 4822 4734 } 4823 4735 static DRIVER_ATTR_RW(strict); 4736 + 4737 + static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf) 4738 + { 4739 + return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl); 4740 + } 4741 + static DRIVER_ATTR_RO(uuid_ctl); 4824 4742 4825 4743 4826 4744 /* Note: The following array creates attribute files in the ··· 4855 4761 &driver_attr_add_host.attr, 4856 4762 &driver_attr_vpd_use_hostno.attr, 4857 4763 &driver_attr_sector_size.attr, 4764 + &driver_attr_statistics.attr, 4765 + &driver_attr_submit_queues.attr, 4858 4766 &driver_attr_dix.attr, 4859 4767 &driver_attr_dif.attr, 4860 4768 &driver_attr_guard.attr, ··· 4866 4770 &driver_attr_host_lock.attr, 4867 4771 &driver_attr_ndelay.attr, 4868 4772 &driver_attr_strict.attr, 4773 + &driver_attr_uuid_ctl.attr, 4869 4774 NULL, 4870 4775 }; 4871 4776 ATTRIBUTE_GROUPS(sdebug_drv); ··· 4880 4783 int k; 4881 4784 int ret; 4882 4785 4883 - atomic_set(&sdebug_cmnd_count, 0); 4884 - atomic_set(&sdebug_completions, 0); 4885 4786 atomic_set(&retired_max_queue, 0); 4886 4787 4887 - if (scsi_debug_ndelay >= 1000000000) { 4788 + if (sdebug_ndelay >= 1000 * 1000 * 1000) { 4888 4789 pr_warn("ndelay must be less than 1 second, ignored\n"); 4889 - scsi_debug_ndelay = 0; 4890 - } else if (scsi_debug_ndelay > 0) 4891 - scsi_debug_delay = DELAY_OVERRIDDEN; 4790 + sdebug_ndelay = 0; 4791 + } else if (sdebug_ndelay > 0) 4792 + sdebug_jdelay = JDELAY_OVERRIDDEN; 4892 4793 4893 - switch (scsi_debug_sector_size) { 4794 + switch (sdebug_sector_size) { 4894 4795 case 512: 4895 4796 case 1024: 4896 4797 
case 2048: 4897 4798 case 4096: 4898 4799 break; 4899 4800 default: 4900 - pr_err("invalid sector_size %d\n", scsi_debug_sector_size); 4801 + pr_err("invalid sector_size %d\n", sdebug_sector_size); 4901 4802 return -EINVAL; 4902 4803 } 4903 4804 4904 - switch (scsi_debug_dif) { 4805 + switch (sdebug_dif) { 4905 4806 4906 4807 case SD_DIF_TYPE0_PROTECTION: 4808 + break; 4907 4809 case SD_DIF_TYPE1_PROTECTION: 4908 4810 case SD_DIF_TYPE2_PROTECTION: 4909 4811 case SD_DIF_TYPE3_PROTECTION: 4812 + have_dif_prot = true; 4910 4813 break; 4911 4814 4912 4815 default: ··· 4914 4817 return -EINVAL; 4915 4818 } 4916 4819 4917 - if (scsi_debug_guard > 1) { 4820 + if (sdebug_guard > 1) { 4918 4821 pr_err("guard must be 0 or 1\n"); 4919 4822 return -EINVAL; 4920 4823 } 4921 4824 4922 - if (scsi_debug_ato > 1) { 4825 + if (sdebug_ato > 1) { 4923 4826 pr_err("ato must be 0 or 1\n"); 4924 4827 return -EINVAL; 4925 4828 } 4926 4829 4927 - if (scsi_debug_physblk_exp > 15) { 4928 - pr_err("invalid physblk_exp %u\n", scsi_debug_physblk_exp); 4830 + if (sdebug_physblk_exp > 15) { 4831 + pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp); 4832 + return -EINVAL; 4833 + } 4834 + if (sdebug_max_luns > 256) { 4835 + pr_warn("max_luns can be no more than 256, use default\n"); 4836 + sdebug_max_luns = DEF_MAX_LUNS; 4837 + } 4838 + 4839 + if (sdebug_lowest_aligned > 0x3fff) { 4840 + pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned); 4929 4841 return -EINVAL; 4930 4842 } 4931 4843 4932 - if (scsi_debug_lowest_aligned > 0x3fff) { 4933 - pr_err("lowest_aligned too big: %u\n", 4934 - scsi_debug_lowest_aligned); 4844 + if (submit_queues < 1) { 4845 + pr_err("submit_queues must be 1 or more\n"); 4935 4846 return -EINVAL; 4936 4847 } 4848 + sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue), 4849 + GFP_KERNEL); 4850 + if (sdebug_q_arr == NULL) 4851 + return -ENOMEM; 4852 + for (k = 0; k < submit_queues; ++k) 4853 + spin_lock_init(&sdebug_q_arr[k].qc_lock); 4937 4854 
4938 - if (scsi_debug_dev_size_mb < 1) 4939 - scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */ 4940 - sz = (unsigned long)scsi_debug_dev_size_mb * 1048576; 4941 - sdebug_store_sectors = sz / scsi_debug_sector_size; 4855 + if (sdebug_dev_size_mb < 1) 4856 + sdebug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */ 4857 + sz = (unsigned long)sdebug_dev_size_mb * 1048576; 4858 + sdebug_store_sectors = sz / sdebug_sector_size; 4942 4859 sdebug_capacity = get_sdebug_capacity(); 4943 4860 4944 4861 /* play around with geometry, don't waste too much on track 0 */ 4945 4862 sdebug_heads = 8; 4946 4863 sdebug_sectors_per = 32; 4947 - if (scsi_debug_dev_size_mb >= 256) 4864 + if (sdebug_dev_size_mb >= 256) 4948 4865 sdebug_heads = 64; 4949 - else if (scsi_debug_dev_size_mb >= 16) 4866 + else if (sdebug_dev_size_mb >= 16) 4950 4867 sdebug_heads = 32; 4951 4868 sdebug_cylinders_per = (unsigned long)sdebug_capacity / 4952 4869 (sdebug_sectors_per * sdebug_heads); ··· 4972 4861 (sdebug_sectors_per * sdebug_heads); 4973 4862 } 4974 4863 4975 - if (0 == scsi_debug_fake_rw) { 4864 + if (sdebug_fake_rw == 0) { 4976 4865 fake_storep = vmalloc(sz); 4977 4866 if (NULL == fake_storep) { 4978 4867 pr_err("out of memory, 1\n"); 4979 - return -ENOMEM; 4868 + ret = -ENOMEM; 4869 + goto free_q_arr; 4980 4870 } 4981 4871 memset(fake_storep, 0, sz); 4982 - if (scsi_debug_num_parts > 0) 4872 + if (sdebug_num_parts > 0) 4983 4873 sdebug_build_parts(fake_storep, sz); 4984 4874 } 4985 4875 4986 - if (scsi_debug_dix) { 4876 + if (sdebug_dix) { 4987 4877 int dif_size; 4988 4878 4989 4879 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple); ··· 5003 4891 5004 4892 /* Logical Block Provisioning */ 5005 4893 if (scsi_debug_lbp()) { 5006 - scsi_debug_unmap_max_blocks = 5007 - clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU); 4894 + sdebug_unmap_max_blocks = 4895 + clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU); 5008 4896 5009 - scsi_debug_unmap_max_desc = 5010 - 
clamp(scsi_debug_unmap_max_desc, 0U, 256U); 4897 + sdebug_unmap_max_desc = 4898 + clamp(sdebug_unmap_max_desc, 0U, 256U); 5011 4899 5012 - scsi_debug_unmap_granularity = 5013 - clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU); 4900 + sdebug_unmap_granularity = 4901 + clamp(sdebug_unmap_granularity, 1U, 0xffffffffU); 5014 4902 5015 - if (scsi_debug_unmap_alignment && 5016 - scsi_debug_unmap_granularity <= 5017 - scsi_debug_unmap_alignment) { 4903 + if (sdebug_unmap_alignment && 4904 + sdebug_unmap_granularity <= 4905 + sdebug_unmap_alignment) { 5018 4906 pr_err("ERR: unmap_granularity <= unmap_alignment\n"); 5019 - return -EINVAL; 4907 + ret = -EINVAL; 4908 + goto free_vm; 5020 4909 } 5021 4910 5022 4911 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1; ··· 5034 4921 bitmap_zero(map_storep, map_size); 5035 4922 5036 4923 /* Map first 1KB for partition table */ 5037 - if (scsi_debug_num_parts) 4924 + if (sdebug_num_parts) 5038 4925 map_region(0, 2); 5039 4926 } 5040 4927 ··· 5055 4942 goto bus_unreg; 5056 4943 } 5057 4944 5058 - host_to_add = scsi_debug_add_host; 5059 - scsi_debug_add_host = 0; 4945 + host_to_add = sdebug_add_host; 4946 + sdebug_add_host = 0; 5060 4947 5061 4948 for (k = 0; k < host_to_add; k++) { 5062 4949 if (sdebug_add_adapter()) { ··· 5065 4952 } 5066 4953 } 5067 4954 5068 - if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) 5069 - pr_info("built %d host(s)\n", scsi_debug_add_host); 4955 + if (sdebug_verbose) 4956 + pr_info("built %d host(s)\n", sdebug_add_host); 5070 4957 5071 4958 return 0; 5072 4959 ··· 5078 4965 vfree(map_storep); 5079 4966 vfree(dif_storep); 5080 4967 vfree(fake_storep); 5081 - 4968 + free_q_arr: 4969 + kfree(sdebug_q_arr); 5082 4970 return ret; 5083 4971 } 5084 4972 5085 4973 static void __exit scsi_debug_exit(void) 5086 4974 { 5087 - int k = scsi_debug_add_host; 4975 + int k = sdebug_add_host; 5088 4976 5089 4977 stop_all_queued(); 5090 4978 free_all_queued(); ··· 5097 4983 5098 4984 vfree(dif_storep); 5099 4985 
vfree(fake_storep); 4986 + kfree(sdebug_q_arr); 5100 4987 } 5101 4988 5102 4989 device_initcall(scsi_debug_init); ··· 5126 5011 5127 5012 INIT_LIST_HEAD(&sdbg_host->dev_info_list); 5128 5013 5129 - devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns; 5014 + devs_per_host = sdebug_num_tgts * sdebug_max_luns; 5130 5015 for (k = 0; k < devs_per_host; k++) { 5131 5016 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL); 5132 5017 if (!sdbg_devinfo) { ··· 5143 5028 sdbg_host->dev.bus = &pseudo_lld_bus; 5144 5029 sdbg_host->dev.parent = pseudo_primary; 5145 5030 sdbg_host->dev.release = &sdebug_release_adapter; 5146 - dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host); 5031 + dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host); 5147 5032 5148 5033 error = device_register(&sdbg_host->dev); 5149 5034 5150 5035 if (error) 5151 5036 goto clean; 5152 5037 5153 - ++scsi_debug_add_host; 5038 + ++sdebug_add_host; 5154 5039 return error; 5155 5040 5156 5041 clean: ··· 5179 5064 if (!sdbg_host) 5180 5065 return; 5181 5066 5182 - device_unregister(&sdbg_host->dev); 5183 - --scsi_debug_add_host; 5067 + device_unregister(&sdbg_host->dev); 5068 + --sdebug_add_host; 5184 5069 } 5185 5070 5186 - static int 5187 - sdebug_change_qdepth(struct scsi_device *sdev, int qdepth) 5071 + static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth) 5188 5072 { 5189 5073 int num_in_q = 0; 5190 - unsigned long iflags; 5191 5074 struct sdebug_dev_info *devip; 5192 5075 5193 - spin_lock_irqsave(&queued_arr_lock, iflags); 5076 + block_unblock_all_queues(true); 5194 5077 devip = (struct sdebug_dev_info *)sdev->hostdata; 5195 5078 if (NULL == devip) { 5196 - spin_unlock_irqrestore(&queued_arr_lock, iflags); 5079 + block_unblock_all_queues(false); 5197 5080 return -ENODEV; 5198 5081 } 5199 5082 num_in_q = atomic_read(&devip->num_in_q); 5200 - spin_unlock_irqrestore(&queued_arr_lock, iflags); 5201 5083 5202 5084 if (qdepth < 1) 5203 5085 qdepth = 1; 5204 - /* 
allow to exceed max host queued_arr elements for testing */ 5205 - if (qdepth > SCSI_DEBUG_CANQUEUE + 10) 5206 - qdepth = SCSI_DEBUG_CANQUEUE + 10; 5086 + /* allow to exceed max host qc_arr elements for testing */ 5087 + if (qdepth > SDEBUG_CANQUEUE + 10) 5088 + qdepth = SDEBUG_CANQUEUE + 10; 5207 5089 scsi_change_queue_depth(sdev, qdepth); 5208 5090 5209 - if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) { 5210 - sdev_printk(KERN_INFO, sdev, 5211 - "%s: qdepth=%d, num_in_q=%d\n", 5091 + if (SDEBUG_OPT_Q_NOISE & sdebug_opts) { 5092 + sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n", 5212 5093 __func__, qdepth, num_in_q); 5213 5094 } 5095 + block_unblock_all_queues(false); 5214 5096 return sdev->queue_depth; 5215 5097 } 5216 5098 5217 - static int 5218 - check_inject(struct scsi_cmnd *scp) 5099 + static bool fake_timeout(struct scsi_cmnd *scp) 5219 5100 { 5220 - struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp); 5221 - 5222 - memset(ep, 0, sizeof(struct sdebug_scmd_extra_t)); 5223 - 5224 - if (atomic_inc_return(&sdebug_cmnd_count) >= 5225 - abs(scsi_debug_every_nth)) { 5226 - atomic_set(&sdebug_cmnd_count, 0); 5227 - if (scsi_debug_every_nth < -1) 5228 - scsi_debug_every_nth = -1; 5229 - if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts) 5230 - return 1; /* ignore command causing timeout */ 5231 - else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts && 5101 + if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) { 5102 + if (sdebug_every_nth < -1) 5103 + sdebug_every_nth = -1; 5104 + if (SDEBUG_OPT_TIMEOUT & sdebug_opts) 5105 + return true; /* ignore command causing timeout */ 5106 + else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts && 5232 5107 scsi_medium_access_command(scp)) 5233 - return 1; /* time out reads and writes */ 5234 - if (sdebug_any_injecting_opt) { 5235 - int opts = scsi_debug_opts; 5236 - 5237 - if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts) 5238 - ep->inj_recovered = true; 5239 - else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts) 5240 - 
ep->inj_transport = true; 5241 - else if (SCSI_DEBUG_OPT_DIF_ERR & opts) 5242 - ep->inj_dif = true; 5243 - else if (SCSI_DEBUG_OPT_DIX_ERR & opts) 5244 - ep->inj_dix = true; 5245 - else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts) 5246 - ep->inj_short = true; 5247 - } 5108 + return true; /* time out reads and writes */ 5248 5109 } 5249 - return 0; 5110 + return false; 5250 5111 } 5251 5112 5252 - static int 5253 - scsi_debug_queuecommand(struct scsi_cmnd *scp) 5113 + static int scsi_debug_queuecommand(struct Scsi_Host *shost, 5114 + struct scsi_cmnd *scp) 5254 5115 { 5255 5116 u8 sdeb_i; 5256 5117 struct scsi_device *sdp = scp->device; ··· 5237 5146 int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *); 5238 5147 int k, na; 5239 5148 int errsts = 0; 5240 - int errsts_no_connect = DID_NO_CONNECT << 16; 5241 5149 u32 flags; 5242 5150 u16 sa; 5243 5151 u8 opcode = cmd[0]; 5244 5152 bool has_wlun_rl; 5245 - bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts); 5246 5153 5247 5154 scsi_set_resid(scp, 0); 5248 - if (debug && !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts)) { 5155 + if (sdebug_statistics) 5156 + atomic_inc(&sdebug_cmnd_count); 5157 + if (unlikely(sdebug_verbose && 5158 + !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) { 5249 5159 char b[120]; 5250 5160 int n, len, sb; 5251 5161 ··· 5259 5167 n += scnprintf(b + n, sb - n, "%02x ", 5260 5168 (u32)cmd[k]); 5261 5169 } 5262 - sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name, b); 5170 + if (sdebug_mq_active) 5171 + sdev_printk(KERN_INFO, sdp, "%s: tag=%u, cmd %s\n", 5172 + my_name, blk_mq_unique_tag(scp->request), 5173 + b); 5174 + else 5175 + sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name, 5176 + b); 5263 5177 } 5264 5178 has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS); 5265 - if ((sdp->lun >= scsi_debug_max_luns) && !has_wlun_rl) 5266 - return schedule_resp(scp, NULL, errsts_no_connect, 0); 5179 + if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl)) 5180 + goto err_out; 5267 5181 5268 
5182 sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */ 5269 5183 oip = &opcode_info_arr[sdeb_i]; /* safe if table consistent */ 5270 5184 devip = (struct sdebug_dev_info *)sdp->hostdata; 5271 - if (!devip) { 5272 - devip = devInfoReg(sdp); 5185 + if (unlikely(!devip)) { 5186 + devip = find_build_dev_info(sdp); 5273 5187 if (NULL == devip) 5274 - return schedule_resp(scp, NULL, errsts_no_connect, 0); 5188 + goto err_out; 5275 5189 } 5276 5190 na = oip->num_attached; 5277 5191 r_pfp = oip->pfp; ··· 5309 5211 } 5310 5212 } /* else (when na==0) we assume the oip is a match */ 5311 5213 flags = oip->flags; 5312 - if (F_INV_OP & flags) { 5214 + if (unlikely(F_INV_OP & flags)) { 5313 5215 mk_sense_invalid_opcode(scp); 5314 5216 goto check_cond; 5315 5217 } 5316 - if (has_wlun_rl && !(F_RL_WLUN_OK & flags)) { 5317 - if (debug) 5318 - sdev_printk(KERN_INFO, sdp, "scsi_debug: Opcode: " 5319 - "0x%x not supported for wlun\n", opcode); 5218 + if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) { 5219 + if (sdebug_verbose) 5220 + sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n", 5221 + my_name, opcode, " supported for wlun"); 5320 5222 mk_sense_invalid_opcode(scp); 5321 5223 goto check_cond; 5322 5224 } 5323 - if (scsi_debug_strict) { /* check cdb against mask */ 5225 + if (unlikely(sdebug_strict)) { /* check cdb against mask */ 5324 5226 u8 rem; 5325 5227 int j; 5326 5228 ··· 5336 5238 } 5337 5239 } 5338 5240 } 5339 - if (!(F_SKIP_UA & flags) && 5340 - SDEBUG_NUM_UAS != find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS)) { 5341 - errsts = check_readiness(scp, UAS_ONLY, devip); 5241 + if (unlikely(!(F_SKIP_UA & flags) && 5242 + find_first_bit(devip->uas_bm, 5243 + SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) { 5244 + errsts = make_ua(scp, devip); 5342 5245 if (errsts) 5343 5246 goto check_cond; 5344 5247 } 5345 - if ((F_M_ACCESS & flags) && devip->stopped) { 5248 + if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) { 5346 5249 mk_sense_buffer(scp, NOT_READY, 
LOGICAL_UNIT_NOT_READY, 0x2); 5347 - if (debug) 5250 + if (sdebug_verbose) 5348 5251 sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: " 5349 5252 "%s\n", my_name, "initializing command " 5350 5253 "required"); 5351 5254 errsts = check_condition_result; 5352 5255 goto fini; 5353 5256 } 5354 - if (scsi_debug_fake_rw && (F_FAKE_RW & flags)) 5257 + if (sdebug_fake_rw && (F_FAKE_RW & flags)) 5355 5258 goto fini; 5356 - if (scsi_debug_every_nth) { 5357 - if (check_inject(scp)) 5259 + if (unlikely(sdebug_every_nth)) { 5260 + if (fake_timeout(scp)) 5358 5261 return 0; /* ignore command: make trouble */ 5359 5262 } 5360 - if (oip->pfp) /* if this command has a resp_* function, call it */ 5361 - errsts = oip->pfp(scp, devip); 5263 + if (likely(oip->pfp)) 5264 + errsts = oip->pfp(scp, devip); /* calls a resp_* function */ 5362 5265 else if (r_pfp) /* if leaf function ptr NULL, try the root's */ 5363 5266 errsts = r_pfp(scp, devip); 5364 5267 5365 5268 fini: 5366 5269 return schedule_resp(scp, devip, errsts, 5367 - ((F_DELAY_OVERR & flags) ? 0 : scsi_debug_delay)); 5270 + ((F_DELAY_OVERR & flags) ? 
0 : sdebug_jdelay)); 5368 5271 check_cond: 5369 5272 return schedule_resp(scp, devip, check_condition_result, 0); 5370 - } 5371 - 5372 - static int 5373 - sdebug_queuecommand_lock_or_not(struct Scsi_Host *shost, struct scsi_cmnd *cmd) 5374 - { 5375 - if (scsi_debug_host_lock) { 5376 - unsigned long iflags; 5377 - int rc; 5378 - 5379 - spin_lock_irqsave(shost->host_lock, iflags); 5380 - rc = scsi_debug_queuecommand(cmd); 5381 - spin_unlock_irqrestore(shost->host_lock, iflags); 5382 - return rc; 5383 - } else 5384 - return scsi_debug_queuecommand(cmd); 5273 + err_out: 5274 + return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0); 5385 5275 } 5386 5276 5387 5277 static struct scsi_host_template sdebug_driver_template = { ··· 5382 5296 .slave_configure = scsi_debug_slave_configure, 5383 5297 .slave_destroy = scsi_debug_slave_destroy, 5384 5298 .ioctl = scsi_debug_ioctl, 5385 - .queuecommand = sdebug_queuecommand_lock_or_not, 5299 + .queuecommand = scsi_debug_queuecommand, 5386 5300 .change_queue_depth = sdebug_change_qdepth, 5387 5301 .eh_abort_handler = scsi_debug_abort, 5388 5302 .eh_device_reset_handler = scsi_debug_device_reset, 5389 5303 .eh_target_reset_handler = scsi_debug_target_reset, 5390 5304 .eh_bus_reset_handler = scsi_debug_bus_reset, 5391 5305 .eh_host_reset_handler = scsi_debug_host_reset, 5392 - .can_queue = SCSI_DEBUG_CANQUEUE, 5306 + .can_queue = SDEBUG_CANQUEUE, 5393 5307 .this_id = 7, 5394 - .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS, 5308 + .sg_tablesize = SG_MAX_SEGMENTS, 5395 5309 .cmd_per_lun = DEF_CMD_PER_LUN, 5396 5310 .max_sectors = -1U, 5397 5311 .use_clustering = DISABLE_CLUSTERING, 5398 5312 .module = THIS_MODULE, 5399 5313 .track_queue_depth = 1, 5400 - .cmd_size = sizeof(struct sdebug_scmd_extra_t), 5401 5314 }; 5402 5315 5403 5316 static int sdebug_driver_probe(struct device * dev) 5404 5317 { 5405 5318 int error = 0; 5406 - int opts; 5407 5319 struct sdebug_host_info *sdbg_host; 5408 5320 struct Scsi_Host *hpnt; 5409 - int 
host_prot; 5321 + int hprot; 5410 5322 5411 5323 sdbg_host = to_sdebug_host(dev); 5412 5324 5413 - sdebug_driver_template.can_queue = scsi_debug_max_queue; 5414 - if (scsi_debug_clustering) 5325 + sdebug_driver_template.can_queue = sdebug_max_queue; 5326 + if (sdebug_clustering) 5415 5327 sdebug_driver_template.use_clustering = ENABLE_CLUSTERING; 5416 5328 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host)); 5417 5329 if (NULL == hpnt) { ··· 5417 5333 error = -ENODEV; 5418 5334 return error; 5419 5335 } 5336 + if (submit_queues > nr_cpu_ids) { 5337 + pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%d\n", 5338 + my_name, submit_queues, nr_cpu_ids); 5339 + submit_queues = nr_cpu_ids; 5340 + } 5341 + /* Decide whether to tell scsi subsystem that we want mq */ 5342 + /* Following should give the same answer for each host */ 5343 + sdebug_mq_active = shost_use_blk_mq(hpnt) && (submit_queues > 1); 5344 + if (sdebug_mq_active) 5345 + hpnt->nr_hw_queues = submit_queues; 5420 5346 5421 5347 sdbg_host->shost = hpnt; 5422 5348 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host; 5423 - if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id)) 5424 - hpnt->max_id = scsi_debug_num_tgts + 1; 5349 + if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id)) 5350 + hpnt->max_id = sdebug_num_tgts + 1; 5425 5351 else 5426 - hpnt->max_id = scsi_debug_num_tgts; 5427 - /* = scsi_debug_max_luns; */ 5352 + hpnt->max_id = sdebug_num_tgts; 5353 + /* = sdebug_max_luns; */ 5428 5354 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1; 5429 5355 5430 - host_prot = 0; 5356 + hprot = 0; 5431 5357 5432 - switch (scsi_debug_dif) { 5358 + switch (sdebug_dif) { 5433 5359 5434 5360 case SD_DIF_TYPE1_PROTECTION: 5435 - host_prot = SHOST_DIF_TYPE1_PROTECTION; 5436 - if (scsi_debug_dix) 5437 - host_prot |= SHOST_DIX_TYPE1_PROTECTION; 5361 + hprot = SHOST_DIF_TYPE1_PROTECTION; 5362 + if (sdebug_dix) 5363 + hprot |= SHOST_DIX_TYPE1_PROTECTION; 5438 5364 break; 5439 5365 
5440 5366 case SD_DIF_TYPE2_PROTECTION: 5441 - host_prot = SHOST_DIF_TYPE2_PROTECTION; 5442 - if (scsi_debug_dix) 5443 - host_prot |= SHOST_DIX_TYPE2_PROTECTION; 5367 + hprot = SHOST_DIF_TYPE2_PROTECTION; 5368 + if (sdebug_dix) 5369 + hprot |= SHOST_DIX_TYPE2_PROTECTION; 5444 5370 break; 5445 5371 5446 5372 case SD_DIF_TYPE3_PROTECTION: 5447 - host_prot = SHOST_DIF_TYPE3_PROTECTION; 5448 - if (scsi_debug_dix) 5449 - host_prot |= SHOST_DIX_TYPE3_PROTECTION; 5373 + hprot = SHOST_DIF_TYPE3_PROTECTION; 5374 + if (sdebug_dix) 5375 + hprot |= SHOST_DIX_TYPE3_PROTECTION; 5450 5376 break; 5451 5377 5452 5378 default: 5453 - if (scsi_debug_dix) 5454 - host_prot |= SHOST_DIX_TYPE0_PROTECTION; 5379 + if (sdebug_dix) 5380 + hprot |= SHOST_DIX_TYPE0_PROTECTION; 5455 5381 break; 5456 5382 } 5457 5383 5458 - scsi_host_set_prot(hpnt, host_prot); 5384 + scsi_host_set_prot(hpnt, hprot); 5459 5385 5460 - pr_info("host protection%s%s%s%s%s%s%s\n", 5461 - (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "", 5462 - (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "", 5463 - (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "", 5464 - (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "", 5465 - (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "", 5466 - (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "", 5467 - (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : ""); 5386 + if (have_dif_prot || sdebug_dix) 5387 + pr_info("host protection%s%s%s%s%s%s%s\n", 5388 + (hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "", 5389 + (hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "", 5390 + (hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "", 5391 + (hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "", 5392 + (hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "", 5393 + (hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "", 5394 + (hprot & SHOST_DIX_TYPE3_PROTECTION) ? 
" DIX3" : ""); 5468 5395 5469 - if (scsi_debug_guard == 1) 5396 + if (sdebug_guard == 1) 5470 5397 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP); 5471 5398 else 5472 5399 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC); 5473 5400 5474 - opts = scsi_debug_opts; 5475 - if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts) 5476 - sdebug_any_injecting_opt = true; 5477 - else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts) 5478 - sdebug_any_injecting_opt = true; 5479 - else if (SCSI_DEBUG_OPT_DIF_ERR & opts) 5480 - sdebug_any_injecting_opt = true; 5481 - else if (SCSI_DEBUG_OPT_DIX_ERR & opts) 5482 - sdebug_any_injecting_opt = true; 5483 - else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts) 5484 - sdebug_any_injecting_opt = true; 5485 - 5401 + sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts); 5402 + sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts); 5403 + if (sdebug_every_nth) /* need stats counters for every_nth */ 5404 + sdebug_statistics = true; 5486 5405 error = scsi_add_host(hpnt, &sdbg_host->dev); 5487 5406 if (error) { 5488 5407 pr_err("scsi_add_host failed\n");
+36 -152
drivers/scsi/scsi_lib.c
··· 14 14 #include <linux/completion.h> 15 15 #include <linux/kernel.h> 16 16 #include <linux/export.h> 17 - #include <linux/mempool.h> 18 - #include <linux/slab.h> 19 17 #include <linux/init.h> 20 18 #include <linux/pci.h> 21 19 #include <linux/delay.h> ··· 37 39 #include "scsi_priv.h" 38 40 #include "scsi_logging.h" 39 41 40 - 41 - #define SG_MEMPOOL_NR ARRAY_SIZE(scsi_sg_pools) 42 - #define SG_MEMPOOL_SIZE 2 43 - 44 - struct scsi_host_sg_pool { 45 - size_t size; 46 - char *name; 47 - struct kmem_cache *slab; 48 - mempool_t *pool; 49 - }; 50 - 51 - #define SP(x) { .size = x, "sgpool-" __stringify(x) } 52 - #if (SCSI_MAX_SG_SEGMENTS < 32) 53 - #error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater) 54 - #endif 55 - static struct scsi_host_sg_pool scsi_sg_pools[] = { 56 - SP(8), 57 - SP(16), 58 - #if (SCSI_MAX_SG_SEGMENTS > 32) 59 - SP(32), 60 - #if (SCSI_MAX_SG_SEGMENTS > 64) 61 - SP(64), 62 - #if (SCSI_MAX_SG_SEGMENTS > 128) 63 - SP(128), 64 - #if (SCSI_MAX_SG_SEGMENTS > 256) 65 - #error SCSI_MAX_SG_SEGMENTS is too large (256 MAX) 66 - #endif 67 - #endif 68 - #endif 69 - #endif 70 - SP(SCSI_MAX_SG_SEGMENTS) 71 - }; 72 - #undef SP 73 42 74 43 struct kmem_cache *scsi_sdb_cache; 75 44 ··· 518 553 scsi_run_queue(sdev->request_queue); 519 554 } 520 555 521 - static inline unsigned int scsi_sgtable_index(unsigned short nents) 522 - { 523 - unsigned int index; 524 - 525 - BUG_ON(nents > SCSI_MAX_SG_SEGMENTS); 526 - 527 - if (nents <= 8) 528 - index = 0; 529 - else 530 - index = get_count_order(nents) - 3; 531 - 532 - return index; 533 - } 534 - 535 - static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents) 536 - { 537 - struct scsi_host_sg_pool *sgp; 538 - 539 - sgp = scsi_sg_pools + scsi_sgtable_index(nents); 540 - mempool_free(sgl, sgp->pool); 541 - } 542 - 543 - static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask) 544 - { 545 - struct scsi_host_sg_pool *sgp; 546 - 547 - sgp = scsi_sg_pools + scsi_sgtable_index(nents); 548 
- return mempool_alloc(sgp->pool, gfp_mask); 549 - } 550 - 551 - static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq) 552 - { 553 - if (mq && sdb->table.orig_nents <= SCSI_MAX_SG_SEGMENTS) 554 - return; 555 - __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free); 556 - } 557 - 558 - static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq) 559 - { 560 - struct scatterlist *first_chunk = NULL; 561 - int ret; 562 - 563 - BUG_ON(!nents); 564 - 565 - if (mq) { 566 - if (nents <= SCSI_MAX_SG_SEGMENTS) { 567 - sdb->table.nents = sdb->table.orig_nents = nents; 568 - sg_init_table(sdb->table.sgl, nents); 569 - return 0; 570 - } 571 - first_chunk = sdb->table.sgl; 572 - } 573 - 574 - ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS, 575 - first_chunk, GFP_ATOMIC, scsi_sg_alloc); 576 - if (unlikely(ret)) 577 - scsi_free_sgtable(sdb, mq); 578 - return ret; 579 - } 580 - 581 556 static void scsi_uninit_cmd(struct scsi_cmnd *cmd) 582 557 { 583 558 if (cmd->request->cmd_type == REQ_TYPE_FS) { ··· 530 625 531 626 static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd) 532 627 { 628 + struct scsi_data_buffer *sdb; 629 + 533 630 if (cmd->sdb.table.nents) 534 - scsi_free_sgtable(&cmd->sdb, true); 535 - if (cmd->request->next_rq && cmd->request->next_rq->special) 536 - scsi_free_sgtable(cmd->request->next_rq->special, true); 631 + sg_free_table_chained(&cmd->sdb.table, true); 632 + if (cmd->request->next_rq) { 633 + sdb = cmd->request->next_rq->special; 634 + if (sdb) 635 + sg_free_table_chained(&sdb->table, true); 636 + } 537 637 if (scsi_prot_sg_count(cmd)) 538 - scsi_free_sgtable(cmd->prot_sdb, true); 638 + sg_free_table_chained(&cmd->prot_sdb->table, true); 539 639 } 540 640 541 641 static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd) ··· 579 669 static void scsi_release_buffers(struct scsi_cmnd *cmd) 580 670 { 581 671 if (cmd->sdb.table.nents) 582 - scsi_free_sgtable(&cmd->sdb, false); 672 + 
sg_free_table_chained(&cmd->sdb.table, false); 583 673 584 674 memset(&cmd->sdb, 0, sizeof(cmd->sdb)); 585 675 586 676 if (scsi_prot_sg_count(cmd)) 587 - scsi_free_sgtable(cmd->prot_sdb, false); 677 + sg_free_table_chained(&cmd->prot_sdb->table, false); 588 678 } 589 679 590 680 static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd) 591 681 { 592 682 struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special; 593 683 594 - scsi_free_sgtable(bidi_sdb, false); 684 + sg_free_table_chained(&bidi_sdb->table, false); 595 685 kmem_cache_free(scsi_sdb_cache, bidi_sdb); 596 686 cmd->request->next_rq->special = NULL; 597 687 } ··· 995 1085 /* 996 1086 * If sg table allocation fails, requeue request later. 997 1087 */ 998 - if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments, 999 - req->mq_ctx != NULL))) 1088 + if (unlikely(sg_alloc_table_chained(&sdb->table, req->nr_phys_segments, 1089 + sdb->table.sgl))) 1000 1090 return BLKPREP_DEFER; 1001 1091 1002 1092 /* ··· 1068 1158 1069 1159 ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio); 1070 1160 1071 - if (scsi_alloc_sgtable(prot_sdb, ivecs, is_mq)) { 1161 + if (sg_alloc_table_chained(&prot_sdb->table, ivecs, 1162 + prot_sdb->table.sgl)) { 1072 1163 error = BLKPREP_DEFER; 1073 1164 goto err_exit; 1074 1165 } ··· 1843 1932 if (scsi_host_get_prot(shost)) { 1844 1933 cmd->prot_sdb = (void *)sg + 1845 1934 min_t(unsigned int, 1846 - shost->sg_tablesize, SCSI_MAX_SG_SEGMENTS) * 1935 + shost->sg_tablesize, SG_CHUNK_SIZE) * 1847 1936 sizeof(struct scatterlist); 1848 1937 memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer)); 1849 1938 ··· 2016 2105 * this limit is imposed by hardware restrictions 2017 2106 */ 2018 2107 blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize, 2019 - SCSI_MAX_SG_CHAIN_SEGMENTS)); 2108 + SG_MAX_SEGMENTS)); 2020 2109 2021 2110 if (scsi_host_prot_dma(shost)) { 2022 2111 shost->sg_prot_tablesize = ··· 2098 2187 unsigned int cmd_size, sgl_size, tbl_size; 2099 2188 2100 
2189 tbl_size = shost->sg_tablesize; 2101 - if (tbl_size > SCSI_MAX_SG_SEGMENTS) 2102 - tbl_size = SCSI_MAX_SG_SEGMENTS; 2190 + if (tbl_size > SG_CHUNK_SIZE) 2191 + tbl_size = SG_CHUNK_SIZE; 2103 2192 sgl_size = tbl_size * sizeof(struct scatterlist); 2104 2193 cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size; 2105 2194 if (scsi_host_get_prot(shost)) ··· 2175 2264 2176 2265 int __init scsi_init_queue(void) 2177 2266 { 2178 - int i; 2179 - 2180 2267 scsi_sdb_cache = kmem_cache_create("scsi_data_buffer", 2181 2268 sizeof(struct scsi_data_buffer), 2182 2269 0, 0, NULL); ··· 2183 2274 return -ENOMEM; 2184 2275 } 2185 2276 2186 - for (i = 0; i < SG_MEMPOOL_NR; i++) { 2187 - struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; 2188 - int size = sgp->size * sizeof(struct scatterlist); 2189 - 2190 - sgp->slab = kmem_cache_create(sgp->name, size, 0, 2191 - SLAB_HWCACHE_ALIGN, NULL); 2192 - if (!sgp->slab) { 2193 - printk(KERN_ERR "SCSI: can't init sg slab %s\n", 2194 - sgp->name); 2195 - goto cleanup_sdb; 2196 - } 2197 - 2198 - sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE, 2199 - sgp->slab); 2200 - if (!sgp->pool) { 2201 - printk(KERN_ERR "SCSI: can't init sg mempool %s\n", 2202 - sgp->name); 2203 - goto cleanup_sdb; 2204 - } 2205 - } 2206 - 2207 2277 return 0; 2208 - 2209 - cleanup_sdb: 2210 - for (i = 0; i < SG_MEMPOOL_NR; i++) { 2211 - struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; 2212 - if (sgp->pool) 2213 - mempool_destroy(sgp->pool); 2214 - if (sgp->slab) 2215 - kmem_cache_destroy(sgp->slab); 2216 - } 2217 - kmem_cache_destroy(scsi_sdb_cache); 2218 - 2219 - return -ENOMEM; 2220 2278 } 2221 2279 2222 2280 void scsi_exit_queue(void) 2223 2281 { 2224 - int i; 2225 - 2226 2282 kmem_cache_destroy(scsi_sdb_cache); 2227 - 2228 - for (i = 0; i < SG_MEMPOOL_NR; i++) { 2229 - struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; 2230 - mempool_destroy(sgp->pool); 2231 - kmem_cache_destroy(sgp->slab); 2232 - } 2233 2283 } 2234 2284 2235 2285 /** ··· 
3064 3196 * - EUI-64 based 12-byte 3065 3197 * - NAA IEEE Registered 3066 3198 * - NAA IEEE Extended 3199 + * - T10 Vendor ID 3067 3200 * as longer descriptors reduce the likelyhood 3068 3201 * of identification clashes. 3069 3202 */ ··· 3083 3214 goto next_desig; 3084 3215 3085 3216 switch (d[1] & 0xf) { 3217 + case 0x1: 3218 + /* T10 Vendor ID */ 3219 + if (cur_id_size > d[3]) 3220 + break; 3221 + /* Prefer anything */ 3222 + if (cur_id_type > 0x01 && cur_id_type != 0xff) 3223 + break; 3224 + cur_id_size = d[3]; 3225 + if (cur_id_size + 4 > id_len) 3226 + cur_id_size = id_len - 4; 3227 + cur_id_str = d + 4; 3228 + cur_id_type = d[1] & 0xf; 3229 + id_size = snprintf(id, id_len, "t10.%*pE", 3230 + cur_id_size, cur_id_str); 3231 + break; 3086 3232 case 0x2: 3087 3233 /* EUI-64 */ 3088 3234 if (cur_id_size > d[3])
+1 -1
drivers/scsi/scsi_priv.h
··· 116 116 extern char scsi_scan_type[]; 117 117 extern int scsi_complete_async_scans(void); 118 118 extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int, 119 - unsigned int, u64, int); 119 + unsigned int, u64, enum scsi_scan_mode); 120 120 extern void scsi_forget_host(struct Scsi_Host *); 121 121 extern void scsi_rescan_device(struct device *); 122 122
+2 -1
drivers/scsi/scsi_proc.c
··· 251 251 if (shost->transportt->user_scan) 252 252 error = shost->transportt->user_scan(shost, channel, id, lun); 253 253 else 254 - error = scsi_scan_host_selected(shost, channel, id, lun, 1); 254 + error = scsi_scan_host_selected(shost, channel, id, lun, 255 + SCSI_SCAN_MANUAL); 255 256 scsi_host_put(shost); 256 257 return error; 257 258 }
+31 -14
drivers/scsi/scsi_scan.c
··· 96 96 #define SCSI_SCAN_TYPE_DEFAULT "sync" 97 97 #endif 98 98 99 - char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT; 99 + char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT; 100 100 101 - module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), S_IRUGO); 102 - MODULE_PARM_DESC(scan, "sync, async or none"); 101 + module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), 102 + S_IRUGO|S_IWUSR); 103 + MODULE_PARM_DESC(scan, "sync, async, manual, or none. " 104 + "Setting to 'manual' disables automatic scanning, but allows " 105 + "for manual device scan via the 'scan' sysfs attribute."); 103 106 104 107 static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18; 105 108 ··· 319 316 struct Scsi_Host *shost = dev_to_shost(dev->parent); 320 317 unsigned long flags; 321 318 319 + BUG_ON(starget->state == STARGET_DEL); 322 320 starget->state = STARGET_DEL; 323 321 transport_destroy_device(dev); 324 322 spin_lock_irqsave(shost->host_lock, flags); ··· 1044 1040 * @lun: LUN of target device 1045 1041 * @bflagsp: store bflags here if not NULL 1046 1042 * @sdevp: probe the LUN corresponding to this scsi_device 1047 - * @rescan: if nonzero skip some code only needed on first scan 1043 + * @rescan: if not equal to SCSI_SCAN_INITIAL skip some code only 1044 + * needed on first scan 1048 1045 * @hostdata: passed to scsi_alloc_sdev() 1049 1046 * 1050 1047 * Description: ··· 1060 1055 **/ 1061 1056 static int scsi_probe_and_add_lun(struct scsi_target *starget, 1062 1057 u64 lun, int *bflagsp, 1063 - struct scsi_device **sdevp, int rescan, 1058 + struct scsi_device **sdevp, 1059 + enum scsi_scan_mode rescan, 1064 1060 void *hostdata) 1065 1061 { 1066 1062 struct scsi_device *sdev; ··· 1075 1069 */ 1076 1070 sdev = scsi_device_lookup_by_target(starget, lun); 1077 1071 if (sdev) { 1078 - if (rescan || !scsi_device_created(sdev)) { 1072 + if (rescan != SCSI_SCAN_INITIAL || !scsi_device_created(sdev)) { 1079 1073 SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev, 1080 
1074 "scsi scan: device exists on %s\n", 1081 1075 dev_name(&sdev->sdev_gendev))); ··· 1211 1205 * Modifies sdevscan->lun. 1212 1206 **/ 1213 1207 static void scsi_sequential_lun_scan(struct scsi_target *starget, 1214 - int bflags, int scsi_level, int rescan) 1208 + int bflags, int scsi_level, 1209 + enum scsi_scan_mode rescan) 1215 1210 { 1216 1211 uint max_dev_lun; 1217 1212 u64 sparse_lun, lun; ··· 1307 1300 * 1: could not scan with REPORT LUN 1308 1301 **/ 1309 1302 static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, 1310 - int rescan) 1303 + enum scsi_scan_mode rescan) 1311 1304 { 1312 1305 char devname[64]; 1313 1306 unsigned char scsi_cmd[MAX_COMMAND_SIZE]; ··· 1553 1546 EXPORT_SYMBOL(scsi_rescan_device); 1554 1547 1555 1548 static void __scsi_scan_target(struct device *parent, unsigned int channel, 1556 - unsigned int id, u64 lun, int rescan) 1549 + unsigned int id, u64 lun, enum scsi_scan_mode rescan) 1557 1550 { 1558 1551 struct Scsi_Host *shost = dev_to_shost(parent); 1559 1552 int bflags = 0; ··· 1611 1604 * @channel: channel to scan 1612 1605 * @id: target id to scan 1613 1606 * @lun: Specific LUN to scan or SCAN_WILD_CARD 1614 - * @rescan: passed to LUN scanning routines 1607 + * @rescan: passed to LUN scanning routines; SCSI_SCAN_INITIAL for 1608 + * no rescan, SCSI_SCAN_RESCAN to rescan existing LUNs, 1609 + * and SCSI_SCAN_MANUAL to force scanning even if 1610 + * 'scan=manual' is set. 1615 1611 * 1616 1612 * Description: 1617 1613 * Scan the target id on @parent, @channel, and @id. Scan at least LUN 0, ··· 1624 1614 * sequential scan of LUNs on the target id. 
1625 1615 **/ 1626 1616 void scsi_scan_target(struct device *parent, unsigned int channel, 1627 - unsigned int id, u64 lun, int rescan) 1617 + unsigned int id, u64 lun, enum scsi_scan_mode rescan) 1628 1618 { 1629 1619 struct Scsi_Host *shost = dev_to_shost(parent); 1630 1620 1631 1621 if (strncmp(scsi_scan_type, "none", 4) == 0) 1622 + return; 1623 + 1624 + if (rescan != SCSI_SCAN_MANUAL && 1625 + strncmp(scsi_scan_type, "manual", 6) == 0) 1632 1626 return; 1633 1627 1634 1628 mutex_lock(&shost->scan_mutex); ··· 1648 1634 EXPORT_SYMBOL(scsi_scan_target); 1649 1635 1650 1636 static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel, 1651 - unsigned int id, u64 lun, int rescan) 1637 + unsigned int id, u64 lun, 1638 + enum scsi_scan_mode rescan) 1652 1639 { 1653 1640 uint order_id; 1654 1641 ··· 1680 1665 } 1681 1666 1682 1667 int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel, 1683 - unsigned int id, u64 lun, int rescan) 1668 + unsigned int id, u64 lun, 1669 + enum scsi_scan_mode rescan) 1684 1670 { 1685 1671 SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost, 1686 1672 "%s: <%u:%u:%llu>\n", ··· 1860 1844 { 1861 1845 struct async_scan_data *data; 1862 1846 1863 - if (strncmp(scsi_scan_type, "none", 4) == 0) 1847 + if (strncmp(scsi_scan_type, "none", 4) == 0 || 1848 + strncmp(scsi_scan_type, "manual", 6) == 0) 1864 1849 return; 1865 1850 if (scsi_autopm_get_host(shost) < 0) 1866 1851 return;
+5 -4
drivers/scsi/scsi_sysfs.c
··· 145 145 if (shost->transportt->user_scan) 146 146 res = shost->transportt->user_scan(shost, channel, id, lun); 147 147 else 148 - res = scsi_scan_host_selected(shost, channel, id, lun, 1); 148 + res = scsi_scan_host_selected(shost, channel, id, lun, 149 + SCSI_SCAN_MANUAL); 149 150 return res; 150 151 } 151 152 ··· 1367 1366 void scsi_remove_target(struct device *dev) 1368 1367 { 1369 1368 struct Scsi_Host *shost = dev_to_shost(dev->parent); 1370 - struct scsi_target *starget, *last_target = NULL; 1369 + struct scsi_target *starget; 1371 1370 unsigned long flags; 1372 1371 1373 1372 restart: 1374 1373 spin_lock_irqsave(shost->host_lock, flags); 1375 1374 list_for_each_entry(starget, &shost->__targets, siblings) { 1376 1375 if (starget->state == STARGET_DEL || 1377 - starget == last_target) 1376 + starget->state == STARGET_REMOVE) 1378 1377 continue; 1379 1378 if (starget->dev.parent == dev || &starget->dev == dev) { 1380 1379 kref_get(&starget->reap_ref); 1381 - last_target = starget; 1380 + starget->state = STARGET_REMOVE; 1382 1381 spin_unlock_irqrestore(shost->host_lock, flags); 1383 1382 __scsi_remove_target(starget); 1384 1383 scsi_target_reap(starget);
+161
drivers/scsi/scsi_trace.c
··· 17 17 */ 18 18 #include <linux/kernel.h> 19 19 #include <linux/trace_seq.h> 20 + #include <asm/unaligned.h> 20 21 #include <trace/events/scsi.h> 21 22 22 23 #define SERVICE_ACTION16(cdb) (cdb[1] & 0x1f) ··· 232 231 } 233 232 234 233 static const char * 234 + scsi_trace_maintenance_in(struct trace_seq *p, unsigned char *cdb, int len) 235 + { 236 + const char *ret = trace_seq_buffer_ptr(p), *cmd; 237 + u32 alloc_len; 238 + 239 + switch (SERVICE_ACTION16(cdb)) { 240 + case MI_REPORT_IDENTIFYING_INFORMATION: 241 + cmd = "REPORT_IDENTIFYING_INFORMATION"; 242 + break; 243 + case MI_REPORT_TARGET_PGS: 244 + cmd = "REPORT_TARGET_PORT_GROUPS"; 245 + break; 246 + case MI_REPORT_ALIASES: 247 + cmd = "REPORT_ALIASES"; 248 + break; 249 + case MI_REPORT_SUPPORTED_OPERATION_CODES: 250 + cmd = "REPORT_SUPPORTED_OPERATION_CODES"; 251 + break; 252 + case MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS: 253 + cmd = "REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS"; 254 + break; 255 + case MI_REPORT_PRIORITY: 256 + cmd = "REPORT_PRIORITY"; 257 + break; 258 + case MI_REPORT_TIMESTAMP: 259 + cmd = "REPORT_TIMESTAMP"; 260 + break; 261 + case MI_MANAGEMENT_PROTOCOL_IN: 262 + cmd = "MANAGEMENT_PROTOCOL_IN"; 263 + break; 264 + default: 265 + trace_seq_puts(p, "UNKNOWN"); 266 + goto out; 267 + } 268 + 269 + alloc_len = get_unaligned_be32(&cdb[6]); 270 + 271 + trace_seq_printf(p, "%s alloc_len=%u", cmd, alloc_len); 272 + 273 + out: 274 + trace_seq_putc(p, 0); 275 + 276 + return ret; 277 + } 278 + 279 + static const char * 280 + scsi_trace_maintenance_out(struct trace_seq *p, unsigned char *cdb, int len) 281 + { 282 + const char *ret = trace_seq_buffer_ptr(p), *cmd; 283 + u32 alloc_len; 284 + 285 + switch (SERVICE_ACTION16(cdb)) { 286 + case MO_SET_IDENTIFYING_INFORMATION: 287 + cmd = "SET_IDENTIFYING_INFORMATION"; 288 + break; 289 + case MO_SET_TARGET_PGS: 290 + cmd = "SET_TARGET_PORT_GROUPS"; 291 + break; 292 + case MO_CHANGE_ALIASES: 293 + cmd = "CHANGE_ALIASES"; 294 + break; 295 + case 
MO_SET_PRIORITY: 296 + cmd = "SET_PRIORITY"; 297 + break; 298 + case MO_SET_TIMESTAMP: 299 + cmd = "SET_TIMESTAMP"; 300 + break; 301 + case MO_MANAGEMENT_PROTOCOL_OUT: 302 + cmd = "MANAGEMENT_PROTOCOL_OUT"; 303 + break; 304 + default: 305 + trace_seq_puts(p, "UNKNOWN"); 306 + goto out; 307 + } 308 + 309 + alloc_len = get_unaligned_be32(&cdb[6]); 310 + 311 + trace_seq_printf(p, "%s alloc_len=%u", cmd, alloc_len); 312 + 313 + out: 314 + trace_seq_putc(p, 0); 315 + 316 + return ret; 317 + } 318 + 319 + static const char * 320 + scsi_trace_zbc_in(struct trace_seq *p, unsigned char *cdb, int len) 321 + { 322 + const char *ret = trace_seq_buffer_ptr(p), *cmd; 323 + u64 zone_id; 324 + u32 alloc_len; 325 + u8 options; 326 + 327 + switch (SERVICE_ACTION16(cdb)) { 328 + case ZI_REPORT_ZONES: 329 + cmd = "REPORT_ZONES"; 330 + break; 331 + default: 332 + trace_seq_puts(p, "UNKNOWN"); 333 + goto out; 334 + } 335 + 336 + zone_id = get_unaligned_be64(&cdb[2]); 337 + alloc_len = get_unaligned_be32(&cdb[10]); 338 + options = cdb[14] & 0x3f; 339 + 340 + trace_seq_printf(p, "%s zone=%llu alloc_len=%u options=%u partial=%u", 341 + cmd, (unsigned long long)zone_id, alloc_len, 342 + options, (cdb[14] >> 7) & 1); 343 + 344 + out: 345 + trace_seq_putc(p, 0); 346 + 347 + return ret; 348 + } 349 + 350 + static const char * 351 + scsi_trace_zbc_out(struct trace_seq *p, unsigned char *cdb, int len) 352 + { 353 + const char *ret = trace_seq_buffer_ptr(p), *cmd; 354 + u64 zone_id; 355 + 356 + switch (SERVICE_ACTION16(cdb)) { 357 + case ZO_CLOSE_ZONE: 358 + cmd = "CLOSE_ZONE"; 359 + break; 360 + case ZO_FINISH_ZONE: 361 + cmd = "FINISH_ZONE"; 362 + break; 363 + case ZO_OPEN_ZONE: 364 + cmd = "OPEN_ZONE"; 365 + break; 366 + case ZO_RESET_WRITE_POINTER: 367 + cmd = "RESET_WRITE_POINTER"; 368 + break; 369 + default: 370 + trace_seq_puts(p, "UNKNOWN"); 371 + goto out; 372 + } 373 + 374 + zone_id = get_unaligned_be64(&cdb[2]); 375 + 376 + trace_seq_printf(p, "%s zone=%llu all=%u", cmd, 377 + 
(unsigned long long)zone_id, cdb[14] & 1); 378 + 379 + out: 380 + trace_seq_putc(p, 0); 381 + 382 + return ret; 383 + } 384 + 385 + static const char * 235 386 scsi_trace_varlen(struct trace_seq *p, unsigned char *cdb, int len) 236 387 { 237 388 switch (SERVICE_ACTION32(cdb)) { ··· 435 282 return scsi_trace_service_action_in(p, cdb, len); 436 283 case VARIABLE_LENGTH_CMD: 437 284 return scsi_trace_varlen(p, cdb, len); 285 + case MAINTENANCE_IN: 286 + return scsi_trace_maintenance_in(p, cdb, len); 287 + case MAINTENANCE_OUT: 288 + return scsi_trace_maintenance_out(p, cdb, len); 289 + case ZBC_IN: 290 + return scsi_trace_zbc_in(p, cdb, len); 291 + case ZBC_OUT: 292 + return scsi_trace_zbc_out(p, cdb, len); 438 293 default: 439 294 return scsi_trace_misc(p, cdb, len); 440 295 }
+5 -4
drivers/scsi/scsi_transport_fc.c
··· 2027 2027 kfree(vport); 2028 2028 } 2029 2029 2030 - int scsi_is_fc_vport(const struct device *dev) 2030 + static int scsi_is_fc_vport(const struct device *dev) 2031 2031 { 2032 2032 return dev->release == fc_vport_dev_release; 2033 2033 } 2034 - EXPORT_SYMBOL(scsi_is_fc_vport); 2035 2034 2036 2035 static int fc_vport_match(struct attribute_container *cont, 2037 2036 struct device *dev) ··· 2109 2110 if ((channel == rport->channel) && 2110 2111 (id == rport->scsi_target_id)) { 2111 2112 spin_unlock_irqrestore(shost->host_lock, flags); 2112 - scsi_scan_target(&rport->dev, channel, id, lun, 1); 2113 + scsi_scan_target(&rport->dev, channel, id, lun, 2114 + SCSI_SCAN_MANUAL); 2113 2115 return; 2114 2116 } 2115 2117 } ··· 3277 3277 (rport->roles & FC_PORT_ROLE_FCP_TARGET) && 3278 3278 !(i->f->disable_target_scan)) { 3279 3279 scsi_scan_target(&rport->dev, rport->channel, 3280 - rport->scsi_target_id, SCAN_WILD_CARD, 1); 3280 + rport->scsi_target_id, SCAN_WILD_CARD, 3281 + SCSI_SCAN_RESCAN); 3281 3282 } 3282 3283 3283 3284 spin_lock_irqsave(shost->host_lock, flags);
+9 -10
drivers/scsi/scsi_transport_iscsi.c
··· 1009 1009 kfree(fnode_sess); 1010 1010 } 1011 1011 1012 - struct device_type iscsi_flashnode_sess_dev_type = { 1012 + static struct device_type iscsi_flashnode_sess_dev_type = { 1013 1013 .name = "iscsi_flashnode_sess_dev_type", 1014 1014 .groups = iscsi_flashnode_sess_attr_groups, 1015 1015 .release = iscsi_flashnode_sess_release, ··· 1195 1195 kfree(fnode_conn); 1196 1196 } 1197 1197 1198 - struct device_type iscsi_flashnode_conn_dev_type = { 1198 + static struct device_type iscsi_flashnode_conn_dev_type = { 1199 1199 .name = "iscsi_flashnode_conn_dev_type", 1200 1200 .groups = iscsi_flashnode_conn_attr_groups, 1201 1201 .release = iscsi_flashnode_conn_release, 1202 1202 }; 1203 1203 1204 - struct bus_type iscsi_flashnode_bus; 1204 + static struct bus_type iscsi_flashnode_bus; 1205 1205 1206 1206 int iscsi_flashnode_bus_match(struct device *dev, 1207 1207 struct device_driver *drv) ··· 1212 1212 } 1213 1213 EXPORT_SYMBOL_GPL(iscsi_flashnode_bus_match); 1214 1214 1215 - struct bus_type iscsi_flashnode_bus = { 1215 + static struct bus_type iscsi_flashnode_bus = { 1216 1216 .name = "iscsi_flashnode", 1217 1217 .match = &iscsi_flashnode_bus_match, 1218 1218 }; ··· 1324 1324 * 1 on success 1325 1325 * 0 on failure 1326 1326 */ 1327 - int iscsi_is_flashnode_conn_dev(struct device *dev, void *data) 1327 + static int iscsi_is_flashnode_conn_dev(struct device *dev, void *data) 1328 1328 { 1329 1329 return dev->bus == &iscsi_flashnode_bus; 1330 1330 } 1331 - EXPORT_SYMBOL_GPL(iscsi_is_flashnode_conn_dev); 1332 1331 1333 1332 static int iscsi_destroy_flashnode_conn(struct iscsi_bus_flash_conn *fnode_conn) 1334 1333 { ··· 1782 1783 unsigned int channel; 1783 1784 unsigned int id; 1784 1785 u64 lun; 1786 + enum scsi_scan_mode rescan; 1785 1787 }; 1786 1788 1787 1789 static int iscsi_user_scan_session(struct device *dev, void *data) ··· 1819 1819 (scan_data->id == SCAN_WILD_CARD || 1820 1820 scan_data->id == id)) 1821 1821 scsi_scan_target(&session->dev, 0, id, 1822 - 
scan_data->lun, 1); 1822 + scan_data->lun, scan_data->rescan); 1823 1823 } 1824 1824 1825 1825 user_scan_exit: ··· 1836 1836 scan_data.channel = channel; 1837 1837 scan_data.id = id; 1838 1838 scan_data.lun = lun; 1839 + scan_data.rescan = SCSI_SCAN_MANUAL; 1839 1840 1840 1841 return device_for_each_child(&shost->shost_gendev, &scan_data, 1841 1842 iscsi_user_scan_session); ··· 1853 1852 scan_data.channel = 0; 1854 1853 scan_data.id = SCAN_WILD_CARD; 1855 1854 scan_data.lun = SCAN_WILD_CARD; 1855 + scan_data.rescan = SCSI_SCAN_RESCAN; 1856 1856 1857 1857 iscsi_user_scan_session(&session->dev, &scan_data); 1858 1858 atomic_dec(&ihost->nr_scans); ··· 2069 2067 2070 2068 int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id) 2071 2069 { 2072 - struct Scsi_Host *shost = iscsi_session_to_shost(session); 2073 - struct iscsi_cls_host *ihost; 2074 2070 unsigned long flags; 2075 2071 int id = 0; 2076 2072 int err; 2077 2073 2078 - ihost = shost->shost_data; 2079 2074 session->sid = atomic_add_return(1, &iscsi_session_nr); 2080 2075 2081 2076 if (target_id == ISCSI_MAX_TARGET) {
+4 -3
drivers/scsi/scsi_transport_sas.c
··· 1614 1614 else 1615 1615 lun = 0; 1616 1616 1617 - scsi_scan_target(&rphy->dev, 0, rphy->scsi_target_id, lun, 0); 1617 + scsi_scan_target(&rphy->dev, 0, rphy->scsi_target_id, lun, 1618 + SCSI_SCAN_INITIAL); 1618 1619 } 1619 1620 1620 1621 return 0; ··· 1740 1739 1741 1740 if ((channel == SCAN_WILD_CARD || channel == 0) && 1742 1741 (id == SCAN_WILD_CARD || id == rphy->scsi_target_id)) { 1743 - scsi_scan_target(&rphy->dev, 0, 1744 - rphy->scsi_target_id, lun, 1); 1742 + scsi_scan_target(&rphy->dev, 0, rphy->scsi_target_id, 1743 + lun, SCSI_SCAN_MANUAL); 1745 1744 } 1746 1745 } 1747 1746 mutex_unlock(&sas_host->lock);
+826
drivers/scsi/sense_codes.h
··· 1 + /* 2 + * The canonical list of T10 Additional Sense Codes is available at: 3 + * http://www.t10.org/lists/asc-num.txt [most recent: 20141221] 4 + */ 5 + 6 + SENSE_CODE(0x0000, "No additional sense information") 7 + SENSE_CODE(0x0001, "Filemark detected") 8 + SENSE_CODE(0x0002, "End-of-partition/medium detected") 9 + SENSE_CODE(0x0003, "Setmark detected") 10 + SENSE_CODE(0x0004, "Beginning-of-partition/medium detected") 11 + SENSE_CODE(0x0005, "End-of-data detected") 12 + SENSE_CODE(0x0006, "I/O process terminated") 13 + SENSE_CODE(0x0007, "Programmable early warning detected") 14 + SENSE_CODE(0x0011, "Audio play operation in progress") 15 + SENSE_CODE(0x0012, "Audio play operation paused") 16 + SENSE_CODE(0x0013, "Audio play operation successfully completed") 17 + SENSE_CODE(0x0014, "Audio play operation stopped due to error") 18 + SENSE_CODE(0x0015, "No current audio status to return") 19 + SENSE_CODE(0x0016, "Operation in progress") 20 + SENSE_CODE(0x0017, "Cleaning requested") 21 + SENSE_CODE(0x0018, "Erase operation in progress") 22 + SENSE_CODE(0x0019, "Locate operation in progress") 23 + SENSE_CODE(0x001A, "Rewind operation in progress") 24 + SENSE_CODE(0x001B, "Set capacity operation in progress") 25 + SENSE_CODE(0x001C, "Verify operation in progress") 26 + SENSE_CODE(0x001D, "ATA pass through information available") 27 + SENSE_CODE(0x001E, "Conflicting SA creation request") 28 + SENSE_CODE(0x001F, "Logical unit transitioning to another power condition") 29 + SENSE_CODE(0x0020, "Extended copy information available") 30 + SENSE_CODE(0x0021, "Atomic command aborted due to ACA") 31 + 32 + SENSE_CODE(0x0100, "No index/sector signal") 33 + 34 + SENSE_CODE(0x0200, "No seek complete") 35 + 36 + SENSE_CODE(0x0300, "Peripheral device write fault") 37 + SENSE_CODE(0x0301, "No write current") 38 + SENSE_CODE(0x0302, "Excessive write errors") 39 + 40 + SENSE_CODE(0x0400, "Logical unit not ready, cause not reportable") 41 + SENSE_CODE(0x0401, "Logical unit is in 
process of becoming ready") 42 + SENSE_CODE(0x0402, "Logical unit not ready, initializing command required") 43 + SENSE_CODE(0x0403, "Logical unit not ready, manual intervention required") 44 + SENSE_CODE(0x0404, "Logical unit not ready, format in progress") 45 + SENSE_CODE(0x0405, "Logical unit not ready, rebuild in progress") 46 + SENSE_CODE(0x0406, "Logical unit not ready, recalculation in progress") 47 + SENSE_CODE(0x0407, "Logical unit not ready, operation in progress") 48 + SENSE_CODE(0x0408, "Logical unit not ready, long write in progress") 49 + SENSE_CODE(0x0409, "Logical unit not ready, self-test in progress") 50 + SENSE_CODE(0x040A, "Logical unit not accessible, asymmetric access state transition") 51 + SENSE_CODE(0x040B, "Logical unit not accessible, target port in standby state") 52 + SENSE_CODE(0x040C, "Logical unit not accessible, target port in unavailable state") 53 + SENSE_CODE(0x040D, "Logical unit not ready, structure check required") 54 + SENSE_CODE(0x040E, "Logical unit not ready, security session in progress") 55 + SENSE_CODE(0x0410, "Logical unit not ready, auxiliary memory not accessible") 56 + SENSE_CODE(0x0411, "Logical unit not ready, notify (enable spinup) required") 57 + SENSE_CODE(0x0412, "Logical unit not ready, offline") 58 + SENSE_CODE(0x0413, "Logical unit not ready, SA creation in progress") 59 + SENSE_CODE(0x0414, "Logical unit not ready, space allocation in progress") 60 + SENSE_CODE(0x0415, "Logical unit not ready, robotics disabled") 61 + SENSE_CODE(0x0416, "Logical unit not ready, configuration required") 62 + SENSE_CODE(0x0417, "Logical unit not ready, calibration required") 63 + SENSE_CODE(0x0418, "Logical unit not ready, a door is open") 64 + SENSE_CODE(0x0419, "Logical unit not ready, operating in sequential mode") 65 + SENSE_CODE(0x041A, "Logical unit not ready, start stop unit command in progress") 66 + SENSE_CODE(0x041B, "Logical unit not ready, sanitize in progress") 67 + SENSE_CODE(0x041C, "Logical unit not ready, 
additional power use not yet granted") 68 + SENSE_CODE(0x041D, "Logical unit not ready, configuration in progress") 69 + SENSE_CODE(0x041E, "Logical unit not ready, microcode activation required") 70 + SENSE_CODE(0x041F, "Logical unit not ready, microcode download required") 71 + SENSE_CODE(0x0420, "Logical unit not ready, logical unit reset required") 72 + SENSE_CODE(0x0421, "Logical unit not ready, hard reset required") 73 + SENSE_CODE(0x0422, "Logical unit not ready, power cycle required") 74 + 75 + SENSE_CODE(0x0500, "Logical unit does not respond to selection") 76 + 77 + SENSE_CODE(0x0600, "No reference position found") 78 + 79 + SENSE_CODE(0x0700, "Multiple peripheral devices selected") 80 + 81 + SENSE_CODE(0x0800, "Logical unit communication failure") 82 + SENSE_CODE(0x0801, "Logical unit communication time-out") 83 + SENSE_CODE(0x0802, "Logical unit communication parity error") 84 + SENSE_CODE(0x0803, "Logical unit communication CRC error (Ultra-DMA/32)") 85 + SENSE_CODE(0x0804, "Unreachable copy target") 86 + 87 + SENSE_CODE(0x0900, "Track following error") 88 + SENSE_CODE(0x0901, "Tracking servo failure") 89 + SENSE_CODE(0x0902, "Focus servo failure") 90 + SENSE_CODE(0x0903, "Spindle servo failure") 91 + SENSE_CODE(0x0904, "Head select fault") 92 + SENSE_CODE(0x0905, "Vibration induced tracking error") 93 + 94 + SENSE_CODE(0x0A00, "Error log overflow") 95 + 96 + SENSE_CODE(0x0B00, "Warning") 97 + SENSE_CODE(0x0B01, "Warning - specified temperature exceeded") 98 + SENSE_CODE(0x0B02, "Warning - enclosure degraded") 99 + SENSE_CODE(0x0B03, "Warning - background self-test failed") 100 + SENSE_CODE(0x0B04, "Warning - background pre-scan detected medium error") 101 + SENSE_CODE(0x0B05, "Warning - background medium scan detected medium error") 102 + SENSE_CODE(0x0B06, "Warning - non-volatile cache now volatile") 103 + SENSE_CODE(0x0B07, "Warning - degraded power to non-volatile cache") 104 + SENSE_CODE(0x0B08, "Warning - power loss expected") 105 + 
SENSE_CODE(0x0B09, "Warning - device statistics notification active") 106 + 107 + SENSE_CODE(0x0C00, "Write error") 108 + SENSE_CODE(0x0C01, "Write error - recovered with auto reallocation") 109 + SENSE_CODE(0x0C02, "Write error - auto reallocation failed") 110 + SENSE_CODE(0x0C03, "Write error - recommend reassignment") 111 + SENSE_CODE(0x0C04, "Compression check miscompare error") 112 + SENSE_CODE(0x0C05, "Data expansion occurred during compression") 113 + SENSE_CODE(0x0C06, "Block not compressible") 114 + SENSE_CODE(0x0C07, "Write error - recovery needed") 115 + SENSE_CODE(0x0C08, "Write error - recovery failed") 116 + SENSE_CODE(0x0C09, "Write error - loss of streaming") 117 + SENSE_CODE(0x0C0A, "Write error - padding blocks added") 118 + SENSE_CODE(0x0C0B, "Auxiliary memory write error") 119 + SENSE_CODE(0x0C0C, "Write error - unexpected unsolicited data") 120 + SENSE_CODE(0x0C0D, "Write error - not enough unsolicited data") 121 + SENSE_CODE(0x0C0E, "Multiple write errors") 122 + SENSE_CODE(0x0C0F, "Defects in error window") 123 + SENSE_CODE(0x0C10, "Incomplete multiple atomic write operations") 124 + 125 + SENSE_CODE(0x0D00, "Error detected by third party temporary initiator") 126 + SENSE_CODE(0x0D01, "Third party device failure") 127 + SENSE_CODE(0x0D02, "Copy target device not reachable") 128 + SENSE_CODE(0x0D03, "Incorrect copy target device type") 129 + SENSE_CODE(0x0D04, "Copy target device data underrun") 130 + SENSE_CODE(0x0D05, "Copy target device data overrun") 131 + 132 + SENSE_CODE(0x0E00, "Invalid information unit") 133 + SENSE_CODE(0x0E01, "Information unit too short") 134 + SENSE_CODE(0x0E02, "Information unit too long") 135 + SENSE_CODE(0x0E03, "Invalid field in command information unit") 136 + 137 + SENSE_CODE(0x1000, "Id CRC or ECC error") 138 + SENSE_CODE(0x1001, "Logical block guard check failed") 139 + SENSE_CODE(0x1002, "Logical block application tag check failed") 140 + SENSE_CODE(0x1003, "Logical block reference tag check failed") 141 + 
SENSE_CODE(0x1004, "Logical block protection error on recover buffered data") 142 + SENSE_CODE(0x1005, "Logical block protection method error") 143 + 144 + SENSE_CODE(0x1100, "Unrecovered read error") 145 + SENSE_CODE(0x1101, "Read retries exhausted") 146 + SENSE_CODE(0x1102, "Error too long to correct") 147 + SENSE_CODE(0x1103, "Multiple read errors") 148 + SENSE_CODE(0x1104, "Unrecovered read error - auto reallocate failed") 149 + SENSE_CODE(0x1105, "L-EC uncorrectable error") 150 + SENSE_CODE(0x1106, "CIRC unrecovered error") 151 + SENSE_CODE(0x1107, "Data re-synchronization error") 152 + SENSE_CODE(0x1108, "Incomplete block read") 153 + SENSE_CODE(0x1109, "No gap found") 154 + SENSE_CODE(0x110A, "Miscorrected error") 155 + SENSE_CODE(0x110B, "Unrecovered read error - recommend reassignment") 156 + SENSE_CODE(0x110C, "Unrecovered read error - recommend rewrite the data") 157 + SENSE_CODE(0x110D, "De-compression CRC error") 158 + SENSE_CODE(0x110E, "Cannot decompress using declared algorithm") 159 + SENSE_CODE(0x110F, "Error reading UPC/EAN number") 160 + SENSE_CODE(0x1110, "Error reading ISRC number") 161 + SENSE_CODE(0x1111, "Read error - loss of streaming") 162 + SENSE_CODE(0x1112, "Auxiliary memory read error") 163 + SENSE_CODE(0x1113, "Read error - failed retransmission request") 164 + SENSE_CODE(0x1114, "Read error - lba marked bad by application client") 165 + SENSE_CODE(0x1115, "Write after sanitize required") 166 + 167 + SENSE_CODE(0x1200, "Address mark not found for id field") 168 + 169 + SENSE_CODE(0x1300, "Address mark not found for data field") 170 + 171 + SENSE_CODE(0x1400, "Recorded entity not found") 172 + SENSE_CODE(0x1401, "Record not found") 173 + SENSE_CODE(0x1402, "Filemark or setmark not found") 174 + SENSE_CODE(0x1403, "End-of-data not found") 175 + SENSE_CODE(0x1404, "Block sequence error") 176 + SENSE_CODE(0x1405, "Record not found - recommend reassignment") 177 + SENSE_CODE(0x1406, "Record not found - data auto-reallocated") 178 + 
SENSE_CODE(0x1407, "Locate operation failure") 179 + 180 + SENSE_CODE(0x1500, "Random positioning error") 181 + SENSE_CODE(0x1501, "Mechanical positioning error") 182 + SENSE_CODE(0x1502, "Positioning error detected by read of medium") 183 + 184 + SENSE_CODE(0x1600, "Data synchronization mark error") 185 + SENSE_CODE(0x1601, "Data sync error - data rewritten") 186 + SENSE_CODE(0x1602, "Data sync error - recommend rewrite") 187 + SENSE_CODE(0x1603, "Data sync error - data auto-reallocated") 188 + SENSE_CODE(0x1604, "Data sync error - recommend reassignment") 189 + 190 + SENSE_CODE(0x1700, "Recovered data with no error correction applied") 191 + SENSE_CODE(0x1701, "Recovered data with retries") 192 + SENSE_CODE(0x1702, "Recovered data with positive head offset") 193 + SENSE_CODE(0x1703, "Recovered data with negative head offset") 194 + SENSE_CODE(0x1704, "Recovered data with retries and/or circ applied") 195 + SENSE_CODE(0x1705, "Recovered data using previous sector id") 196 + SENSE_CODE(0x1706, "Recovered data without ECC - data auto-reallocated") 197 + SENSE_CODE(0x1707, "Recovered data without ECC - recommend reassignment") 198 + SENSE_CODE(0x1708, "Recovered data without ECC - recommend rewrite") 199 + SENSE_CODE(0x1709, "Recovered data without ECC - data rewritten") 200 + 201 + SENSE_CODE(0x1800, "Recovered data with error correction applied") 202 + SENSE_CODE(0x1801, "Recovered data with error corr. 
& retries applied") 203 + SENSE_CODE(0x1802, "Recovered data - data auto-reallocated") 204 + SENSE_CODE(0x1803, "Recovered data with CIRC") 205 + SENSE_CODE(0x1804, "Recovered data with L-EC") 206 + SENSE_CODE(0x1805, "Recovered data - recommend reassignment") 207 + SENSE_CODE(0x1806, "Recovered data - recommend rewrite") 208 + SENSE_CODE(0x1807, "Recovered data with ECC - data rewritten") 209 + SENSE_CODE(0x1808, "Recovered data with linking") 210 + 211 + SENSE_CODE(0x1900, "Defect list error") 212 + SENSE_CODE(0x1901, "Defect list not available") 213 + SENSE_CODE(0x1902, "Defect list error in primary list") 214 + SENSE_CODE(0x1903, "Defect list error in grown list") 215 + 216 + SENSE_CODE(0x1A00, "Parameter list length error") 217 + 218 + SENSE_CODE(0x1B00, "Synchronous data transfer error") 219 + 220 + SENSE_CODE(0x1C00, "Defect list not found") 221 + SENSE_CODE(0x1C01, "Primary defect list not found") 222 + SENSE_CODE(0x1C02, "Grown defect list not found") 223 + 224 + SENSE_CODE(0x1D00, "Miscompare during verify operation") 225 + SENSE_CODE(0x1D01, "Miscompare verify of unmapped LBA") 226 + 227 + SENSE_CODE(0x1E00, "Recovered id with ECC correction") 228 + 229 + SENSE_CODE(0x1F00, "Partial defect list transfer") 230 + 231 + SENSE_CODE(0x2000, "Invalid command operation code") 232 + SENSE_CODE(0x2001, "Access denied - initiator pending-enrolled") 233 + SENSE_CODE(0x2002, "Access denied - no access rights") 234 + SENSE_CODE(0x2003, "Access denied - invalid mgmt id key") 235 + SENSE_CODE(0x2004, "Illegal command while in write capable state") 236 + SENSE_CODE(0x2005, "Obsolete") 237 + SENSE_CODE(0x2006, "Illegal command while in explicit address mode") 238 + SENSE_CODE(0x2007, "Illegal command while in implicit address mode") 239 + SENSE_CODE(0x2008, "Access denied - enrollment conflict") 240 + SENSE_CODE(0x2009, "Access denied - invalid LU identifier") 241 + SENSE_CODE(0x200A, "Access denied - invalid proxy token") 242 + SENSE_CODE(0x200B, "Access denied - ACL 
LUN conflict") 243 + SENSE_CODE(0x200C, "Illegal command when not in append-only mode") 244 + 245 + SENSE_CODE(0x2100, "Logical block address out of range") 246 + SENSE_CODE(0x2101, "Invalid element address") 247 + SENSE_CODE(0x2102, "Invalid address for write") 248 + SENSE_CODE(0x2103, "Invalid write crossing layer jump") 249 + SENSE_CODE(0x2104, "Unaligned write command") 250 + SENSE_CODE(0x2105, "Write boundary violation") 251 + SENSE_CODE(0x2106, "Attempt to read invalid data") 252 + SENSE_CODE(0x2107, "Read boundary violation") 253 + 254 + SENSE_CODE(0x2200, "Illegal function (use 20 00, 24 00, or 26 00)") 255 + 256 + SENSE_CODE(0x2300, "Invalid token operation, cause not reportable") 257 + SENSE_CODE(0x2301, "Invalid token operation, unsupported token type") 258 + SENSE_CODE(0x2302, "Invalid token operation, remote token usage not supported") 259 + SENSE_CODE(0x2303, "Invalid token operation, remote rod token creation not supported") 260 + SENSE_CODE(0x2304, "Invalid token operation, token unknown") 261 + SENSE_CODE(0x2305, "Invalid token operation, token corrupt") 262 + SENSE_CODE(0x2306, "Invalid token operation, token revoked") 263 + SENSE_CODE(0x2307, "Invalid token operation, token expired") 264 + SENSE_CODE(0x2308, "Invalid token operation, token cancelled") 265 + SENSE_CODE(0x2309, "Invalid token operation, token deleted") 266 + SENSE_CODE(0x230A, "Invalid token operation, invalid token length") 267 + 268 + SENSE_CODE(0x2400, "Invalid field in cdb") 269 + SENSE_CODE(0x2401, "CDB decryption error") 270 + SENSE_CODE(0x2402, "Obsolete") 271 + SENSE_CODE(0x2403, "Obsolete") 272 + SENSE_CODE(0x2404, "Security audit value frozen") 273 + SENSE_CODE(0x2405, "Security working key frozen") 274 + SENSE_CODE(0x2406, "Nonce not unique") 275 + SENSE_CODE(0x2407, "Nonce timestamp out of range") 276 + SENSE_CODE(0x2408, "Invalid XCDB") 277 + 278 + SENSE_CODE(0x2500, "Logical unit not supported") 279 + 280 + SENSE_CODE(0x2600, "Invalid field in parameter list") 281 + 
SENSE_CODE(0x2601, "Parameter not supported") 282 + SENSE_CODE(0x2602, "Parameter value invalid") 283 + SENSE_CODE(0x2603, "Threshold parameters not supported") 284 + SENSE_CODE(0x2604, "Invalid release of persistent reservation") 285 + SENSE_CODE(0x2605, "Data decryption error") 286 + SENSE_CODE(0x2606, "Too many target descriptors") 287 + SENSE_CODE(0x2607, "Unsupported target descriptor type code") 288 + SENSE_CODE(0x2608, "Too many segment descriptors") 289 + SENSE_CODE(0x2609, "Unsupported segment descriptor type code") 290 + SENSE_CODE(0x260A, "Unexpected inexact segment") 291 + SENSE_CODE(0x260B, "Inline data length exceeded") 292 + SENSE_CODE(0x260C, "Invalid operation for copy source or destination") 293 + SENSE_CODE(0x260D, "Copy segment granularity violation") 294 + SENSE_CODE(0x260E, "Invalid parameter while port is enabled") 295 + SENSE_CODE(0x260F, "Invalid data-out buffer integrity check value") 296 + SENSE_CODE(0x2610, "Data decryption key fail limit reached") 297 + SENSE_CODE(0x2611, "Incomplete key-associated data set") 298 + SENSE_CODE(0x2612, "Vendor specific key reference not found") 299 + 300 + SENSE_CODE(0x2700, "Write protected") 301 + SENSE_CODE(0x2701, "Hardware write protected") 302 + SENSE_CODE(0x2702, "Logical unit software write protected") 303 + SENSE_CODE(0x2703, "Associated write protect") 304 + SENSE_CODE(0x2704, "Persistent write protect") 305 + SENSE_CODE(0x2705, "Permanent write protect") 306 + SENSE_CODE(0x2706, "Conditional write protect") 307 + SENSE_CODE(0x2707, "Space allocation failed write protect") 308 + SENSE_CODE(0x2708, "Zone is read only") 309 + 310 + SENSE_CODE(0x2800, "Not ready to ready change, medium may have changed") 311 + SENSE_CODE(0x2801, "Import or export element accessed") 312 + SENSE_CODE(0x2802, "Format-layer may have changed") 313 + SENSE_CODE(0x2803, "Import/export element accessed, medium changed") 314 + 315 + SENSE_CODE(0x2900, "Power on, reset, or bus device reset occurred") 316 + SENSE_CODE(0x2901, 
"Power on occurred") 317 + SENSE_CODE(0x2902, "Scsi bus reset occurred") 318 + SENSE_CODE(0x2903, "Bus device reset function occurred") 319 + SENSE_CODE(0x2904, "Device internal reset") 320 + SENSE_CODE(0x2905, "Transceiver mode changed to single-ended") 321 + SENSE_CODE(0x2906, "Transceiver mode changed to lvd") 322 + SENSE_CODE(0x2907, "I_T nexus loss occurred") 323 + 324 + SENSE_CODE(0x2A00, "Parameters changed") 325 + SENSE_CODE(0x2A01, "Mode parameters changed") 326 + SENSE_CODE(0x2A02, "Log parameters changed") 327 + SENSE_CODE(0x2A03, "Reservations preempted") 328 + SENSE_CODE(0x2A04, "Reservations released") 329 + SENSE_CODE(0x2A05, "Registrations preempted") 330 + SENSE_CODE(0x2A06, "Asymmetric access state changed") 331 + SENSE_CODE(0x2A07, "Implicit asymmetric access state transition failed") 332 + SENSE_CODE(0x2A08, "Priority changed") 333 + SENSE_CODE(0x2A09, "Capacity data has changed") 334 + SENSE_CODE(0x2A0A, "Error history I_T nexus cleared") 335 + SENSE_CODE(0x2A0B, "Error history snapshot released") 336 + SENSE_CODE(0x2A0C, "Error recovery attributes have changed") 337 + SENSE_CODE(0x2A0D, "Data encryption capabilities changed") 338 + SENSE_CODE(0x2A10, "Timestamp changed") 339 + SENSE_CODE(0x2A11, "Data encryption parameters changed by another i_t nexus") 340 + SENSE_CODE(0x2A12, "Data encryption parameters changed by vendor specific event") 341 + SENSE_CODE(0x2A13, "Data encryption key instance counter has changed") 342 + SENSE_CODE(0x2A14, "SA creation capabilities data has changed") 343 + SENSE_CODE(0x2A15, "Medium removal prevention preempted") 344 + 345 + SENSE_CODE(0x2B00, "Copy cannot execute since host cannot disconnect") 346 + 347 + SENSE_CODE(0x2C00, "Command sequence error") 348 + SENSE_CODE(0x2C01, "Too many windows specified") 349 + SENSE_CODE(0x2C02, "Invalid combination of windows specified") 350 + SENSE_CODE(0x2C03, "Current program area is not empty") 351 + SENSE_CODE(0x2C04, "Current program area is empty") 352 + 
SENSE_CODE(0x2C05, "Illegal power condition request") 353 + SENSE_CODE(0x2C06, "Persistent prevent conflict") 354 + SENSE_CODE(0x2C07, "Previous busy status") 355 + SENSE_CODE(0x2C08, "Previous task set full status") 356 + SENSE_CODE(0x2C09, "Previous reservation conflict status") 357 + SENSE_CODE(0x2C0A, "Partition or collection contains user objects") 358 + SENSE_CODE(0x2C0B, "Not reserved") 359 + SENSE_CODE(0x2C0C, "Orwrite generation does not match") 360 + SENSE_CODE(0x2C0D, "Reset write pointer not allowed") 361 + SENSE_CODE(0x2C0E, "Zone is offline") 362 + 363 + SENSE_CODE(0x2D00, "Overwrite error on update in place") 364 + 365 + SENSE_CODE(0x2E00, "Insufficient time for operation") 366 + SENSE_CODE(0x2E01, "Command timeout before processing") 367 + SENSE_CODE(0x2E02, "Command timeout during processing") 368 + SENSE_CODE(0x2E03, "Command timeout during processing due to error recovery") 369 + 370 + SENSE_CODE(0x2F00, "Commands cleared by another initiator") 371 + SENSE_CODE(0x2F01, "Commands cleared by power loss notification") 372 + SENSE_CODE(0x2F02, "Commands cleared by device server") 373 + SENSE_CODE(0x2F03, "Some commands cleared by queuing layer event") 374 + 375 + SENSE_CODE(0x3000, "Incompatible medium installed") 376 + SENSE_CODE(0x3001, "Cannot read medium - unknown format") 377 + SENSE_CODE(0x3002, "Cannot read medium - incompatible format") 378 + SENSE_CODE(0x3003, "Cleaning cartridge installed") 379 + SENSE_CODE(0x3004, "Cannot write medium - unknown format") 380 + SENSE_CODE(0x3005, "Cannot write medium - incompatible format") 381 + SENSE_CODE(0x3006, "Cannot format medium - incompatible medium") 382 + SENSE_CODE(0x3007, "Cleaning failure") 383 + SENSE_CODE(0x3008, "Cannot write - application code mismatch") 384 + SENSE_CODE(0x3009, "Current session not fixated for append") 385 + SENSE_CODE(0x300A, "Cleaning request rejected") 386 + SENSE_CODE(0x300C, "WORM medium - overwrite attempted") 387 + SENSE_CODE(0x300D, "WORM medium - integrity check") 
388 + SENSE_CODE(0x3010, "Medium not formatted") 389 + SENSE_CODE(0x3011, "Incompatible volume type") 390 + SENSE_CODE(0x3012, "Incompatible volume qualifier") 391 + SENSE_CODE(0x3013, "Cleaning volume expired") 392 + 393 + SENSE_CODE(0x3100, "Medium format corrupted") 394 + SENSE_CODE(0x3101, "Format command failed") 395 + SENSE_CODE(0x3102, "Zoned formatting failed due to spare linking") 396 + SENSE_CODE(0x3103, "Sanitize command failed") 397 + 398 + SENSE_CODE(0x3200, "No defect spare location available") 399 + SENSE_CODE(0x3201, "Defect list update failure") 400 + 401 + SENSE_CODE(0x3300, "Tape length error") 402 + 403 + SENSE_CODE(0x3400, "Enclosure failure") 404 + 405 + SENSE_CODE(0x3500, "Enclosure services failure") 406 + SENSE_CODE(0x3501, "Unsupported enclosure function") 407 + SENSE_CODE(0x3502, "Enclosure services unavailable") 408 + SENSE_CODE(0x3503, "Enclosure services transfer failure") 409 + SENSE_CODE(0x3504, "Enclosure services transfer refused") 410 + SENSE_CODE(0x3505, "Enclosure services checksum error") 411 + 412 + SENSE_CODE(0x3600, "Ribbon, ink, or toner failure") 413 + 414 + SENSE_CODE(0x3700, "Rounded parameter") 415 + 416 + SENSE_CODE(0x3800, "Event status notification") 417 + SENSE_CODE(0x3802, "Esn - power management class event") 418 + SENSE_CODE(0x3804, "Esn - media class event") 419 + SENSE_CODE(0x3806, "Esn - device busy class event") 420 + SENSE_CODE(0x3807, "Thin Provisioning soft threshold reached") 421 + 422 + SENSE_CODE(0x3900, "Saving parameters not supported") 423 + 424 + SENSE_CODE(0x3A00, "Medium not present") 425 + SENSE_CODE(0x3A01, "Medium not present - tray closed") 426 + SENSE_CODE(0x3A02, "Medium not present - tray open") 427 + SENSE_CODE(0x3A03, "Medium not present - loadable") 428 + SENSE_CODE(0x3A04, "Medium not present - medium auxiliary memory accessible") 429 + 430 + SENSE_CODE(0x3B00, "Sequential positioning error") 431 + SENSE_CODE(0x3B01, "Tape position error at beginning-of-medium") 432 + SENSE_CODE(0x3B02, 
"Tape position error at end-of-medium") 433 + SENSE_CODE(0x3B03, "Tape or electronic vertical forms unit not ready") 434 + SENSE_CODE(0x3B04, "Slew failure") 435 + SENSE_CODE(0x3B05, "Paper jam") 436 + SENSE_CODE(0x3B06, "Failed to sense top-of-form") 437 + SENSE_CODE(0x3B07, "Failed to sense bottom-of-form") 438 + SENSE_CODE(0x3B08, "Reposition error") 439 + SENSE_CODE(0x3B09, "Read past end of medium") 440 + SENSE_CODE(0x3B0A, "Read past beginning of medium") 441 + SENSE_CODE(0x3B0B, "Position past end of medium") 442 + SENSE_CODE(0x3B0C, "Position past beginning of medium") 443 + SENSE_CODE(0x3B0D, "Medium destination element full") 444 + SENSE_CODE(0x3B0E, "Medium source element empty") 445 + SENSE_CODE(0x3B0F, "End of medium reached") 446 + SENSE_CODE(0x3B11, "Medium magazine not accessible") 447 + SENSE_CODE(0x3B12, "Medium magazine removed") 448 + SENSE_CODE(0x3B13, "Medium magazine inserted") 449 + SENSE_CODE(0x3B14, "Medium magazine locked") 450 + SENSE_CODE(0x3B15, "Medium magazine unlocked") 451 + SENSE_CODE(0x3B16, "Mechanical positioning or changer error") 452 + SENSE_CODE(0x3B17, "Read past end of user object") 453 + SENSE_CODE(0x3B18, "Element disabled") 454 + SENSE_CODE(0x3B19, "Element enabled") 455 + SENSE_CODE(0x3B1A, "Data transfer device removed") 456 + SENSE_CODE(0x3B1B, "Data transfer device inserted") 457 + SENSE_CODE(0x3B1C, "Too many logical objects on partition to support operation") 458 + 459 + SENSE_CODE(0x3D00, "Invalid bits in identify message") 460 + 461 + SENSE_CODE(0x3E00, "Logical unit has not self-configured yet") 462 + SENSE_CODE(0x3E01, "Logical unit failure") 463 + SENSE_CODE(0x3E02, "Timeout on logical unit") 464 + SENSE_CODE(0x3E03, "Logical unit failed self-test") 465 + SENSE_CODE(0x3E04, "Logical unit unable to update self-test log") 466 + 467 + SENSE_CODE(0x3F00, "Target operating conditions have changed") 468 + SENSE_CODE(0x3F01, "Microcode has been changed") 469 + SENSE_CODE(0x3F02, "Changed operating definition") 470 + 
SENSE_CODE(0x3F03, "Inquiry data has changed") 471 + SENSE_CODE(0x3F04, "Component device attached") 472 + SENSE_CODE(0x3F05, "Device identifier changed") 473 + SENSE_CODE(0x3F06, "Redundancy group created or modified") 474 + SENSE_CODE(0x3F07, "Redundancy group deleted") 475 + SENSE_CODE(0x3F08, "Spare created or modified") 476 + SENSE_CODE(0x3F09, "Spare deleted") 477 + SENSE_CODE(0x3F0A, "Volume set created or modified") 478 + SENSE_CODE(0x3F0B, "Volume set deleted") 479 + SENSE_CODE(0x3F0C, "Volume set deassigned") 480 + SENSE_CODE(0x3F0D, "Volume set reassigned") 481 + SENSE_CODE(0x3F0E, "Reported luns data has changed") 482 + SENSE_CODE(0x3F0F, "Echo buffer overwritten") 483 + SENSE_CODE(0x3F10, "Medium loadable") 484 + SENSE_CODE(0x3F11, "Medium auxiliary memory accessible") 485 + SENSE_CODE(0x3F12, "iSCSI IP address added") 486 + SENSE_CODE(0x3F13, "iSCSI IP address removed") 487 + SENSE_CODE(0x3F14, "iSCSI IP address changed") 488 + SENSE_CODE(0x3F15, "Inspect referrals sense descriptors") 489 + SENSE_CODE(0x3F16, "Microcode has been changed without reset") 490 + /* 491 + * SENSE_CODE(0x40NN, "Ram failure") 492 + * SENSE_CODE(0x40NN, "Diagnostic failure on component nn") 493 + * SENSE_CODE(0x41NN, "Data path failure") 494 + * SENSE_CODE(0x42NN, "Power-on or self-test failure") 495 + */ 496 + SENSE_CODE(0x4300, "Message error") 497 + 498 + SENSE_CODE(0x4400, "Internal target failure") 499 + SENSE_CODE(0x4401, "Persistent reservation information lost") 500 + SENSE_CODE(0x4471, "ATA device failed set features") 501 + 502 + SENSE_CODE(0x4500, "Select or reselect failure") 503 + 504 + SENSE_CODE(0x4600, "Unsuccessful soft reset") 505 + 506 + SENSE_CODE(0x4700, "Scsi parity error") 507 + SENSE_CODE(0x4701, "Data phase CRC error detected") 508 + SENSE_CODE(0x4702, "Scsi parity error detected during st data phase") 509 + SENSE_CODE(0x4703, "Information unit iuCRC error detected") 510 + SENSE_CODE(0x4704, "Asynchronous information protection error detected") 511 + 
SENSE_CODE(0x4705, "Protocol service CRC error") 512 + SENSE_CODE(0x4706, "Phy test function in progress") 513 + SENSE_CODE(0x477f, "Some commands cleared by iSCSI Protocol event") 514 + 515 + SENSE_CODE(0x4800, "Initiator detected error message received") 516 + 517 + SENSE_CODE(0x4900, "Invalid message error") 518 + 519 + SENSE_CODE(0x4A00, "Command phase error") 520 + 521 + SENSE_CODE(0x4B00, "Data phase error") 522 + SENSE_CODE(0x4B01, "Invalid target port transfer tag received") 523 + SENSE_CODE(0x4B02, "Too much write data") 524 + SENSE_CODE(0x4B03, "Ack/nak timeout") 525 + SENSE_CODE(0x4B04, "Nak received") 526 + SENSE_CODE(0x4B05, "Data offset error") 527 + SENSE_CODE(0x4B06, "Initiator response timeout") 528 + SENSE_CODE(0x4B07, "Connection lost") 529 + SENSE_CODE(0x4B08, "Data-in buffer overflow - data buffer size") 530 + SENSE_CODE(0x4B09, "Data-in buffer overflow - data buffer descriptor area") 531 + SENSE_CODE(0x4B0A, "Data-in buffer error") 532 + SENSE_CODE(0x4B0B, "Data-out buffer overflow - data buffer size") 533 + SENSE_CODE(0x4B0C, "Data-out buffer overflow - data buffer descriptor area") 534 + SENSE_CODE(0x4B0D, "Data-out buffer error") 535 + SENSE_CODE(0x4B0E, "PCIe fabric error") 536 + SENSE_CODE(0x4B0F, "PCIe completion timeout") 537 + SENSE_CODE(0x4B10, "PCIe completer abort") 538 + SENSE_CODE(0x4B11, "PCIe poisoned tlp received") 539 + SENSE_CODE(0x4B12, "PCIe eCRC check failed") 540 + SENSE_CODE(0x4B13, "PCIe unsupported request") 541 + SENSE_CODE(0x4B14, "PCIe acs violation") 542 + SENSE_CODE(0x4B15, "PCIe tlp prefix blocked") 543 + 544 + SENSE_CODE(0x4C00, "Logical unit failed self-configuration") 545 + /* 546 + * SENSE_CODE(0x4DNN, "Tagged overlapped commands (nn = queue tag)") 547 + */ 548 + SENSE_CODE(0x4E00, "Overlapped commands attempted") 549 + 550 + SENSE_CODE(0x5000, "Write append error") 551 + SENSE_CODE(0x5001, "Write append position error") 552 + SENSE_CODE(0x5002, "Position error related to timing") 553 + 554 + 
SENSE_CODE(0x5100, "Erase failure") 555 + SENSE_CODE(0x5101, "Erase failure - incomplete erase operation detected") 556 + 557 + SENSE_CODE(0x5200, "Cartridge fault") 558 + 559 + SENSE_CODE(0x5300, "Media load or eject failed") 560 + SENSE_CODE(0x5301, "Unload tape failure") 561 + SENSE_CODE(0x5302, "Medium removal prevented") 562 + SENSE_CODE(0x5303, "Medium removal prevented by data transfer element") 563 + SENSE_CODE(0x5304, "Medium thread or unthread failure") 564 + SENSE_CODE(0x5305, "Volume identifier invalid") 565 + SENSE_CODE(0x5306, "Volume identifier missing") 566 + SENSE_CODE(0x5307, "Duplicate volume identifier") 567 + SENSE_CODE(0x5308, "Element status unknown") 568 + SENSE_CODE(0x5309, "Data transfer device error - load failed") 569 + SENSE_CODE(0x530a, "Data transfer device error - unload failed") 570 + SENSE_CODE(0x530b, "Data transfer device error - unload missing") 571 + SENSE_CODE(0x530c, "Data transfer device error - eject failed") 572 + SENSE_CODE(0x530d, "Data transfer device error - library communication failed") 573 + 574 + SENSE_CODE(0x5400, "Scsi to host system interface failure") 575 + 576 + SENSE_CODE(0x5500, "System resource failure") 577 + SENSE_CODE(0x5501, "System buffer full") 578 + SENSE_CODE(0x5502, "Insufficient reservation resources") 579 + SENSE_CODE(0x5503, "Insufficient resources") 580 + SENSE_CODE(0x5504, "Insufficient registration resources") 581 + SENSE_CODE(0x5505, "Insufficient access control resources") 582 + SENSE_CODE(0x5506, "Auxiliary memory out of space") 583 + SENSE_CODE(0x5507, "Quota error") 584 + SENSE_CODE(0x5508, "Maximum number of supplemental decryption keys exceeded") 585 + SENSE_CODE(0x5509, "Medium auxiliary memory not accessible") 586 + SENSE_CODE(0x550A, "Data currently unavailable") 587 + SENSE_CODE(0x550B, "Insufficient power for operation") 588 + SENSE_CODE(0x550C, "Insufficient resources to create rod") 589 + SENSE_CODE(0x550D, "Insufficient resources to create rod token") 590 + SENSE_CODE(0x550E, 
"Insufficient zone resources") 591 + 592 + SENSE_CODE(0x5700, "Unable to recover table-of-contents") 593 + 594 + SENSE_CODE(0x5800, "Generation does not exist") 595 + 596 + SENSE_CODE(0x5900, "Updated block read") 597 + 598 + SENSE_CODE(0x5A00, "Operator request or state change input") 599 + SENSE_CODE(0x5A01, "Operator medium removal request") 600 + SENSE_CODE(0x5A02, "Operator selected write protect") 601 + SENSE_CODE(0x5A03, "Operator selected write permit") 602 + 603 + SENSE_CODE(0x5B00, "Log exception") 604 + SENSE_CODE(0x5B01, "Threshold condition met") 605 + SENSE_CODE(0x5B02, "Log counter at maximum") 606 + SENSE_CODE(0x5B03, "Log list codes exhausted") 607 + 608 + SENSE_CODE(0x5C00, "Rpl status change") 609 + SENSE_CODE(0x5C01, "Spindles synchronized") 610 + SENSE_CODE(0x5C02, "Spindles not synchronized") 611 + 612 + SENSE_CODE(0x5D00, "Failure prediction threshold exceeded") 613 + SENSE_CODE(0x5D01, "Media failure prediction threshold exceeded") 614 + SENSE_CODE(0x5D02, "Logical unit failure prediction threshold exceeded") 615 + SENSE_CODE(0x5D03, "Spare area exhaustion prediction threshold exceeded") 616 + SENSE_CODE(0x5D10, "Hardware impending failure general hard drive failure") 617 + SENSE_CODE(0x5D11, "Hardware impending failure drive error rate too high") 618 + SENSE_CODE(0x5D12, "Hardware impending failure data error rate too high") 619 + SENSE_CODE(0x5D13, "Hardware impending failure seek error rate too high") 620 + SENSE_CODE(0x5D14, "Hardware impending failure too many block reassigns") 621 + SENSE_CODE(0x5D15, "Hardware impending failure access times too high") 622 + SENSE_CODE(0x5D16, "Hardware impending failure start unit times too high") 623 + SENSE_CODE(0x5D17, "Hardware impending failure channel parametrics") 624 + SENSE_CODE(0x5D18, "Hardware impending failure controller detected") 625 + SENSE_CODE(0x5D19, "Hardware impending failure throughput performance") 626 + SENSE_CODE(0x5D1A, "Hardware impending failure seek time performance") 627 
+ SENSE_CODE(0x5D1B, "Hardware impending failure spin-up retry count") 628 + SENSE_CODE(0x5D1C, "Hardware impending failure drive calibration retry count") 629 + SENSE_CODE(0x5D20, "Controller impending failure general hard drive failure") 630 + SENSE_CODE(0x5D21, "Controller impending failure drive error rate too high") 631 + SENSE_CODE(0x5D22, "Controller impending failure data error rate too high") 632 + SENSE_CODE(0x5D23, "Controller impending failure seek error rate too high") 633 + SENSE_CODE(0x5D24, "Controller impending failure too many block reassigns") 634 + SENSE_CODE(0x5D25, "Controller impending failure access times too high") 635 + SENSE_CODE(0x5D26, "Controller impending failure start unit times too high") 636 + SENSE_CODE(0x5D27, "Controller impending failure channel parametrics") 637 + SENSE_CODE(0x5D28, "Controller impending failure controller detected") 638 + SENSE_CODE(0x5D29, "Controller impending failure throughput performance") 639 + SENSE_CODE(0x5D2A, "Controller impending failure seek time performance") 640 + SENSE_CODE(0x5D2B, "Controller impending failure spin-up retry count") 641 + SENSE_CODE(0x5D2C, "Controller impending failure drive calibration retry count") 642 + SENSE_CODE(0x5D30, "Data channel impending failure general hard drive failure") 643 + SENSE_CODE(0x5D31, "Data channel impending failure drive error rate too high") 644 + SENSE_CODE(0x5D32, "Data channel impending failure data error rate too high") 645 + SENSE_CODE(0x5D33, "Data channel impending failure seek error rate too high") 646 + SENSE_CODE(0x5D34, "Data channel impending failure too many block reassigns") 647 + SENSE_CODE(0x5D35, "Data channel impending failure access times too high") 648 + SENSE_CODE(0x5D36, "Data channel impending failure start unit times too high") 649 + SENSE_CODE(0x5D37, "Data channel impending failure channel parametrics") 650 + SENSE_CODE(0x5D38, "Data channel impending failure controller detected") 651 + SENSE_CODE(0x5D39, "Data channel 
impending failure throughput performance") 652 + SENSE_CODE(0x5D3A, "Data channel impending failure seek time performance") 653 + SENSE_CODE(0x5D3B, "Data channel impending failure spin-up retry count") 654 + SENSE_CODE(0x5D3C, "Data channel impending failure drive calibration retry count") 655 + SENSE_CODE(0x5D40, "Servo impending failure general hard drive failure") 656 + SENSE_CODE(0x5D41, "Servo impending failure drive error rate too high") 657 + SENSE_CODE(0x5D42, "Servo impending failure data error rate too high") 658 + SENSE_CODE(0x5D43, "Servo impending failure seek error rate too high") 659 + SENSE_CODE(0x5D44, "Servo impending failure too many block reassigns") 660 + SENSE_CODE(0x5D45, "Servo impending failure access times too high") 661 + SENSE_CODE(0x5D46, "Servo impending failure start unit times too high") 662 + SENSE_CODE(0x5D47, "Servo impending failure channel parametrics") 663 + SENSE_CODE(0x5D48, "Servo impending failure controller detected") 664 + SENSE_CODE(0x5D49, "Servo impending failure throughput performance") 665 + SENSE_CODE(0x5D4A, "Servo impending failure seek time performance") 666 + SENSE_CODE(0x5D4B, "Servo impending failure spin-up retry count") 667 + SENSE_CODE(0x5D4C, "Servo impending failure drive calibration retry count") 668 + SENSE_CODE(0x5D50, "Spindle impending failure general hard drive failure") 669 + SENSE_CODE(0x5D51, "Spindle impending failure drive error rate too high") 670 + SENSE_CODE(0x5D52, "Spindle impending failure data error rate too high") 671 + SENSE_CODE(0x5D53, "Spindle impending failure seek error rate too high") 672 + SENSE_CODE(0x5D54, "Spindle impending failure too many block reassigns") 673 + SENSE_CODE(0x5D55, "Spindle impending failure access times too high") 674 + SENSE_CODE(0x5D56, "Spindle impending failure start unit times too high") 675 + SENSE_CODE(0x5D57, "Spindle impending failure channel parametrics") 676 + SENSE_CODE(0x5D58, "Spindle impending failure controller detected") 677 + 
SENSE_CODE(0x5D59, "Spindle impending failure throughput performance") 678 + SENSE_CODE(0x5D5A, "Spindle impending failure seek time performance") 679 + SENSE_CODE(0x5D5B, "Spindle impending failure spin-up retry count") 680 + SENSE_CODE(0x5D5C, "Spindle impending failure drive calibration retry count") 681 + SENSE_CODE(0x5D60, "Firmware impending failure general hard drive failure") 682 + SENSE_CODE(0x5D61, "Firmware impending failure drive error rate too high") 683 + SENSE_CODE(0x5D62, "Firmware impending failure data error rate too high") 684 + SENSE_CODE(0x5D63, "Firmware impending failure seek error rate too high") 685 + SENSE_CODE(0x5D64, "Firmware impending failure too many block reassigns") 686 + SENSE_CODE(0x5D65, "Firmware impending failure access times too high") 687 + SENSE_CODE(0x5D66, "Firmware impending failure start unit times too high") 688 + SENSE_CODE(0x5D67, "Firmware impending failure channel parametrics") 689 + SENSE_CODE(0x5D68, "Firmware impending failure controller detected") 690 + SENSE_CODE(0x5D69, "Firmware impending failure throughput performance") 691 + SENSE_CODE(0x5D6A, "Firmware impending failure seek time performance") 692 + SENSE_CODE(0x5D6B, "Firmware impending failure spin-up retry count") 693 + SENSE_CODE(0x5D6C, "Firmware impending failure drive calibration retry count") 694 + SENSE_CODE(0x5DFF, "Failure prediction threshold exceeded (false)") 695 + 696 + SENSE_CODE(0x5E00, "Low power condition on") 697 + SENSE_CODE(0x5E01, "Idle condition activated by timer") 698 + SENSE_CODE(0x5E02, "Standby condition activated by timer") 699 + SENSE_CODE(0x5E03, "Idle condition activated by command") 700 + SENSE_CODE(0x5E04, "Standby condition activated by command") 701 + SENSE_CODE(0x5E05, "Idle_b condition activated by timer") 702 + SENSE_CODE(0x5E06, "Idle_b condition activated by command") 703 + SENSE_CODE(0x5E07, "Idle_c condition activated by timer") 704 + SENSE_CODE(0x5E08, "Idle_c condition activated by command") 705 + 
SENSE_CODE(0x5E09, "Standby_y condition activated by timer") 706 + SENSE_CODE(0x5E0A, "Standby_y condition activated by command") 707 + SENSE_CODE(0x5E41, "Power state change to active") 708 + SENSE_CODE(0x5E42, "Power state change to idle") 709 + SENSE_CODE(0x5E43, "Power state change to standby") 710 + SENSE_CODE(0x5E45, "Power state change to sleep") 711 + SENSE_CODE(0x5E47, "Power state change to device control") 712 + 713 + SENSE_CODE(0x6000, "Lamp failure") 714 + 715 + SENSE_CODE(0x6100, "Video acquisition error") 716 + SENSE_CODE(0x6101, "Unable to acquire video") 717 + SENSE_CODE(0x6102, "Out of focus") 718 + 719 + SENSE_CODE(0x6200, "Scan head positioning error") 720 + 721 + SENSE_CODE(0x6300, "End of user area encountered on this track") 722 + SENSE_CODE(0x6301, "Packet does not fit in available space") 723 + 724 + SENSE_CODE(0x6400, "Illegal mode for this track") 725 + SENSE_CODE(0x6401, "Invalid packet size") 726 + 727 + SENSE_CODE(0x6500, "Voltage fault") 728 + 729 + SENSE_CODE(0x6600, "Automatic document feeder cover up") 730 + SENSE_CODE(0x6601, "Automatic document feeder lift up") 731 + SENSE_CODE(0x6602, "Document jam in automatic document feeder") 732 + SENSE_CODE(0x6603, "Document miss feed automatic in document feeder") 733 + 734 + SENSE_CODE(0x6700, "Configuration failure") 735 + SENSE_CODE(0x6701, "Configuration of incapable logical units failed") 736 + SENSE_CODE(0x6702, "Add logical unit failed") 737 + SENSE_CODE(0x6703, "Modification of logical unit failed") 738 + SENSE_CODE(0x6704, "Exchange of logical unit failed") 739 + SENSE_CODE(0x6705, "Remove of logical unit failed") 740 + SENSE_CODE(0x6706, "Attachment of logical unit failed") 741 + SENSE_CODE(0x6707, "Creation of logical unit failed") 742 + SENSE_CODE(0x6708, "Assign failure occurred") 743 + SENSE_CODE(0x6709, "Multiply assigned logical unit") 744 + SENSE_CODE(0x670A, "Set target port groups command failed") 745 + SENSE_CODE(0x670B, "ATA device feature not enabled") 746 + 747 + 
SENSE_CODE(0x6800, "Logical unit not configured") 748 + SENSE_CODE(0x6801, "Subsidiary logical unit not configured") 749 + 750 + SENSE_CODE(0x6900, "Data loss on logical unit") 751 + SENSE_CODE(0x6901, "Multiple logical unit failures") 752 + SENSE_CODE(0x6902, "Parity/data mismatch") 753 + 754 + SENSE_CODE(0x6A00, "Informational, refer to log") 755 + 756 + SENSE_CODE(0x6B00, "State change has occurred") 757 + SENSE_CODE(0x6B01, "Redundancy level got better") 758 + SENSE_CODE(0x6B02, "Redundancy level got worse") 759 + 760 + SENSE_CODE(0x6C00, "Rebuild failure occurred") 761 + 762 + SENSE_CODE(0x6D00, "Recalculate failure occurred") 763 + 764 + SENSE_CODE(0x6E00, "Command to logical unit failed") 765 + 766 + SENSE_CODE(0x6F00, "Copy protection key exchange failure - authentication failure") 767 + SENSE_CODE(0x6F01, "Copy protection key exchange failure - key not present") 768 + SENSE_CODE(0x6F02, "Copy protection key exchange failure - key not established") 769 + SENSE_CODE(0x6F03, "Read of scrambled sector without authentication") 770 + SENSE_CODE(0x6F04, "Media region code is mismatched to logical unit region") 771 + SENSE_CODE(0x6F05, "Drive region must be permanent/region reset count error") 772 + SENSE_CODE(0x6F06, "Insufficient block count for binding nonce recording") 773 + SENSE_CODE(0x6F07, "Conflict in binding nonce recording") 774 + /* 775 + * SENSE_CODE(0x70NN, "Decompression exception short algorithm id of nn") 776 + */ 777 + SENSE_CODE(0x7100, "Decompression exception long algorithm id") 778 + 779 + SENSE_CODE(0x7200, "Session fixation error") 780 + SENSE_CODE(0x7201, "Session fixation error writing lead-in") 781 + SENSE_CODE(0x7202, "Session fixation error writing lead-out") 782 + SENSE_CODE(0x7203, "Session fixation error - incomplete track in session") 783 + SENSE_CODE(0x7204, "Empty or partially written reserved track") 784 + SENSE_CODE(0x7205, "No more track reservations allowed") 785 + SENSE_CODE(0x7206, "RMZ extension is not allowed") 786 + 
SENSE_CODE(0x7207, "No more test zone extensions are allowed") 787 + 788 + SENSE_CODE(0x7300, "Cd control error") 789 + SENSE_CODE(0x7301, "Power calibration area almost full") 790 + SENSE_CODE(0x7302, "Power calibration area is full") 791 + SENSE_CODE(0x7303, "Power calibration area error") 792 + SENSE_CODE(0x7304, "Program memory area update failure") 793 + SENSE_CODE(0x7305, "Program memory area is full") 794 + SENSE_CODE(0x7306, "RMA/PMA is almost full") 795 + SENSE_CODE(0x7310, "Current power calibration area almost full") 796 + SENSE_CODE(0x7311, "Current power calibration area is full") 797 + SENSE_CODE(0x7317, "RDZ is full") 798 + 799 + SENSE_CODE(0x7400, "Security error") 800 + SENSE_CODE(0x7401, "Unable to decrypt data") 801 + SENSE_CODE(0x7402, "Unencrypted data encountered while decrypting") 802 + SENSE_CODE(0x7403, "Incorrect data encryption key") 803 + SENSE_CODE(0x7404, "Cryptographic integrity validation failed") 804 + SENSE_CODE(0x7405, "Error decrypting data") 805 + SENSE_CODE(0x7406, "Unknown signature verification key") 806 + SENSE_CODE(0x7407, "Encryption parameters not useable") 807 + SENSE_CODE(0x7408, "Digital signature validation failure") 808 + SENSE_CODE(0x7409, "Encryption mode mismatch on read") 809 + SENSE_CODE(0x740A, "Encrypted block not raw read enabled") 810 + SENSE_CODE(0x740B, "Incorrect Encryption parameters") 811 + SENSE_CODE(0x740C, "Unable to decrypt parameter list") 812 + SENSE_CODE(0x740D, "Encryption algorithm disabled") 813 + SENSE_CODE(0x7410, "SA creation parameter value invalid") 814 + SENSE_CODE(0x7411, "SA creation parameter value rejected") 815 + SENSE_CODE(0x7412, "Invalid SA usage") 816 + SENSE_CODE(0x7421, "Data Encryption configuration prevented") 817 + SENSE_CODE(0x7430, "SA creation parameter not supported") 818 + SENSE_CODE(0x7440, "Authentication failed") 819 + SENSE_CODE(0x7461, "External data encryption key manager access error") 820 + SENSE_CODE(0x7462, "External data encryption key manager error") 821 + 
SENSE_CODE(0x7463, "External data encryption key not found") 822 + SENSE_CODE(0x7464, "External data encryption request not authorized") 823 + SENSE_CODE(0x746E, "External data encryption control timeout") 824 + SENSE_CODE(0x746F, "External data encryption control error") 825 + SENSE_CODE(0x7471, "Logical unit access not authorized") 826 + SENSE_CODE(0x7479, "Security conflict in translated device")
+4 -1
drivers/scsi/snic/snic.h
··· 95 95 #define SNIC_DEV_RST_NOTSUP BIT(25) 96 96 #define SNIC_SCSI_CLEANUP BIT(26) 97 97 #define SNIC_HOST_RESET_ISSUED BIT(27) 98 + #define SNIC_HOST_RESET_CMD_TERM \ 99 + (SNIC_DEV_RST_NOTSUP | SNIC_SCSI_CLEANUP | SNIC_HOST_RESET_ISSUED) 98 100 99 101 #define SNIC_ABTS_TIMEOUT 30000 /* msec */ 100 102 #define SNIC_LUN_RESET_TIMEOUT 30000 /* msec */ ··· 218 216 SNIC_MSIX_INTR_MAX, 219 217 }; 220 218 219 + #define SNIC_INTRHDLR_NAMSZ (2 * IFNAMSIZ) 221 220 struct snic_msix_entry { 222 221 int requested; 223 - char devname[IFNAMSIZ]; 222 + char devname[SNIC_INTRHDLR_NAMSZ]; 224 223 irqreturn_t (*isr)(int, void *); 225 224 void *devid; 226 225 };
+3 -5
drivers/scsi/snic/snic_ctl.c
··· 39 39 { 40 40 struct snic *snic = container_of(work, struct snic, link_work); 41 41 42 - if (snic->config.xpt_type != SNIC_DAS) { 43 - SNIC_HOST_INFO(snic->shost, "Link Event Received.\n"); 44 - SNIC_ASSERT_NOT_IMPL(1); 45 - 42 + if (snic->config.xpt_type == SNIC_DAS) 46 43 return; 47 - } 48 44 49 45 snic->link_status = svnic_dev_link_status(snic->vdev); 50 46 snic->link_down_cnt = svnic_dev_link_down_cnt(snic->vdev); 51 47 SNIC_HOST_INFO(snic->shost, "Link Event: Link %s.\n", 52 48 ((snic->link_status) ? "Up" : "Down")); 49 + 50 + SNIC_ASSERT_NOT_IMPL(1); 53 51 } 54 52 55 53
+16 -4
drivers/scsi/snic/snic_debugfs.c
··· 264 264 "Aborts Fail : %lld\n" 265 265 "Aborts Driver Timeout : %lld\n" 266 266 "Abort FW Timeout : %lld\n" 267 - "Abort IO NOT Found : %lld\n", 267 + "Abort IO NOT Found : %lld\n" 268 + "Abort Queuing Failed : %lld\n", 268 269 (u64) atomic64_read(&stats->abts.num), 269 270 (u64) atomic64_read(&stats->abts.fail), 270 271 (u64) atomic64_read(&stats->abts.drv_tmo), 271 272 (u64) atomic64_read(&stats->abts.fw_tmo), 272 - (u64) atomic64_read(&stats->abts.io_not_found)); 273 + (u64) atomic64_read(&stats->abts.io_not_found), 274 + (u64) atomic64_read(&stats->abts.q_fail)); 273 275 274 276 /* Dump Reset Stats */ 275 277 seq_printf(sfp, ··· 318 316 seq_printf(sfp, 319 317 "Last ISR Time : %llu (%8lu.%8lu)\n" 320 318 "Last Ack Time : %llu (%8lu.%8lu)\n" 321 - "ISRs : %llu\n" 319 + "Ack ISRs : %llu\n" 320 + "IO Cmpl ISRs : %llu\n" 321 + "Err Notify ISRs : %llu\n" 322 322 "Max CQ Entries : %lld\n" 323 323 "Data Count Mismatch : %lld\n" 324 324 "IOs w/ Timeout Status : %lld\n" ··· 328 324 "IOs w/ SGL Invalid Stat : %lld\n" 329 325 "WQ Desc Alloc Fail : %lld\n" 330 326 "Queue Full : %lld\n" 327 + "Queue Ramp Up : %lld\n" 328 + "Queue Ramp Down : %lld\n" 329 + "Queue Last Queue Depth : %lld\n" 331 330 "Target Not Ready : %lld\n", 332 331 (u64) stats->misc.last_isr_time, 333 332 last_isr_tms.tv_sec, last_isr_tms.tv_nsec, 334 333 (u64)stats->misc.last_ack_time, 335 334 last_ack_tms.tv_sec, last_ack_tms.tv_nsec, 336 - (u64) atomic64_read(&stats->misc.isr_cnt), 335 + (u64) atomic64_read(&stats->misc.ack_isr_cnt), 336 + (u64) atomic64_read(&stats->misc.cmpl_isr_cnt), 337 + (u64) atomic64_read(&stats->misc.errnotify_isr_cnt), 337 338 (u64) atomic64_read(&stats->misc.max_cq_ents), 338 339 (u64) atomic64_read(&stats->misc.data_cnt_mismat), 339 340 (u64) atomic64_read(&stats->misc.io_tmo), ··· 346 337 (u64) atomic64_read(&stats->misc.sgl_inval), 347 338 (u64) atomic64_read(&stats->misc.wq_alloc_fail), 348 339 (u64) atomic64_read(&stats->misc.qfull), 340 + (u64) 
atomic64_read(&stats->misc.qsz_rampup), 341 + (u64) atomic64_read(&stats->misc.qsz_rampdown), 342 + (u64) atomic64_read(&stats->misc.last_qsz), 349 343 (u64) atomic64_read(&stats->misc.tgt_not_rdy)); 350 344 351 345 return 0;
+16 -3
drivers/scsi/snic/snic_disc.c
··· 171 171 tgt->channel, 172 172 tgt->scsi_tgt_id, 173 173 SCAN_WILD_CARD, 174 - 1); 174 + SCSI_SCAN_RESCAN); 175 175 176 176 spin_lock_irqsave(shost->host_lock, flags); 177 177 tgt->flags &= ~SNIC_TGT_SCAN_PENDING; ··· 480 480 snic_disc_start(struct snic *snic) 481 481 { 482 482 struct snic_disc *disc = &snic->disc; 483 + unsigned long flags; 483 484 int ret = 0; 484 485 485 486 SNIC_SCSI_DBG(snic->shost, "Discovery Start.\n"); 487 + 488 + spin_lock_irqsave(&snic->snic_lock, flags); 489 + if (snic->in_remove) { 490 + spin_unlock_irqrestore(&snic->snic_lock, flags); 491 + SNIC_ERR("snic driver removal in progress ...\n"); 492 + ret = 0; 493 + 494 + return ret; 495 + } 496 + spin_unlock_irqrestore(&snic->snic_lock, flags); 486 497 487 498 mutex_lock(&disc->mutex); 488 499 if (disc->state == SNIC_DISC_PENDING) { ··· 544 533 struct list_head *cur, *nxt; 545 534 unsigned long flags; 546 535 536 + scsi_flush_work(snic->shost); 537 + 547 538 mutex_lock(&snic->disc.mutex); 548 539 spin_lock_irqsave(snic->shost->host_lock, flags); 549 540 ··· 558 545 tgt = NULL; 559 546 } 560 547 spin_unlock_irqrestore(snic->shost->host_lock, flags); 561 - 562 - scsi_flush_work(snic->shost); 563 548 mutex_unlock(&snic->disc.mutex); 549 + 550 + flush_workqueue(snic_glob->event_q); 564 551 } /* end of snic_tgt_del_all */
+3 -1
drivers/scsi/snic/snic_fwint.h
··· 414 414 /* Payload 88 bytes = 128 - 24 - 16 */ 415 415 #define SNIC_HOST_REQ_PAYLOAD ((int)(SNIC_HOST_REQ_LEN - \ 416 416 sizeof(struct snic_io_hdr) - \ 417 - (2 * sizeof(u64)))) 417 + (2 * sizeof(u64)) - sizeof(ulong))) 418 418 419 419 /* 420 420 * snic_host_req: host -> firmware request ··· 448 448 /* hba reset */ 449 449 struct snic_hba_reset reset; 450 450 } u; 451 + 452 + ulong req_pa; 451 453 }; /* end of snic_host_req structure */ 452 454 453 455
+57 -7
drivers/scsi/snic/snic_io.c
··· 48 48 SNIC_TRC(snic->shost->host_no, 0, 0, 49 49 ((ulong)(buf->os_buf) - sizeof(struct snic_req_info)), 0, 0, 50 50 0); 51 - pci_unmap_single(snic->pdev, buf->dma_addr, buf->len, PCI_DMA_TODEVICE); 51 + 52 52 buf->os_buf = NULL; 53 53 } 54 54 ··· 137 137 return 0; 138 138 } 139 139 140 + static int 141 + snic_wqdesc_avail(struct snic *snic, int q_num, int req_type) 142 + { 143 + int nr_wqdesc = snic->config.wq_enet_desc_count; 144 + 145 + if (q_num > 0) { 146 + /* 147 + * Multi Queue case, additional care is required. 148 + * Per WQ active requests need to be maintained. 149 + */ 150 + SNIC_HOST_INFO(snic->shost, "desc_avail: Multi Queue case.\n"); 151 + SNIC_BUG_ON(q_num > 0); 152 + 153 + return -1; 154 + } 155 + 156 + nr_wqdesc -= atomic64_read(&snic->s_stats.fw.actv_reqs); 157 + 158 + return ((req_type == SNIC_REQ_HBA_RESET) ? nr_wqdesc : nr_wqdesc - 1); 159 + } 160 + 140 161 int 141 162 snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len) 142 163 { 143 164 dma_addr_t pa = 0; 144 165 unsigned long flags; 145 166 struct snic_fw_stats *fwstats = &snic->s_stats.fw; 167 + struct snic_host_req *req = (struct snic_host_req *) os_buf; 146 168 long act_reqs; 169 + long desc_avail = 0; 147 170 int q_num = 0; 148 171 149 172 snic_print_desc(__func__, os_buf, len); ··· 179 156 return -ENOMEM; 180 157 } 181 158 159 + req->req_pa = (ulong)pa; 160 + 182 161 q_num = snic_select_wq(snic); 183 162 184 163 spin_lock_irqsave(&snic->wq_lock[q_num], flags); 185 - if (!svnic_wq_desc_avail(snic->wq)) { 164 + desc_avail = snic_wqdesc_avail(snic, q_num, req->hdr.type); 165 + if (desc_avail <= 0) { 186 166 pci_unmap_single(snic->pdev, pa, len, PCI_DMA_TODEVICE); 167 + req->req_pa = 0; 187 168 spin_unlock_irqrestore(&snic->wq_lock[q_num], flags); 188 169 atomic64_inc(&snic->s_stats.misc.wq_alloc_fail); 189 170 SNIC_DBG("host = %d, WQ is Full\n", snic->shost->host_no); ··· 196 169 } 197 170 198 171 snic_queue_wq_eth_desc(&snic->wq[q_num], os_buf, pa, len, 0, 0, 1); 172 + /* 173 
+ * Update stats 174 + * note: when multi queue enabled, fw actv_reqs should be per queue. 175 + */ 176 + act_reqs = atomic64_inc_return(&fwstats->actv_reqs); 199 177 spin_unlock_irqrestore(&snic->wq_lock[q_num], flags); 200 178 201 - /* Update stats */ 202 - act_reqs = atomic64_inc_return(&fwstats->actv_reqs); 203 179 if (act_reqs > atomic64_read(&fwstats->max_actv_reqs)) 204 180 atomic64_set(&fwstats->max_actv_reqs, act_reqs); 205 181 ··· 348 318 "Req_free:rqi %p:ioreq %p:abt %p:dr %p\n", 349 319 rqi, rqi->req, rqi->abort_req, rqi->dr_req); 350 320 351 - if (rqi->abort_req) 352 - mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]); 321 + if (rqi->abort_req) { 322 + if (rqi->abort_req->req_pa) 323 + pci_unmap_single(snic->pdev, 324 + rqi->abort_req->req_pa, 325 + sizeof(struct snic_host_req), 326 + PCI_DMA_TODEVICE); 353 327 354 - if (rqi->dr_req) 328 + mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]); 329 + } 330 + 331 + if (rqi->dr_req) { 332 + if (rqi->dr_req->req_pa) 333 + pci_unmap_single(snic->pdev, 334 + rqi->dr_req->req_pa, 335 + sizeof(struct snic_host_req), 336 + PCI_DMA_TODEVICE); 337 + 355 338 mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]); 339 + } 340 + 341 + if (rqi->req->req_pa) 342 + pci_unmap_single(snic->pdev, 343 + rqi->req->req_pa, 344 + rqi->req_len, 345 + PCI_DMA_TODEVICE); 356 346 357 347 mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]); 358 348 }
+3 -3
drivers/scsi/snic/snic_isr.c
··· 38 38 unsigned long wq_work_done = 0; 39 39 40 40 snic->s_stats.misc.last_isr_time = jiffies; 41 - atomic64_inc(&snic->s_stats.misc.isr_cnt); 41 + atomic64_inc(&snic->s_stats.misc.ack_isr_cnt); 42 42 43 43 wq_work_done = snic_wq_cmpl_handler(snic, -1); 44 44 svnic_intr_return_credits(&snic->intr[SNIC_MSIX_WQ], ··· 56 56 unsigned long iocmpl_work_done = 0; 57 57 58 58 snic->s_stats.misc.last_isr_time = jiffies; 59 - atomic64_inc(&snic->s_stats.misc.isr_cnt); 59 + atomic64_inc(&snic->s_stats.misc.cmpl_isr_cnt); 60 60 61 61 iocmpl_work_done = snic_fwcq_cmpl_handler(snic, -1); 62 62 svnic_intr_return_credits(&snic->intr[SNIC_MSIX_IO_CMPL], ··· 73 73 struct snic *snic = data; 74 74 75 75 snic->s_stats.misc.last_isr_time = jiffies; 76 - atomic64_inc(&snic->s_stats.misc.isr_cnt); 76 + atomic64_inc(&snic->s_stats.misc.errnotify_isr_cnt); 77 77 78 78 svnic_intr_return_all_credits(&snic->intr[SNIC_MSIX_ERR_NOTIFY]); 79 79 snic_log_q_error(snic);
+25 -19
drivers/scsi/snic/snic_main.c
··· 98 98 static int 99 99 snic_change_queue_depth(struct scsi_device *sdev, int qdepth) 100 100 { 101 + struct snic *snic = shost_priv(sdev->host); 101 102 int qsz = 0; 102 103 103 104 qsz = min_t(u32, qdepth, SNIC_MAX_QUEUE_DEPTH); 105 + if (qsz < sdev->queue_depth) 106 + atomic64_inc(&snic->s_stats.misc.qsz_rampdown); 107 + else if (qsz > sdev->queue_depth) 108 + atomic64_inc(&snic->s_stats.misc.qsz_rampup); 109 + 110 + atomic64_set(&snic->s_stats.misc.last_qsz, sdev->queue_depth); 111 + 104 112 scsi_change_queue_depth(sdev, qsz); 105 - SNIC_INFO("QDepth Changed to %d\n", sdev->queue_depth); 106 113 107 114 return sdev->queue_depth; 108 115 } ··· 631 624 goto err_free_tmreq_pool; 632 625 } 633 626 634 - /* 635 - * Initialization done with PCI system, hardware, firmware. 636 - * Add shost to SCSI 637 - */ 638 - ret = snic_add_host(shost, pdev); 639 - if (ret) { 640 - SNIC_HOST_ERR(shost, 641 - "Adding scsi host Failed ... exiting. %d\n", 642 - ret); 643 - 644 - goto err_notify_unset; 645 - } 646 - 647 627 spin_lock_irqsave(&snic_glob->snic_list_lock, flags); 648 628 list_add_tail(&snic->list, &snic_glob->snic_list); 649 629 spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags); ··· 663 669 for (i = 0; i < snic->intr_count; i++) 664 670 svnic_intr_unmask(&snic->intr[i]); 665 671 666 - snic_set_state(snic, SNIC_ONLINE); 667 - 668 672 /* Get snic params */ 669 673 ret = snic_get_conf(snic); 670 674 if (ret) { ··· 672 680 673 681 goto err_get_conf; 674 682 } 683 + 684 + /* 685 + * Initialization done with PCI system, hardware, firmware. 686 + * Add shost to SCSI 687 + */ 688 + ret = snic_add_host(shost, pdev); 689 + if (ret) { 690 + SNIC_HOST_ERR(shost, 691 + "Adding scsi host Failed ... exiting. 
%d\n", 692 + ret); 693 + 694 + goto err_get_conf; 695 + } 696 + 697 + snic_set_state(snic, SNIC_ONLINE); 675 698 676 699 ret = snic_disc_start(snic); 677 700 if (ret) { ··· 712 705 svnic_dev_disable(snic->vdev); 713 706 714 707 err_vdev_enable: 708 + svnic_dev_notify_unset(snic->vdev); 709 + 715 710 for (i = 0; i < snic->wq_count; i++) { 716 711 int rc = 0; 717 712 ··· 726 717 } 727 718 } 728 719 snic_del_host(snic->shost); 729 - 730 - err_notify_unset: 731 - svnic_dev_notify_unset(snic->vdev); 732 720 733 721 err_free_tmreq_pool: 734 722 mempool_destroy(snic->req_pool[SNIC_REQ_TM_CACHE]);
+50 -8
drivers/scsi/snic/snic_scsi.c
··· 221 221 pa, /* sense buffer pa */ 222 222 SCSI_SENSE_BUFFERSIZE); 223 223 224 + atomic64_inc(&snic->s_stats.io.active); 224 225 ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len); 225 - if (ret) 226 + if (ret) { 227 + atomic64_dec(&snic->s_stats.io.active); 226 228 SNIC_HOST_ERR(snic->shost, 227 229 "QIcmnd: Queuing Icmnd Failed. ret = %d\n", 228 230 ret); 231 + } else 232 + snic_stats_update_active_ios(&snic->s_stats); 229 233 230 234 return ret; 231 235 } /* end of snic_queue_icmnd_req */ ··· 365 361 if (ret) { 366 362 SNIC_HOST_ERR(shost, "Failed to Q, Scsi Req w/ err %d.\n", ret); 367 363 ret = SCSI_MLQUEUE_HOST_BUSY; 368 - } else 369 - snic_stats_update_active_ios(&snic->s_stats); 364 + } 370 365 371 366 atomic_dec(&snic->ios_inflight); 372 367 ··· 601 598 sc->device->lun, sc, sc->cmnd[0], snic_cmd_tag(sc), 602 599 CMD_FLAGS(sc), rqi); 603 600 601 + if (CMD_FLAGS(sc) & SNIC_HOST_RESET_CMD_TERM) { 602 + spin_unlock_irqrestore(io_lock, flags); 603 + 604 + return; 605 + } 606 + 604 607 SNIC_BUG_ON(rqi != (struct snic_req_info *)ctx); 605 608 WARN_ON_ONCE(req); 606 609 if (!rqi) { ··· 788 779 789 780 io_lock = snic_io_lock_hash(snic, sc); 790 781 spin_lock_irqsave(io_lock, flags); 782 + if (CMD_FLAGS(sc) & SNIC_HOST_RESET_CMD_TERM) { 783 + spin_unlock_irqrestore(io_lock, flags); 784 + 785 + return ret; 786 + } 791 787 rqi = (struct snic_req_info *) CMD_SP(sc); 792 788 WARN_ON_ONCE(!rqi); 793 789 ··· 1015 1001 unsigned long flags, gflags; 1016 1002 int ret = 0; 1017 1003 1018 - SNIC_HOST_INFO(snic->shost, 1019 - "reset_cmpl:HBA Reset Completion received.\n"); 1020 - 1021 1004 snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx); 1005 + SNIC_HOST_INFO(snic->shost, 1006 + "reset_cmpl:Tag %d ctx %lx cmpl status %s HBA Reset Completion received.\n", 1007 + cmnd_id, ctx, snic_io_status_to_str(hdr_stat)); 1008 + 1022 1009 SNIC_SCSI_DBG(snic->shost, 1023 1010 "reset_cmpl: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n", 1024 1011 typ, 
hdr_stat, cmnd_id, hid, ctx); ··· 1027 1012 /* spl case, host reset issued through ioctl */ 1028 1013 if (cmnd_id == SCSI_NO_TAG) { 1029 1014 rqi = (struct snic_req_info *) ctx; 1015 + SNIC_HOST_INFO(snic->shost, 1016 + "reset_cmpl:Tag %d ctx %lx cmpl stat %s\n", 1017 + cmnd_id, ctx, snic_io_status_to_str(hdr_stat)); 1030 1018 sc = rqi->sc; 1031 1019 1032 1020 goto ioctl_hba_rst; ··· 1055 1037 1056 1038 return ret; 1057 1039 } 1040 + 1041 + SNIC_HOST_INFO(snic->shost, 1042 + "reset_cmpl: sc %p rqi %p Tag %d flags 0x%llx\n", 1043 + sc, rqi, cmnd_id, CMD_FLAGS(sc)); 1058 1044 1059 1045 io_lock = snic_io_lock_hash(snic, sc); 1060 1046 spin_lock_irqsave(io_lock, flags); ··· 1476 1454 case SNIC_STAT_IO_SUCCESS: 1477 1455 case SNIC_STAT_IO_NOT_FOUND: 1478 1456 ret = SUCCESS; 1457 + /* 1458 + * If abort path doesn't call scsi_done(), 1459 + * the # IO timeouts == 2, will cause the LUN offline. 1460 + * Call scsi_done to complete the IO. 1461 + */ 1462 + sc->result = (DID_ERROR << 16); 1463 + sc->scsi_done(sc); 1479 1464 break; 1480 1465 1481 1466 default: 1482 1467 /* Firmware completed abort with error */ 1483 1468 ret = FAILED; 1469 + rqi = NULL; 1484 1470 break; 1485 1471 } 1486 1472 ··· 1584 1554 /* Now Queue the abort command to firmware */ 1585 1555 ret = snic_queue_abort_req(snic, rqi, sc, tmf); 1586 1556 if (ret) { 1557 + atomic64_inc(&snic->s_stats.abts.q_fail); 1587 1558 SNIC_HOST_ERR(snic->shost, 1588 1559 "send_abt_cmd: IO w/ Tag 0x%x fail w/ err %d flags 0x%llx\n", 1589 1560 tag, ret, CMD_FLAGS(sc)); ··· 1860 1829 spin_unlock_irqrestore(io_lock, flags); 1861 1830 1862 1831 snic_release_req_buf(snic, rqi, sc); 1832 + 1833 + sc->result = (DID_ERROR << 16); 1834 + sc->scsi_done(sc); 1863 1835 1864 1836 ret = 0; 1865 1837 ··· 2418 2384 "Completing Pending TM Req sc %p, state %s flags 0x%llx\n", 2419 2385 sc, snic_io_status_to_str(CMD_STATE(sc)), CMD_FLAGS(sc)); 2420 2386 2387 + /* 2388 + * CASE : FW didn't post itmf completion due to PCIe Errors. 
2389 + * Marking the abort status as Success to call scsi completion 2390 + * in snic_abort_finish() 2391 + */ 2392 + CMD_ABTS_STATUS(sc) = SNIC_STAT_IO_SUCCESS; 2393 + 2421 2394 rqi = (struct snic_req_info *) CMD_SP(sc); 2422 2395 if (!rqi) 2423 2396 return; ··· 2500 2459 cleanup: 2501 2460 sc->result = DID_TRANSPORT_DISRUPTED << 16; 2502 2461 SNIC_HOST_INFO(snic->shost, 2503 - "sc_clean: DID_TRANSPORT_DISRUPTED for sc %p. rqi %p duration %llu msecs\n", 2504 - sc, rqi, (jiffies - st_time)); 2462 + "sc_clean: DID_TRANSPORT_DISRUPTED for sc %p, Tag %d flags 0x%llx rqi %p duration %u msecs\n", 2463 + sc, sc->request->tag, CMD_FLAGS(sc), rqi, 2464 + jiffies_to_msecs(jiffies - st_time)); 2505 2465 2506 2466 /* Update IO stats */ 2507 2467 snic_stats_update_io_cmpl(&snic->s_stats);
+9 -3
drivers/scsi/snic/snic_stats.h
··· 42 42 atomic64_t drv_tmo; /* Abort Driver Timeouts */ 43 43 atomic64_t fw_tmo; /* Abort Firmware Timeouts */ 44 44 atomic64_t io_not_found;/* Abort IO Not Found */ 45 + atomic64_t q_fail; /* Abort Queuing Failed */ 45 46 }; 46 47 47 48 struct snic_reset_stats { ··· 70 69 struct snic_misc_stats { 71 70 u64 last_isr_time; 72 71 u64 last_ack_time; 73 - atomic64_t isr_cnt; 72 + atomic64_t ack_isr_cnt; 73 + atomic64_t cmpl_isr_cnt; 74 + atomic64_t errnotify_isr_cnt; 74 75 atomic64_t max_cq_ents; /* Max CQ Entries */ 75 76 atomic64_t data_cnt_mismat; /* Data Count Mismatch */ 76 77 atomic64_t io_tmo; ··· 84 81 atomic64_t no_icmnd_itmf_cmpls; 85 82 atomic64_t io_under_run; 86 83 atomic64_t qfull; 84 + atomic64_t qsz_rampup; 85 + atomic64_t qsz_rampdown; 86 + atomic64_t last_qsz; 87 87 atomic64_t tgt_not_rdy; 88 88 }; 89 89 ··· 107 101 snic_stats_update_active_ios(struct snic_stats *s_stats) 108 102 { 109 103 struct snic_io_stats *io = &s_stats->io; 110 - u32 nr_active_ios; 104 + int nr_active_ios; 111 105 112 - nr_active_ios = atomic64_inc_return(&io->active); 106 + nr_active_ios = atomic64_read(&io->active); 113 107 if (atomic64_read(&io->max_active) < nr_active_ios) 114 108 atomic64_set(&io->max_active, nr_active_ios); 115 109
+30 -14
drivers/scsi/snic/vnic_dev.c
··· 263 263 int wait) 264 264 { 265 265 struct devcmd2_controller *dc2c = vdev->devcmd2; 266 - struct devcmd2_result *result = dc2c->result + dc2c->next_result; 266 + struct devcmd2_result *result = NULL; 267 267 unsigned int i; 268 268 int delay; 269 269 int err; 270 270 u32 posted; 271 + u32 fetch_idx; 271 272 u32 new_posted; 273 + u8 color; 274 + 275 + fetch_idx = ioread32(&dc2c->wq_ctrl->fetch_index); 276 + if (fetch_idx == 0xFFFFFFFF) { /* check for hardware gone */ 277 + /* Hardware surprise removal: return error */ 278 + return -ENODEV; 279 + } 272 280 273 281 posted = ioread32(&dc2c->wq_ctrl->posted_index); 274 282 ··· 286 278 } 287 279 288 280 new_posted = (posted + 1) % DEVCMD2_RING_SIZE; 281 + if (new_posted == fetch_idx) { 282 + pr_err("%s: wq is full while issuing devcmd2 command %d, fetch index: %u, posted index: %u\n", 283 + pci_name(vdev->pdev), _CMD_N(cmd), fetch_idx, posted); 284 + 285 + return -EBUSY; 286 + } 287 + 289 288 dc2c->cmd_ring[posted].cmd = cmd; 290 289 dc2c->cmd_ring[posted].flags = 0; 291 290 ··· 314 299 if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT) 315 300 return 0; 316 301 302 + result = dc2c->result + dc2c->next_result; 303 + color = dc2c->color; 304 + 305 + /* 306 + * Increment next_result, after posting the devcmd, irrespective of 307 + * devcmd result, and it should be done only once. 308 + */ 309 + dc2c->next_result++; 310 + if (dc2c->next_result == dc2c->result_size) { 311 + dc2c->next_result = 0; 312 + dc2c->color = dc2c->color ? 0 : 1; 313 + } 314 + 317 315 for (delay = 0; delay < wait; delay++) { 318 316 udelay(100); 319 - if (result->color == dc2c->color) { 320 - dc2c->next_result++; 321 - if (dc2c->next_result == dc2c->result_size) { 322 - dc2c->next_result = 0; 323 - dc2c->color = dc2c->color ? 
0 : 1; 324 - } 317 + if (result->color == color) { 325 318 if (result->error) { 326 319 err = (int) result->error; 327 320 if (err != ERR_ECMDUNKNOWN || ··· 340 317 return err; 341 318 } 342 319 if (_CMD_DIR(cmd) & _CMD_DIR_READ) { 343 - /* 344 - * Adding the rmb() prevents the compiler 345 - * and/or CPU from reordering the reads which 346 - * would potentially result in reading stale 347 - * values. 348 - */ 349 - rmb(); 350 320 for (i = 0; i < VNIC_DEVCMD_NARGS; i++) 351 321 vdev->args[i] = result->results[i]; 352 322 }
+6 -3
drivers/scsi/st.c
··· 1974 1974 transfer = (int)cmdstatp->uremainder64; 1975 1975 else 1976 1976 transfer = 0; 1977 - if (STp->block_size == 0 && 1978 - cmdstatp->sense_hdr.sense_key == MEDIUM_ERROR) 1979 - transfer = bytes; 1977 + if (cmdstatp->sense_hdr.sense_key == MEDIUM_ERROR) { 1978 + if (STp->block_size == 0) 1979 + transfer = bytes; 1980 + /* Some drives set ILI with MEDIUM ERROR */ 1981 + cmdstatp->flags &= ~SENSE_ILI; 1982 + } 1980 1983 1981 1984 if (cmdstatp->flags & SENSE_ILI) { /* ILI */ 1982 1985 if (STp->block_size == 0 &&
+12 -35
drivers/scsi/sun3_scsi.c
··· 36 36 #include <scsi/scsi_host.h> 37 37 #include "sun3_scsi.h" 38 38 39 - /* Definitions for the core NCR5380 driver. */ 40 - 41 - #define REAL_DMA 42 - /* #define SUPPORT_TAGS */ 43 39 /* minimum number of bytes to do dma on */ 44 40 #define DMA_MIN_SIZE 129 45 41 46 - /* #define MAX_TAGS 32 */ 42 + /* Definitions for the core NCR5380 driver. */ 47 43 48 44 #define NCR5380_implementation_fields /* none */ 49 45 ··· 51 55 #define NCR5380_abort sun3scsi_abort 52 56 #define NCR5380_info sun3scsi_info 53 57 54 - #define NCR5380_dma_read_setup(instance, data, count) \ 55 - sun3scsi_dma_setup(instance, data, count, 0) 56 - #define NCR5380_dma_write_setup(instance, data, count) \ 57 - sun3scsi_dma_setup(instance, data, count, 1) 58 + #define NCR5380_dma_recv_setup(instance, data, count) (count) 59 + #define NCR5380_dma_send_setup(instance, data, count) (count) 58 60 #define NCR5380_dma_residual(instance) \ 59 61 sun3scsi_dma_residual(instance) 60 62 #define NCR5380_dma_xfer_len(instance, cmd, phase) \ 61 - sun3scsi_dma_xfer_len(cmd->SCp.this_residual, cmd, !((phase) & SR_IO)) 63 + sun3scsi_dma_xfer_len(cmd->SCp.this_residual, cmd) 62 64 63 65 #define NCR5380_acquire_dma_irq(instance) (1) 64 66 #define NCR5380_release_dma_irq(instance) ··· 72 78 module_param(setup_cmd_per_lun, int, 0); 73 79 static int setup_sg_tablesize = -1; 74 80 module_param(setup_sg_tablesize, int, 0); 75 - #ifdef SUPPORT_TAGS 76 - static int setup_use_tagged_queuing = -1; 77 - module_param(setup_use_tagged_queuing, int, 0); 78 - #endif 79 81 static int setup_hostid = -1; 80 82 module_param(setup_hostid, int, 0); 81 83 ··· 253 263 return last_residual; 254 264 } 255 265 256 - static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted, 257 - struct scsi_cmnd *cmd, 258 - int write_flag) 266 + static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted_len, 267 + struct scsi_cmnd *cmd) 259 268 { 260 - if (cmd->request->cmd_type == REQ_TYPE_FS) 261 - return wanted; 262 - else 
269 + if (wanted_len < DMA_MIN_SIZE || cmd->request->cmd_type != REQ_TYPE_FS) 263 270 return 0; 271 + 272 + return wanted_len; 264 273 } 265 274 266 275 static inline int sun3scsi_dma_start(unsigned long count, unsigned char *data) ··· 397 408 398 409 } 399 410 400 - #include "atari_NCR5380.c" 411 + #include "NCR5380.c" 401 412 402 413 #ifdef SUN3_SCSI_VME 403 414 #define SUN3_SCSI_NAME "Sun3 NCR5380 VME SCSI" ··· 505 516 instance->io_port = (unsigned long)ioaddr; 506 517 instance->irq = irq->start; 507 518 508 - #ifdef SUPPORT_TAGS 509 - host_flags |= setup_use_tagged_queuing > 0 ? FLAG_TAGGED_QUEUING : 0; 510 - #endif 511 - 512 519 error = NCR5380_init(instance, host_flags); 513 520 if (error) 514 521 goto fail_init; ··· 512 527 error = request_irq(instance->irq, scsi_sun3_intr, 0, 513 528 "NCR5380", instance); 514 529 if (error) { 515 - #ifdef REAL_DMA 516 530 pr_err(PFX "scsi%d: IRQ %d not free, bailing out\n", 517 531 instance->host_no, instance->irq); 518 532 goto fail_irq; 519 - #else 520 - pr_warn(PFX "scsi%d: IRQ %d not free, interrupts disabled\n", 521 - instance->host_no, instance->irq); 522 - instance->irq = NO_IRQ; 523 - #endif 524 533 } 525 534 526 535 dregs->csr = 0; ··· 544 565 return 0; 545 566 546 567 fail_host: 547 - if (instance->irq != NO_IRQ) 548 - free_irq(instance->irq, instance); 568 + free_irq(instance->irq, instance); 549 569 fail_irq: 550 570 NCR5380_exit(instance); 551 571 fail_init: ··· 561 583 struct Scsi_Host *instance = platform_get_drvdata(pdev); 562 584 563 585 scsi_remove_host(instance); 564 - if (instance->irq != NO_IRQ) 565 - free_irq(instance->irq, instance); 586 + free_irq(instance->irq, instance); 566 587 NCR5380_exit(instance); 567 588 scsi_host_put(instance); 568 589 if (udc_regs)
+7 -12
drivers/scsi/t128.c
··· 1 - #define PSEUDO_DMA 2 - 3 1 /* 4 2 * Trantor T128/T128F/T228 driver 5 3 * Note : architecturally, the T100 and T130 are different and won't ··· 74 76 75 77 #include <scsi/scsi_host.h> 76 78 #include "t128.h" 77 - #define AUTOPROBE_IRQ 78 79 #include "NCR5380.h" 79 80 80 81 static struct override { ··· 207 210 instance->base = base; 208 211 ((struct NCR5380_hostdata *)instance->hostdata)->base = p; 209 212 210 - if (NCR5380_init(instance, 0)) 213 + if (NCR5380_init(instance, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP)) 211 214 goto out_unregister; 212 215 213 216 NCR5380_maybe_reset_bus(instance); ··· 291 294 } 292 295 293 296 /* 294 - * Function : int NCR5380_pread (struct Scsi_Host *instance, 297 + * Function : int t128_pread (struct Scsi_Host *instance, 295 298 * unsigned char *dst, int len) 296 299 * 297 300 * Purpose : Fast 5380 pseudo-dma read function, transfers len bytes to ··· 303 306 * timeout. 304 307 */ 305 308 306 - static inline int 307 - NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, int len) 309 + static inline int t128_pread(struct Scsi_Host *instance, 310 + unsigned char *dst, int len) 308 311 { 309 312 struct NCR5380_hostdata *hostdata = shost_priv(instance); 310 313 void __iomem *reg, *base = hostdata->base; ··· 337 340 } 338 341 339 342 /* 340 - * Function : int NCR5380_pwrite (struct Scsi_Host *instance, 343 + * Function : int t128_pwrite (struct Scsi_Host *instance, 341 344 * unsigned char *src, int len) 342 345 * 343 346 * Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from ··· 349 352 * timeout. 
350 353 */ 351 354 352 - static inline int 353 - NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src, int len) 355 + static inline int t128_pwrite(struct Scsi_Host *instance, 356 + unsigned char *src, int len) 354 357 { 355 358 struct NCR5380_hostdata *hostdata = shost_priv(instance); 356 359 void __iomem *reg, *base = hostdata->base; ··· 391 394 .detect = t128_detect, 392 395 .release = t128_release, 393 396 .proc_name = "t128", 394 - .show_info = t128_show_info, 395 - .write_info = t128_write_info, 396 397 .info = t128_info, 397 398 .queuecommand = t128_queue_command, 398 399 .eh_abort_handler = t128_abort,
+5 -2
drivers/scsi/t128.h
··· 77 77 #define NCR5380_write(reg, value) writeb((value),(T128_address(reg))) 78 78 79 79 #define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize) 80 + #define NCR5380_dma_recv_setup t128_pread 81 + #define NCR5380_dma_send_setup t128_pwrite 82 + #define NCR5380_dma_residual(instance) (0) 80 83 81 84 #define NCR5380_intr t128_intr 82 85 #define NCR5380_queue_command t128_queue_command 83 86 #define NCR5380_abort t128_abort 84 87 #define NCR5380_bus_reset t128_bus_reset 85 88 #define NCR5380_info t128_info 86 - #define NCR5380_show_info t128_show_info 87 - #define NCR5380_write_info t128_write_info 89 + 90 + #define NCR5380_io_delay(x) udelay(x) 88 91 89 92 /* 15 14 12 10 7 5 3 90 93 1101 0100 1010 1000 */
+1 -1
drivers/usb/storage/scsiglue.c
··· 563 563 .target_alloc = target_alloc, 564 564 565 565 /* lots of sg segments can be handled */ 566 - .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS, 566 + .sg_tablesize = SG_MAX_SEGMENTS, 567 567 568 568 /* limit the total size of a transfer to 120 KB */ 569 569 .max_sectors = 240,
+25
include/linux/scatterlist.h
··· 286 286 #define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist)) 287 287 288 288 /* 289 + * The maximum number of SG segments that we will put inside a 290 + * scatterlist (unless chaining is used). Should ideally fit inside a 291 + * single page, to avoid a higher order allocation. We could define this 292 + * to SG_MAX_SINGLE_ALLOC to pack correctly at the highest order. The 293 + * minimum value is 32 294 + */ 295 + #define SG_CHUNK_SIZE 128 296 + 297 + /* 298 + * Like SG_CHUNK_SIZE, but for archs that have sg chaining. This limit 299 + * is totally arbitrary, a setting of 2048 will get you at least 8mb ios. 300 + */ 301 + #ifdef CONFIG_ARCH_HAS_SG_CHAIN 302 + #define SG_MAX_SEGMENTS 2048 303 + #else 304 + #define SG_MAX_SEGMENTS SG_CHUNK_SIZE 305 + #endif 306 + 307 + #ifdef CONFIG_SG_POOL 308 + void sg_free_table_chained(struct sg_table *table, bool first_chunk); 309 + int sg_alloc_table_chained(struct sg_table *table, int nents, 310 + struct scatterlist *first_chunk); 311 + #endif 312 + 313 + /* 289 314 * sg page iterator 290 315 * 291 316 * Iterates over sg entries page-by-page. On each successful iteration,
-19
include/scsi/scsi.h
··· 18 18 }; 19 19 20 20 /* 21 - * The maximum number of SG segments that we will put inside a 22 - * scatterlist (unless chaining is used). Should ideally fit inside a 23 - * single page, to avoid a higher order allocation. We could define this 24 - * to SG_MAX_SINGLE_ALLOC to pack correctly at the highest order. The 25 - * minimum value is 32 26 - */ 27 - #define SCSI_MAX_SG_SEGMENTS 128 28 - 29 - /* 30 - * Like SCSI_MAX_SG_SEGMENTS, but for archs that have sg chaining. This limit 31 - * is totally arbitrary, a setting of 2048 will get you at least 8mb ios. 32 - */ 33 - #ifdef CONFIG_ARCH_HAS_SG_CHAIN 34 - #define SCSI_MAX_SG_CHAIN_SEGMENTS 2048 35 - #else 36 - #define SCSI_MAX_SG_CHAIN_SEGMENTS SCSI_MAX_SG_SEGMENTS 37 - #endif 38 - 39 - /* 40 21 * DIX-capable adapters effectively support infinite chaining for the 41 22 * protection information scatterlist 42 23 */
+11 -3
include/scsi/scsi_device.h
··· 50 50 SDEV_CREATED_BLOCK, /* same as above but for created devices */ 51 51 }; 52 52 53 + enum scsi_scan_mode { 54 + SCSI_SCAN_INITIAL = 0, 55 + SCSI_SCAN_RESCAN, 56 + SCSI_SCAN_MANUAL, 57 + }; 58 + 53 59 enum scsi_device_event { 54 60 SDEV_EVT_MEDIA_CHANGE = 1, /* media has changed */ 55 61 SDEV_EVT_INQUIRY_CHANGE_REPORTED, /* 3F 03 UA reported */ ··· 248 242 enum scsi_target_state { 249 243 STARGET_CREATED = 1, 250 244 STARGET_RUNNING, 245 + STARGET_REMOVE, 251 246 STARGET_DEL, 252 247 }; 253 248 ··· 398 391 extern void scsi_target_quiesce(struct scsi_target *); 399 392 extern void scsi_target_resume(struct scsi_target *); 400 393 extern void scsi_scan_target(struct device *parent, unsigned int channel, 401 - unsigned int id, u64 lun, int rescan); 394 + unsigned int id, u64 lun, 395 + enum scsi_scan_mode rescan); 402 396 extern void scsi_target_reap(struct scsi_target *); 403 397 extern void scsi_target_block(struct device *); 404 398 extern void scsi_target_unblock(struct device *, enum scsi_device_state); ··· 542 534 /* 543 535 * Although VPD inquiries can go to SCSI-2 type devices, 544 536 * some USB ones crash on receiving them, and the pages 545 - * we currently ask for are for SPC-3 and beyond 537 + * we currently ask for are mandatory for SPC-2 and beyond 546 538 */ 547 - if (sdev->scsi_level > SCSI_SPC_2 && !sdev->skip_vpd_pages) 539 + if (sdev->scsi_level >= SCSI_SPC_2 && !sdev->skip_vpd_pages) 548 540 return 1; 549 541 return 0; 550 542 }
+1 -1
include/scsi/scsi_host.h
··· 37 37 * used in one scatter-gather request. 38 38 */ 39 39 #define SG_NONE 0 40 - #define SG_ALL SCSI_MAX_SG_SEGMENTS 40 + #define SG_ALL SG_CHUNK_SIZE 41 41 42 42 #define MODE_UNKNOWN 0x00 43 43 #define MODE_INITIATOR 0x01
+9
include/scsi/scsi_proto.h
··· 115 115 #define VERIFY_16 0x8f 116 116 #define SYNCHRONIZE_CACHE_16 0x91 117 117 #define WRITE_SAME_16 0x93 118 + #define ZBC_OUT 0x94 119 + #define ZBC_IN 0x95 118 120 #define SERVICE_ACTION_BIDIRECTIONAL 0x9d 119 121 #define SERVICE_ACTION_IN_16 0x9e 120 122 #define SERVICE_ACTION_OUT_16 0x9f ··· 145 143 #define MO_SET_PRIORITY 0x0e 146 144 #define MO_SET_TIMESTAMP 0x0f 147 145 #define MO_MANAGEMENT_PROTOCOL_OUT 0x10 146 + /* values for ZBC_IN */ 147 + #define ZI_REPORT_ZONES 0x00 148 + /* values for ZBC_OUT */ 149 + #define ZO_CLOSE_ZONE 0x01 150 + #define ZO_FINISH_ZONE 0x02 151 + #define ZO_OPEN_ZONE 0x03 152 + #define ZO_RESET_WRITE_POINTER 0x04 148 153 /* values for variable length command */ 149 154 #define XDREAD_32 0x03 150 155 #define XDWRITE_32 0x04
+2 -4
include/trace/events/scsi.h
··· 94 94 scsi_opcode_name(WRITE_16), \ 95 95 scsi_opcode_name(VERIFY_16), \ 96 96 scsi_opcode_name(WRITE_SAME_16), \ 97 + scsi_opcode_name(ZBC_OUT), \ 98 + scsi_opcode_name(ZBC_IN), \ 97 99 scsi_opcode_name(SERVICE_ACTION_IN_16), \ 98 - scsi_opcode_name(SAI_READ_CAPACITY_16), \ 99 - scsi_opcode_name(SAI_GET_LBA_STATUS), \ 100 - scsi_opcode_name(MI_REPORT_TARGET_PGS), \ 101 - scsi_opcode_name(MO_SET_TARGET_PGS), \ 102 100 scsi_opcode_name(READ_32), \ 103 101 scsi_opcode_name(WRITE_32), \ 104 102 scsi_opcode_name(WRITE_SAME_32), \
+7
lib/Kconfig
··· 523 523 a scatterlist. This should be selected by a driver or an API which 524 524 whishes to split a scatterlist amongst multiple DMA channels. 525 525 526 + config SG_POOL 527 + def_bool n 528 + help 529 + Provides a helper to allocate chained scatterlists. This should be 530 + selected by a driver or an API which whishes to allocate chained 531 + scatterlist. 532 + 526 533 # 527 534 # sg chaining option 528 535 #
+1
lib/Makefile
··· 178 178 obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o 179 179 180 180 obj-$(CONFIG_SG_SPLIT) += sg_split.o 181 + obj-$(CONFIG_SG_POOL) += sg_pool.o 181 182 obj-$(CONFIG_STMP_DEVICE) += stmp_device.o 182 183 obj-$(CONFIG_IRQ_POLL) += irq_poll.o 183 184
+172
lib/sg_pool.c
··· 1 + #include <linux/module.h> 2 + #include <linux/scatterlist.h> 3 + #include <linux/mempool.h> 4 + #include <linux/slab.h> 5 + 6 + #define SG_MEMPOOL_NR ARRAY_SIZE(sg_pools) 7 + #define SG_MEMPOOL_SIZE 2 8 + 9 + struct sg_pool { 10 + size_t size; 11 + char *name; 12 + struct kmem_cache *slab; 13 + mempool_t *pool; 14 + }; 15 + 16 + #define SP(x) { .size = x, "sgpool-" __stringify(x) } 17 + #if (SG_CHUNK_SIZE < 32) 18 + #error SG_CHUNK_SIZE is too small (must be 32 or greater) 19 + #endif 20 + static struct sg_pool sg_pools[] = { 21 + SP(8), 22 + SP(16), 23 + #if (SG_CHUNK_SIZE > 32) 24 + SP(32), 25 + #if (SG_CHUNK_SIZE > 64) 26 + SP(64), 27 + #if (SG_CHUNK_SIZE > 128) 28 + SP(128), 29 + #if (SG_CHUNK_SIZE > 256) 30 + #error SG_CHUNK_SIZE is too large (256 MAX) 31 + #endif 32 + #endif 33 + #endif 34 + #endif 35 + SP(SG_CHUNK_SIZE) 36 + }; 37 + #undef SP 38 + 39 + static inline unsigned int sg_pool_index(unsigned short nents) 40 + { 41 + unsigned int index; 42 + 43 + BUG_ON(nents > SG_CHUNK_SIZE); 44 + 45 + if (nents <= 8) 46 + index = 0; 47 + else 48 + index = get_count_order(nents) - 3; 49 + 50 + return index; 51 + } 52 + 53 + static void sg_pool_free(struct scatterlist *sgl, unsigned int nents) 54 + { 55 + struct sg_pool *sgp; 56 + 57 + sgp = sg_pools + sg_pool_index(nents); 58 + mempool_free(sgl, sgp->pool); 59 + } 60 + 61 + static struct scatterlist *sg_pool_alloc(unsigned int nents, gfp_t gfp_mask) 62 + { 63 + struct sg_pool *sgp; 64 + 65 + sgp = sg_pools + sg_pool_index(nents); 66 + return mempool_alloc(sgp->pool, gfp_mask); 67 + } 68 + 69 + /** 70 + * sg_free_table_chained - Free a previously mapped sg table 71 + * @table: The sg table header to use 72 + * @first_chunk: was first_chunk not NULL in sg_alloc_table_chained? 73 + * 74 + * Description: 75 + * Free an sg table previously allocated and setup with 76 + * sg_alloc_table_chained(). 
77 + * 78 + **/ 79 + void sg_free_table_chained(struct sg_table *table, bool first_chunk) 80 + { 81 + if (first_chunk && table->orig_nents <= SG_CHUNK_SIZE) 82 + return; 83 + __sg_free_table(table, SG_CHUNK_SIZE, first_chunk, sg_pool_free); 84 + } 85 + EXPORT_SYMBOL_GPL(sg_free_table_chained); 86 + 87 + /** 88 + * sg_alloc_table_chained - Allocate and chain SGLs in an sg table 89 + * @table: The sg table header to use 90 + * @nents: Number of entries in sg list 91 + * @first_chunk: first SGL 92 + * 93 + * Description: 94 + * Allocate and chain SGLs in an sg table. If @nents@ is larger than 95 + * SG_CHUNK_SIZE a chained sg table will be setup. 96 + * 97 + **/ 98 + int sg_alloc_table_chained(struct sg_table *table, int nents, 99 + struct scatterlist *first_chunk) 100 + { 101 + int ret; 102 + 103 + BUG_ON(!nents); 104 + 105 + if (first_chunk) { 106 + if (nents <= SG_CHUNK_SIZE) { 107 + table->nents = table->orig_nents = nents; 108 + sg_init_table(table->sgl, nents); 109 + return 0; 110 + } 111 + } 112 + 113 + ret = __sg_alloc_table(table, nents, SG_CHUNK_SIZE, 114 + first_chunk, GFP_ATOMIC, sg_pool_alloc); 115 + if (unlikely(ret)) 116 + sg_free_table_chained(table, (bool)first_chunk); 117 + return ret; 118 + } 119 + EXPORT_SYMBOL_GPL(sg_alloc_table_chained); 120 + 121 + static __init int sg_pool_init(void) 122 + { 123 + int i; 124 + 125 + for (i = 0; i < SG_MEMPOOL_NR; i++) { 126 + struct sg_pool *sgp = sg_pools + i; 127 + int size = sgp->size * sizeof(struct scatterlist); 128 + 129 + sgp->slab = kmem_cache_create(sgp->name, size, 0, 130 + SLAB_HWCACHE_ALIGN, NULL); 131 + if (!sgp->slab) { 132 + printk(KERN_ERR "SG_POOL: can't init sg slab %s\n", 133 + sgp->name); 134 + goto cleanup_sdb; 135 + } 136 + 137 + sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE, 138 + sgp->slab); 139 + if (!sgp->pool) { 140 + printk(KERN_ERR "SG_POOL: can't init sg mempool %s\n", 141 + sgp->name); 142 + goto cleanup_sdb; 143 + } 144 + } 145 + 146 + return 0; 147 + 148 + cleanup_sdb: 
149 + for (i = 0; i < SG_MEMPOOL_NR; i++) { 150 + struct sg_pool *sgp = sg_pools + i; 151 + if (sgp->pool) 152 + mempool_destroy(sgp->pool); 153 + if (sgp->slab) 154 + kmem_cache_destroy(sgp->slab); 155 + } 156 + 157 + return -ENOMEM; 158 + } 159 + 160 + static __exit void sg_pool_exit(void) 161 + { 162 + int i; 163 + 164 + for (i = 0; i < SG_MEMPOOL_NR; i++) { 165 + struct sg_pool *sgp = sg_pools + i; 166 + mempool_destroy(sgp->pool); 167 + kmem_cache_destroy(sgp->slab); 168 + } 169 + } 170 + 171 + module_init(sg_pool_init); 172 + module_exit(sg_pool_exit);