Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (59 commits)
[SCSI] replace __FUNCTION__ with __func__
[SCSI] extend the last_sector_bug flag to cover more sectors
[SCSI] qla2xxx: Update version number to 8.02.01-k6.
[SCSI] qla2xxx: Additional NPIV corrections.
[SCSI] qla2xxx: suppress uninitialized-var warning
[SCSI] qla2xxx: use memory_read_from_buffer()
[SCSI] qla2xxx: Issue proper ISP callbacks during stop-firmware.
[SCSI] ch: fix ch_remove oops
[SCSI] 3w-9xxx: add MSI support and misc fixes
[SCSI] scsi_lib: use blk_rq_tagged in scsi_request_fn
[SCSI] ibmvfc: Update driver version to 1.0.1
[SCSI] ibmvfc: Add ADISC support
[SCSI] ibmvfc: Miscellaneous fixes
[SCSI] ibmvfc: Fix hang on module removal
[SCSI] ibmvfc: Target refcounting fixes
[SCSI] ibmvfc: Reduce unnecessary log noise
[SCSI] sym53c8xx: free luntbl in sym_hcb_free
[SCSI] scsi_scan.c: Release mutex in error handling code
[SCSI] scsi_eh_prep_cmnd should save scmd->underflow
[SCSI] sd: Support for SCSI disk (SBC) Data Integrity Field
...
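
As a quick illustration of the tree-wide change in the first commit above, here is a minimal user-space sketch (not kernel code; probe_device is a hypothetical name). __func__ is the C99 standard predefined identifier; __FUNCTION__ is the older GCC-specific spelling of the same thing, which is why the tree moved off it:

    #include <stdio.h>

    static void probe_device(void)
    {
        /* Both spellings expand to "probe_device" under GCC, but only
         * __func__ is standard C. */
        printf("%s: probing\n", __func__);
    }

    int main(void)
    {
        probe_device();
        return 0;
    }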

+3961 -1044
+13
drivers/md/dm-mpath.c
···
 static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
 {
 	struct pgpath *pgpath, *tmp;
+	struct multipath *m = ti->private;
 
 	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
 		list_del(&pgpath->list);
+		if (m->hw_handler_name)
+			scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
 		dm_put_device(ti, pgpath->path.dev);
 		free_pgpath(pgpath);
 	}
···
 {
 	int r;
 	struct pgpath *p;
+	struct multipath *m = ti->private;
 
 	/* we need at least a path arg */
 	if (as->argc < 1) {
···
 	if (r) {
 		ti->error = "error getting device";
 		goto bad;
+	}
+
+	if (m->hw_handler_name) {
+		r = scsi_dh_attach(bdev_get_queue(p->path.dev->bdev),
+				   m->hw_handler_name);
+		if (r < 0) {
+			dm_put_device(ti, p->path.dev);
+			goto bad;
+		}
 	}
 
 	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
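
The dm-mpath hunks above pin a SCSI device handler to every path when the multipath table names one. A condensed sketch of that attach/detach pairing (parse_path_dh is a hypothetical wrapper, not a dm-mpath symbol; scsi_dh_attach()/scsi_dh_detach() are the interfaces added in scsi_dh.c further down):

    #include <scsi/scsi_dh.h>

    /* Hypothetical wrapper: attach the table's hardware handler to one path. */
    static int parse_path_dh(struct dm_target *ti, struct pgpath *p)
    {
        struct multipath *m = ti->private;
        int r;

        if (!m->hw_handler_name)
            return 0;	/* no handler requested in the table */

        /* Attach by name to the path's request queue... */
        r = scsi_dh_attach(bdev_get_queue(p->path.dev->bdev),
                           m->hw_handler_name);
        if (r < 0)	/* e.g. -EINVAL when no such handler is registered */
            dm_put_device(ti, p->path.dev);
        return r;
    }

    /* ...and the matching scsi_dh_detach() runs from free_pgpaths() on teardown. */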
+12 -12
drivers/message/fusion/mptbase.c
···
 	ioc_raw_state = mpt_GetIocState(ioc, 0);
 	if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) {
 		printk(MYIOC_s_WARN_FMT "IOC is in FAULT state (%04xh)!!!\n",
-		    ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK);
+		       ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK);
 		printk(MYIOC_s_WARN_FMT "Issuing HardReset from %s!!\n",
-		    ioc->name, __FUNCTION__);
+		       ioc->name, __func__);
 		rc = mpt_HardResetHandler(ioc, CAN_SLEEP);
 		printk(MYIOC_s_WARN_FMT "%s: HardReset: %s\n", ioc->name,
-		    __FUNCTION__, (rc == 0) ? "success" : "failed");
+		       __func__, (rc == 0) ? "success" : "failed");
 		ioc_raw_state = mpt_GetIocState(ioc, 0);
 		if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT)
 			printk(MYIOC_s_WARN_FMT "IOC is in FAULT state after "
···
 	if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
 	    MptCallbacks[cb_idx] == NULL) {
 		printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
-		    __FUNCTION__, ioc->name, cb_idx);
+		    __func__, ioc->name, cb_idx);
 		goto out;
 	}
 
···
 	if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
 	    MptCallbacks[cb_idx] == NULL) {
 		printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
-		    __FUNCTION__, ioc->name, cb_idx);
+		    __func__, ioc->name, cb_idx);
 		freeme = 0;
 		goto out;
 	}
···
 
 	if (ioc->cached_fw != NULL) {
 		ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: Pushing FW onto "
-		    "adapter\n", __FUNCTION__, ioc->name));
+		    "adapter\n", __func__, ioc->name));
 		if ((ret = mpt_downloadboot(ioc, (MpiFwHeader_t *)
 		    ioc->cached_fw, CAN_SLEEP)) < 0) {
 			printk(MYIOC_s_WARN_FMT
···
 
 	if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078) {
 		drsprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: Doorbell=%p; 1078 reset "
-		    "address=%p\n", ioc->name, __FUNCTION__,
+		    "address=%p\n", ioc->name, __func__,
 		    &ioc->chip->Doorbell, &ioc->chip->Reset_1078));
 		CHIPREG_WRITE32(&ioc->chip->Reset_1078, 0x07);
 		if (sleepFlag == CAN_SLEEP)
···
 		break;
 	}
 
-	printk("%s: persist_opcode=%x\n",__FUNCTION__, persist_opcode);
+	printk("%s: persist_opcode=%x\n",__func__, persist_opcode);
 
 	/* Get a MF for this command.
 	 */
 	if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
-		printk("%s: no msg frames!\n",__FUNCTION__);
+		printk("%s: no msg frames!\n",__func__);
 		return -1;
 	}
 
···
 	    (SasIoUnitControlReply_t *)ioc->persist_reply_frame;
 	if (le16_to_cpu(sasIoUnitCntrReply->IOCStatus) != MPI_IOCSTATUS_SUCCESS) {
 		printk("%s: IOCStatus=0x%X IOCLogInfo=0x%X\n",
-		    __FUNCTION__,
+		    __func__,
 		    sasIoUnitCntrReply->IOCStatus,
 		    sasIoUnitCntrReply->IOCLogInfo);
 		return -1;
 	}
 
-	printk("%s: success\n",__FUNCTION__);
+	printk("%s: success\n",__func__);
 	return 0;
 }
 
···
 
 	if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
 		dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n",
-		    ioc->name,__FUNCTION__));
+		    ioc->name,__func__));
 		return -1;
 	}
 
+2 -2
drivers/message/fusion/mptctl.c
···
 	event = le32_to_cpu(pEvReply->Event) & 0xFF;
 
 	dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s() called\n",
-	    ioc->name, __FUNCTION__));
+	    ioc->name, __func__));
 	if(async_queue == NULL)
 		return 1;
 
···
 	 */
 	if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) {
 		dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n",
-		    ioc->name,__FUNCTION__));
+		    ioc->name,__func__));
 		goto out;
 	}
 
+4 -4
drivers/message/fusion/mptfc.c
···
 mptfc_abort(struct scsi_cmnd *SCpnt)
 {
 	return
-	    mptfc_block_error_handler(SCpnt, mptscsih_abort, __FUNCTION__);
+	    mptfc_block_error_handler(SCpnt, mptscsih_abort, __func__);
 }
 
 static int
 mptfc_dev_reset(struct scsi_cmnd *SCpnt)
 {
 	return
-	    mptfc_block_error_handler(SCpnt, mptscsih_dev_reset, __FUNCTION__);
+	    mptfc_block_error_handler(SCpnt, mptscsih_dev_reset, __func__);
 }
 
 static int
 mptfc_bus_reset(struct scsi_cmnd *SCpnt)
 {
 	return
-	    mptfc_block_error_handler(SCpnt, mptscsih_bus_reset, __FUNCTION__);
+	    mptfc_block_error_handler(SCpnt, mptscsih_bus_reset, __func__);
 }
 
 static int
 mptfc_host_reset(struct scsi_cmnd *SCpnt)
 {
 	return
-	    mptfc_block_error_handler(SCpnt, mptscsih_host_reset, __FUNCTION__);
+	    mptfc_block_error_handler(SCpnt, mptscsih_host_reset, __func__);
 }
 
 static void
+13 -13
drivers/message/fusion/mptlan.c
···
 
 	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
 			IOC_AND_NETDEV_NAMES_s_s(dev),
-			__FUNCTION__, sent));
+			__func__, sent));
 
 	priv->SendCtl[ctx].skb = NULL;
 	pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
···
 
 	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
 			IOC_AND_NETDEV_NAMES_s_s(dev),
-			__FUNCTION__, sent));
+			__func__, sent));
 
 	priv->SendCtl[ctx].skb = NULL;
 	pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
···
 	u16 cur_naa = 0x1000;
 
 	dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
-			__FUNCTION__, skb));
+			__func__, skb));
 
 	spin_lock_irqsave(&priv->txfidx_lock, flags);
 	if (priv->mpt_txfidx_tail < 0) {
···
 		spin_unlock_irqrestore(&priv->txfidx_lock, flags);
 
 		printk (KERN_ERR "%s: no tx context available: %u\n",
-			__FUNCTION__, priv->mpt_txfidx_tail);
+			__func__, priv->mpt_txfidx_tail);
 		return 1;
 	}
 
···
 		spin_unlock_irqrestore(&priv->txfidx_lock, flags);
 
 		printk (KERN_ERR "%s: Unable to alloc request frame\n",
-			__FUNCTION__);
+			__func__);
 		return 1;
 	}
 
···
 
 	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
 			IOC_AND_NETDEV_NAMES_s_s(dev),
-			__FUNCTION__, buckets, curr));
+			__func__, buckets, curr));
 
 	max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
 			(MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));
···
 		mf = mpt_get_msg_frame(LanCtx, mpt_dev);
 		if (mf == NULL) {
 			printk (KERN_ERR "%s: Unable to alloc request frame\n",
-				__FUNCTION__);
+				__func__);
 			dioprintk((KERN_ERR "%s: %u buckets remaining\n",
-				__FUNCTION__, buckets));
+				__func__, buckets));
 			goto out;
 		}
 		pRecvReq = (LANReceivePostRequest_t *) mf;
···
 			spin_lock_irqsave(&priv->rxfidx_lock, flags);
 			if (priv->mpt_rxfidx_tail < 0) {
 				printk (KERN_ERR "%s: Can't alloc context\n",
-					__FUNCTION__);
+					__func__);
 				spin_unlock_irqrestore(&priv->rxfidx_lock,
 						       flags);
 				break;
···
 				if (skb == NULL) {
 					printk (KERN_WARNING
 						MYNAM "/%s: Can't alloc skb\n",
-						__FUNCTION__);
+						__func__);
 					priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
 					spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
 					break;
···
 
 		if (pSimple == NULL) {
/**/			printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
-/**/				__FUNCTION__);
+/**/				__func__);
 			mpt_free_msg_frame(mpt_dev, mf);
 			goto out;
 		}
···
 
 out:
 	dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
-		  __FUNCTION__, buckets, atomic_read(&priv->buckets_out)));
+		  __func__, buckets, atomic_read(&priv->buckets_out)));
 	dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
-	__FUNCTION__, priv->total_posted, priv->total_received));
+	__func__, priv->total_posted, priv->total_received));
 
 	clear_bit(0, &priv->post_buckets_active);
 }
+27 -27
drivers/message/fusion/mptsas.c
···
 	phy_info = port_info->phy_info;
 
 	dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: [%p]: num_phys=%02d "
-	    "bitmask=0x%016llX\n", ioc->name, __FUNCTION__, port_details,
+	    "bitmask=0x%016llX\n", ioc->name, __func__, port_details,
 	    port_details->num_phys, (unsigned long long)
 	    port_details->phy_bitmask));
 
···
 	 */
 	dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
 	    "%s: [%p]: deleting phy = %d\n",
-	    ioc->name, __FUNCTION__, port_details, i));
+	    ioc->name, __func__, port_details, i));
 	port_details->num_phys--;
 	port_details->phy_bitmask &= ~ (1 << phy_info->phy_id);
 	memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo));
···
 			continue;
 		dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
 		    "%s: [%p]: phy_id=%02d num_phys=%02d "
-		    "bitmask=0x%016llX\n", ioc->name, __FUNCTION__,
+		    "bitmask=0x%016llX\n", ioc->name, __func__,
 		    port_details, i, port_details->num_phys,
 		    (unsigned long long)port_details->phy_bitmask));
 		dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\t\tport = %p rphy=%p\n",
···
 
 	if ((mf = mpt_get_msg_frame(ioc->TaskCtx, ioc)) == NULL) {
 		dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames @%d!!\n",
-		    ioc->name,__FUNCTION__, __LINE__));
+		    ioc->name,__func__, __LINE__));
 		return 0;
 	}
 
···
 	    GFP_ATOMIC);
 	if (!target_reset_list) {
 		dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, failed to allocate mem @%d..!!\n",
-		    ioc->name,__FUNCTION__, __LINE__));
+		    ioc->name,__func__, __LINE__));
 		return;
 	}
 
···
 	ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
 	if (!ev) {
 		dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, failed to allocate mem @%d..!!\n",
-		    ioc->name,__FUNCTION__, __LINE__));
+		    ioc->name,__func__, __LINE__));
 		return;
 	}
 
···
 	reply = (SasIoUnitControlReply_t *)ioc->sas_mgmt.reply;
 	if (reply->IOCStatus != MPI_IOCSTATUS_SUCCESS) {
 		printk(MYIOC_s_INFO_FMT "%s: IOCStatus=0x%X IOCLogInfo=0x%X\n",
-		    ioc->name, __FUNCTION__, reply->IOCStatus, reply->IOCLogInfo);
+		    ioc->name, __func__, reply->IOCStatus, reply->IOCLogInfo);
 		error = -ENXIO;
 		goto out_unlock;
 	}
···
 
 	if (!rsp) {
 		printk(MYIOC_s_ERR_FMT "%s: the smp response space is missing\n",
-		    ioc->name, __FUNCTION__);
+		    ioc->name, __func__);
 		return -EINVAL;
 	}
 
 	/* do we need to support multiple segments? */
 	if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
 		printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n",
-		    ioc->name, __FUNCTION__, req->bio->bi_vcnt, req->data_len,
+		    ioc->name, __func__, req->bio->bi_vcnt, req->data_len,
 		    rsp->bio->bi_vcnt, rsp->data_len);
 		return -EINVAL;
 	}
···
 
 	timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ);
 	if (!timeleft) {
-		printk(MYIOC_s_ERR_FMT "%s: smp timeout!\n", ioc->name, __FUNCTION__);
+		printk(MYIOC_s_ERR_FMT "%s: smp timeout!\n", ioc->name, __func__);
 		/* On timeout reset the board */
 		mpt_HardResetHandler(ioc, CAN_SLEEP);
 		ret = -ETIMEDOUT;
···
 		rsp->data_len -= smprep->ResponseDataLength;
 	} else {
 		printk(MYIOC_s_ERR_FMT "%s: smp passthru reply failed to be returned\n",
-		    ioc->name, __FUNCTION__);
+		    ioc->name, __func__);
 		ret = -ENXIO;
 	}
 unmap:
···
 	if (error) {
 		dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 			"%s: exit at line=%d\n", ioc->name,
-			__FUNCTION__, __LINE__));
+			__func__, __LINE__));
 		goto out;
 	}
 	mptsas_set_port(ioc, phy_info, port);
···
 		if (!rphy) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 				"%s: exit at line=%d\n", ioc->name,
-				__FUNCTION__, __LINE__));
+				__func__, __LINE__));
 			goto out;
 		}
 
···
 		if (error) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 				"%s: exit at line=%d\n", ioc->name,
-				__FUNCTION__, __LINE__));
+				__func__, __LINE__));
 			sas_rphy_free(rphy);
 			goto out;
 		}
···
 		    (ev->channel << 8) + ev->id)) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 				"%s: exit at line=%d\n", ioc->name,
-				__FUNCTION__, __LINE__));
+				__func__, __LINE__));
 			break;
 		}
 		phy_info = mptsas_find_phyinfo_by_sas_address(
···
 		if (!phy_info){
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 				"%s: exit at line=%d\n", ioc->name,
-				__FUNCTION__, __LINE__));
+				__func__, __LINE__));
 			break;
 		}
 		if (!phy_info->port_details) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 				"%s: exit at line=%d\n", ioc->name,
-				__FUNCTION__, __LINE__));
+				__func__, __LINE__));
 			break;
 		}
 		rphy = mptsas_get_rphy(phy_info);
 		if (!rphy) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 				"%s: exit at line=%d\n", ioc->name,
-				__FUNCTION__, __LINE__));
+				__func__, __LINE__));
 			break;
 		}
 
···
 		if (!port) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 				"%s: exit at line=%d\n", ioc->name,
-				__FUNCTION__, __LINE__));
+				__func__, __LINE__));
 			break;
 		}
 
···
 		if (!vtarget) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 				"%s: exit at line=%d\n", ioc->name,
-				__FUNCTION__, __LINE__));
+				__func__, __LINE__));
 			break;
 		}
 
···
 		    (ev->channel << 8) + ev->id)) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 				"%s: exit at line=%d\n", ioc->name,
-				__FUNCTION__, __LINE__));
+				__func__, __LINE__));
 			break;
 		}
 
···
 		if (!phy_info || !phy_info->port_details) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 				"%s: exit at line=%d\n", ioc->name,
-				__FUNCTION__, __LINE__));
+				__func__, __LINE__));
 			break;
 		}
 
···
 		if (!vtarget) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 				"%s: exit at line=%d\n", ioc->name,
-				__FUNCTION__, __LINE__));
+				__func__, __LINE__));
 			break;
 		}
 		/*
···
 		if (mptsas_get_rphy(phy_info)) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 				"%s: exit at line=%d\n", ioc->name,
-				__FUNCTION__, __LINE__));
+				__func__, __LINE__));
 			if (ev->channel) printk("%d\n", __LINE__);
 			break;
 		}
···
 		if (!port) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 				"%s: exit at line=%d\n", ioc->name,
-				__FUNCTION__, __LINE__));
+				__func__, __LINE__));
 			break;
 		}
 		memcpy(&phy_info->attached, &sas_device,
···
 		if (!rphy) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 				"%s: exit at line=%d\n", ioc->name,
-				__FUNCTION__, __LINE__));
+				__func__, __LINE__));
 			break; /* non-fatal: an rphy can be added later */
 		}
 
···
 		if (sas_rphy_add(rphy)) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 				"%s: exit at line=%d\n", ioc->name,
-				__FUNCTION__, __LINE__));
+				__func__, __LINE__));
 			sas_rphy_free(rphy);
 			break;
 		}
+2 -2
drivers/message/fusion/mptscsih.c
···
 
 	if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
 		dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: no msg frames!!\n",
-		    ioc->name,__FUNCTION__));
+		    ioc->name,__func__));
 		return;
 	}
 
···
 	    (ioc->debug_level & MPT_DEBUG_TM ))
 		printk("%s: ha=%d [%d:%d:0] task_type=0x%02X "
 			"iocstatus=0x%04X\n\tloginfo=0x%08X response_code=0x%02X "
-			"term_cmnds=%d\n", __FUNCTION__, ioc->id, pScsiTmReply->Bus,
+			"term_cmnds=%d\n", __func__, ioc->id, pScsiTmReply->Bus,
 			pScsiTmReply->TargetID, pScsiTmReq->TaskType,
 			le16_to_cpu(pScsiTmReply->IOCStatus),
 			le32_to_cpu(pScsiTmReply->IOCLogInfo),pScsiTmReply->ResponseCode,
+30 -10
drivers/scsi/3w-9xxx.c
···
    Written By: Adam Radford <linuxraid@amcc.com>
    Modifications By: Tom Couch <linuxraid@amcc.com>
 
-   Copyright (C) 2004-2007 Applied Micro Circuits Corporation.
+   Copyright (C) 2004-2008 Applied Micro Circuits Corporation.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
···
                  Add support for 9650SE controllers.
    2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails.
    2.26.02.010 - Add support for 9690SA controllers.
+   2.26.02.011 - Increase max AENs drained to 256.
+                 Add MSI support and "use_msi" module parameter.
+                 Fix bug in twa_get_param() on 4GB+.
+                 Use pci_resource_len() for ioremap().
 */
 
 #include <linux/module.h>
···
 #include "3w-9xxx.h"
 
 /* Globals */
-#define TW_DRIVER_VERSION "2.26.02.010"
+#define TW_DRIVER_VERSION "2.26.02.011"
 static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
 static unsigned int twa_device_extension_count;
 static int twa_major = -1;
···
 MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(TW_DRIVER_VERSION);
+
+static int use_msi = 0;
+module_param(use_msi, int, S_IRUGO);
+MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0");
 
 /* Function prototypes */
 static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
···
 	TW_Command_Full *full_command_packet;
 	TW_Command *command_packet;
 	TW_Param_Apache *param;
-	unsigned long param_value;
 	void *retval = NULL;
 
 	/* Setup the command packet */
···
 	param->table_id = cpu_to_le16(table_id | 0x8000);
 	param->parameter_id = cpu_to_le16(parameter_id);
 	param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
-	param_value = tw_dev->generic_buffer_phys[request_id];
 
-	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(param_value);
+	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
 	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
 
 	/* Post the command packet to the board */
···
 {
 	struct Scsi_Host *host = NULL;
 	TW_Device_Extension *tw_dev;
-	u32 mem_addr;
+	unsigned long mem_addr, mem_len;
 	int retval = -ENODEV;
 
 	retval = pci_enable_device(pdev);
···
 		goto out_free_device_extension;
 	}
 
-	if (pdev->device == PCI_DEVICE_ID_3WARE_9000)
+	if (pdev->device == PCI_DEVICE_ID_3WARE_9000) {
 		mem_addr = pci_resource_start(pdev, 1);
-	else
+		mem_len = pci_resource_len(pdev, 1);
+	} else {
 		mem_addr = pci_resource_start(pdev, 2);
+		mem_len = pci_resource_len(pdev, 2);
+	}
 
 	/* Save base address */
-	tw_dev->base_addr = ioremap(mem_addr, PAGE_SIZE);
+	tw_dev->base_addr = ioremap(mem_addr, mem_len);
 	if (!tw_dev->base_addr) {
 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
 		goto out_release_mem_region;
···
 
 	pci_set_drvdata(pdev, host);
 
-	printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%x, IRQ: %d.\n",
+	printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n",
 	       host->host_no, mem_addr, pdev->irq);
 	printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
 	       host->host_no,
···
 	       TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
 	       le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
 	       TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));
+
+	/* Try to enable MSI */
+	if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
+	    !pci_enable_msi(pdev))
+		set_bit(TW_USING_MSI, &tw_dev->flags);
 
 	/* Now setup the interrupt handler */
 	retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
···
 	return 0;
 
 out_remove_host:
+	if (test_bit(TW_USING_MSI, &tw_dev->flags))
+		pci_disable_msi(pdev);
 	scsi_remove_host(host);
 out_iounmap:
 	iounmap(tw_dev->base_addr);
···
 
 	/* Shutdown the card */
 	__twa_shutdown(tw_dev);
+
+	/* Disable MSI if enabled */
+	if (test_bit(TW_USING_MSI, &tw_dev->flags))
+		pci_disable_msi(pdev);
 
 	/* Free IO remapping */
 	iounmap(tw_dev->base_addr);
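
The MSI support above is strictly opt-in. A condensed sketch of the pattern as the hunks wire it up (tw_dev/pdev as in the driver): pci_enable_msi() returns 0 on success, the TW_USING_MSI flag remembers the mode, and both the error and remove paths mirror it with pci_disable_msi():

    /* Opt-in only: the user passes use_msi=1 (e.g. "modprobe 3w-9xxx use_msi=1"),
     * and the original 9000-series board is excluded. */
    if (use_msi && pdev->device != PCI_DEVICE_ID_3WARE_9000 &&
        !pci_enable_msi(pdev))		/* 0 == MSI granted */
        set_bit(TW_USING_MSI, &tw_dev->flags);

    /* request_irq() itself is unchanged: pdev->irq now carries the MSI vector. */

    /* Teardown, mirrored on the error and remove paths: */
    if (test_bit(TW_USING_MSI, &tw_dev->flags))
        pci_disable_msi(pdev);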
+5 -4
drivers/scsi/3w-9xxx.h
···
    Written By: Adam Radford <linuxraid@amcc.com>
    Modifications By: Tom Couch <linuxraid@amcc.com>
 
-   Copyright (C) 2004-2007 Applied Micro Circuits Corporation.
+   Copyright (C) 2004-2008 Applied Micro Circuits Corporation.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
···
 
 /* Compatibility defines */
 #define TW_9000_ARCH_ID 0x5
-#define TW_CURRENT_DRIVER_SRL 30
-#define TW_CURRENT_DRIVER_BUILD 80
+#define TW_CURRENT_DRIVER_SRL 35
+#define TW_CURRENT_DRIVER_BUILD 0
 #define TW_CURRENT_DRIVER_BRANCH 0
 
 /* Phase defines */
···
 #define TW_MAX_RESET_TRIES 2
 #define TW_MAX_CMDS_PER_LUN 254
 #define TW_MAX_RESPONSE_DRAIN 256
-#define TW_MAX_AEN_DRAIN 40
+#define TW_MAX_AEN_DRAIN 255
 #define TW_IN_RESET 2
+#define TW_USING_MSI 3
 #define TW_IN_ATTENTION_LOOP 4
 #define TW_MAX_SECTORS 256
 #define TW_AEN_WAIT_TIME 1000
+1
drivers/scsi/Kconfig
···
 config BLK_DEV_SD
 	tristate "SCSI disk support"
 	depends on SCSI
+	select CRC_T10DIF
 	---help---
 	  If you want to use SCSI hard disks, Fibre Channel disks,
 	  Serial ATA (SATA) or Parallel ATA (PATA) hard disks,
+2
drivers/scsi/Makefile
···
 scsi_tgt-y		+= scsi_tgt_lib.o scsi_tgt_if.o
 
 sd_mod-objs	:= sd.o
+sd_mod-$(CONFIG_BLK_DEV_INTEGRITY) += sd_dif.o
+
 sr_mod-objs	:= sr.o sr_ioctl.o sr_vendor.o
 ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
 		:= -DCONFIG_NCR53C8XX_PREFETCH -DSCSI_NCR_BIG_ENDIAN \
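
The two build hunks above wire in sd_dif.o and the CRC_T10DIF library for the "sd: Support for SCSI disk (SBC) Data Integrity Field" commit in the shortlog. As background, a sketch of the 8-byte integrity tuple that travels with each 512-byte sector under DIF Type 1 (struct and function names here are illustrative, not taken from sd_dif.c; the guard CRC is what the library's crc_t10dif() computes):

    #include <linux/crc-t10dif.h>

    struct dif_tuple {			/* illustrative Type 1 layout */
        __be16 guard_tag;		/* CRC16 over the 512 data bytes */
        __be16 app_tag;			/* application tag, opaque to the drive */
        __be32 ref_tag;			/* low 32 bits of the sector's LBA */
    };

    static void dif_generate(struct dif_tuple *t,
                             const unsigned char *data, sector_t lba)
    {
        t->guard_tag = cpu_to_be16(crc_t10dif(data, 512));
        t->app_tag = 0;
        t->ref_tag = cpu_to_be32(lower_32_bits(lba));
    }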
+1 -1
drivers/scsi/advansys.c
···
 #define ASC_DBG(lvl, format, arg...) {					\
 	if (asc_dbglvl >= (lvl))					\
 		printk(KERN_DEBUG "%s: %s: " format, DRV_NAME,		\
-			__FUNCTION__ , ## arg);				\
+			__func__ , ## arg);				\
 }
 
 #define ASC_DBG_PRT_SCSI_HOST(lvl, s) \
+6 -6
drivers/scsi/aha152x.c
···
 #define DO_LOCK(flags)	\
 	do { \
 		if(spin_is_locked(&QLOCK)) { \
-			DPRINTK(debug_intr, DEBUG_LEAD "(%s:%d) already locked at %s:%d\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__, QLOCKER, QLOCKERL); \
+			DPRINTK(debug_intr, DEBUG_LEAD "(%s:%d) already locked at %s:%d\n", CMDINFO(CURRENT_SC), __func__, __LINE__, QLOCKER, QLOCKERL); \
 		} \
-		DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) locking\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__); \
+		DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) locking\n", CMDINFO(CURRENT_SC), __func__, __LINE__); \
 		spin_lock_irqsave(&QLOCK,flags); \
-		DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) locked\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__); \
-		QLOCKER=__FUNCTION__; \
+		DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) locked\n", CMDINFO(CURRENT_SC), __func__, __LINE__); \
+		QLOCKER=__func__; \
 		QLOCKERL=__LINE__; \
 	} while(0)
 
 #define DO_UNLOCK(flags)	\
 	do { \
-		DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) unlocking (locked at %s:%d)\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__, QLOCKER, QLOCKERL); \
+		DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) unlocking (locked at %s:%d)\n", CMDINFO(CURRENT_SC), __func__, __LINE__, QLOCKER, QLOCKERL); \
 		spin_unlock_irqrestore(&QLOCK,flags); \
-		DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) unlocked\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__); \
+		DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) unlocked\n", CMDINFO(CURRENT_SC), __func__, __LINE__); \
 		QLOCKER="(not locked)"; \
 		QLOCKERL=0; \
 	} while(0)
+2 -2
drivers/scsi/aic94xx/aic94xx.h
···
 
 #ifdef ASD_ENTER_EXIT
 #define ENTER printk(KERN_NOTICE "%s: ENTER %s\n", ASD_DRIVER_NAME, \
-		__FUNCTION__)
+		__func__)
 #define EXIT  printk(KERN_NOTICE "%s: --EXIT %s\n", ASD_DRIVER_NAME, \
-		__FUNCTION__)
+		__func__)
 #else
 #define ENTER
 #define EXIT
+1 -1
drivers/scsi/aic94xx/aic94xx_hwi.c
···
 	struct asd_ascb *ascb_list;
 
 	if (!phy_mask) {
-		asd_printk("%s called with phy_mask of 0!?\n", __FUNCTION__);
+		asd_printk("%s called with phy_mask of 0!?\n", __func__);
 		return 0;
 	}
 
+23 -23
drivers/scsi/aic94xx/aic94xx_scb.c
···
 		phy->asd_port = port;
 	}
 	ASD_DPRINTK("%s: updating phy_mask 0x%x for phy%d\n",
-		    __FUNCTION__, phy->asd_port->phy_mask, sas_phy->id);
+		    __func__, phy->asd_port->phy_mask, sas_phy->id);
 	asd_update_port_links(asd_ha, phy);
 	spin_unlock_irqrestore(&asd_ha->asd_ports_lock, flags);
 }
···
 		struct asd_ascb *cp = asd_ascb_alloc_list(ascb->ha, &num,
 							  GFP_ATOMIC);
 		if (!cp) {
-			asd_printk("%s: out of memory\n", __FUNCTION__);
+			asd_printk("%s: out of memory\n", __func__);
 			goto out;
 		}
 		ASD_DPRINTK("phy%d: retries:0 performing link reset seq\n",
···
 		struct domain_device *failed_dev = NULL;
 
 		ASD_DPRINTK("%s: REQ_TASK_ABORT, reason=0x%X\n",
-			    __FUNCTION__, dl->status_block[3]);
+			    __func__, dl->status_block[3]);
 
 		/*
 		 * Find the task that caused the abort and abort it first.
···
 
 		if (!failed_dev) {
 			ASD_DPRINTK("%s: Can't find task (tc=%d) to abort!\n",
-				    __FUNCTION__, tc_abort);
+				    __func__, tc_abort);
 			goto out;
 		}
 
···
 		conn_handle = *((u16*)(&dl->status_block[1]));
 		conn_handle = le16_to_cpu(conn_handle);
 
-		ASD_DPRINTK("%s: REQ_DEVICE_RESET, reason=0x%X\n", __FUNCTION__,
+		ASD_DPRINTK("%s: REQ_DEVICE_RESET, reason=0x%X\n", __func__,
 			    dl->status_block[3]);
 
 		/* Find the last pending task for the device... */
···
 
 		if (!last_dev_task) {
 			ASD_DPRINTK("%s: Device reset for idle device %d?\n",
-				    __FUNCTION__, conn_handle);
+				    __func__, conn_handle);
 			goto out;
 		}
 
···
 		goto out;
 	}
 	case SIGNAL_NCQ_ERROR:
-		ASD_DPRINTK("%s: SIGNAL_NCQ_ERROR\n", __FUNCTION__);
+		ASD_DPRINTK("%s: SIGNAL_NCQ_ERROR\n", __func__);
 		goto out;
 	case CLEAR_NCQ_ERROR:
-		ASD_DPRINTK("%s: CLEAR_NCQ_ERROR\n", __FUNCTION__);
+		ASD_DPRINTK("%s: CLEAR_NCQ_ERROR\n", __func__);
 		goto out;
 	}
 
···
 
 	switch (sb_opcode) {
 	case BYTES_DMAED:
-		ASD_DPRINTK("%s: phy%d: BYTES_DMAED\n", __FUNCTION__, phy_id);
+		ASD_DPRINTK("%s: phy%d: BYTES_DMAED\n", __func__, phy_id);
 		asd_bytes_dmaed_tasklet(ascb, dl, edb, phy_id);
 		break;
 	case PRIMITIVE_RECVD:
-		ASD_DPRINTK("%s: phy%d: PRIMITIVE_RECVD\n", __FUNCTION__,
+		ASD_DPRINTK("%s: phy%d: PRIMITIVE_RECVD\n", __func__,
 			    phy_id);
 		asd_primitive_rcvd_tasklet(ascb, dl, phy_id);
 		break;
 	case PHY_EVENT:
-		ASD_DPRINTK("%s: phy%d: PHY_EVENT\n", __FUNCTION__, phy_id);
+		ASD_DPRINTK("%s: phy%d: PHY_EVENT\n", __func__, phy_id);
 		asd_phy_event_tasklet(ascb, dl);
 		break;
 	case LINK_RESET_ERROR:
-		ASD_DPRINTK("%s: phy%d: LINK_RESET_ERROR\n", __FUNCTION__,
+		ASD_DPRINTK("%s: phy%d: LINK_RESET_ERROR\n", __func__,
 			    phy_id);
 		asd_link_reset_err_tasklet(ascb, dl, phy_id);
 		break;
 	case TIMER_EVENT:
 		ASD_DPRINTK("%s: phy%d: TIMER_EVENT, lost dw sync\n",
-			    __FUNCTION__, phy_id);
+			    __func__, phy_id);
 		asd_turn_led(asd_ha, phy_id, 0);
 		/* the device is gone */
 		sas_phy_disconnected(sas_phy);
···
 		sas_ha->notify_port_event(sas_phy, PORTE_TIMER_EVENT);
 		break;
 	default:
-		ASD_DPRINTK("%s: phy%d: unknown event:0x%x\n", __FUNCTION__,
+		ASD_DPRINTK("%s: phy%d: unknown event:0x%x\n", __func__,
 			    phy_id, sb_opcode);
 		ASD_DPRINTK("edb is 0x%x! dl->opcode is 0x%x\n",
 			    edb, dl->opcode);
···
 
 	if (status != 0) {
 		ASD_DPRINTK("%s: phy%d status block opcode:0x%x\n",
-			    __FUNCTION__, phy_id, status);
+			    __func__, phy_id, status);
 		goto out;
 	}
 
···
 		asd_ha->hw_prof.enabled_phys &= ~(1 << phy_id);
 		asd_turn_led(asd_ha, phy_id, 0);
 		asd_control_led(asd_ha, phy_id, 0);
-		ASD_DPRINTK("%s: disable phy%d\n", __FUNCTION__, phy_id);
+		ASD_DPRINTK("%s: disable phy%d\n", __func__, phy_id);
 		break;
 
 	case ENABLE_PHY:
···
 			get_lrate_mode(phy, oob_mode);
 			asd_turn_led(asd_ha, phy_id, 1);
 			ASD_DPRINTK("%s: phy%d, lrate:0x%x, proto:0x%x\n",
-				    __FUNCTION__, phy_id,phy->sas_phy.linkrate,
+				    __func__, phy_id,phy->sas_phy.linkrate,
 				    phy->sas_phy.iproto);
 		} else if (oob_status & CURRENT_SPINUP_HOLD) {
 			asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
 			asd_turn_led(asd_ha, phy_id, 1);
-			ASD_DPRINTK("%s: phy%d, spinup hold\n", __FUNCTION__,
+			ASD_DPRINTK("%s: phy%d, spinup hold\n", __func__,
 				    phy_id);
 		} else if (oob_status & CURRENT_ERR_MASK) {
 			asd_turn_led(asd_ha, phy_id, 0);
 			ASD_DPRINTK("%s: phy%d: error: oob status:0x%02x\n",
-				    __FUNCTION__, phy_id, oob_status);
+				    __func__, phy_id, oob_status);
 		} else if (oob_status & (CURRENT_HOT_PLUG_CNCT
 					 | CURRENT_DEVICE_PRESENT)) {
 			asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
 			asd_turn_led(asd_ha, phy_id, 1);
 			ASD_DPRINTK("%s: phy%d: hot plug or device present\n",
-				    __FUNCTION__, phy_id);
+				    __func__, phy_id);
 		} else {
 			asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
 			asd_turn_led(asd_ha, phy_id, 0);
 			ASD_DPRINTK("%s: phy%d: no device present: "
 				    "oob_status:0x%x\n",
-				    __FUNCTION__, phy_id, oob_status);
+				    __func__, phy_id, oob_status);
 		}
 		break;
 	case RELEASE_SPINUP_HOLD:
 	case PHY_NO_OP:
 	case EXECUTE_HARD_RESET:
-		ASD_DPRINTK("%s: phy%d: sub_func:0x%x\n", __FUNCTION__,
+		ASD_DPRINTK("%s: phy%d: sub_func:0x%x\n", __func__,
 			    phy_id, control_phy->sub_func);
 		/* XXX finish */
 		break;
 	default:
-		ASD_DPRINTK("%s: phy%d: sub_func:0x%x?\n", __FUNCTION__,
+		ASD_DPRINTK("%s: phy%d: sub_func:0x%x?\n", __func__,
 			    phy_id, control_phy->sub_func);
 		break;
 	}
+1 -1
drivers/scsi/aic94xx/aic94xx_task.c
···
 	case TC_RESUME:
 	case TC_PARTIAL_SG_LIST:
 	default:
-		ASD_DPRINTK("%s: dl opcode: 0x%x?\n", __FUNCTION__, opcode);
+		ASD_DPRINTK("%s: dl opcode: 0x%x?\n", __func__, opcode);
 		break;
 	}
 
+9 -9
drivers/scsi/aic94xx/aic94xx_tmf.c
···
 				   struct done_list_struct *dl)
 {
 	struct tasklet_completion_status *tcs = ascb->uldd_task;
-	ASD_DPRINTK("%s: here\n", __FUNCTION__);
+	ASD_DPRINTK("%s: here\n", __func__);
 	if (!del_timer(&ascb->timer)) {
-		ASD_DPRINTK("%s: couldn't delete timer\n", __FUNCTION__);
+		ASD_DPRINTK("%s: couldn't delete timer\n", __func__);
 		return;
 	}
-	ASD_DPRINTK("%s: opcode: 0x%x\n", __FUNCTION__, dl->opcode);
+	ASD_DPRINTK("%s: opcode: 0x%x\n", __func__, dl->opcode);
 	tcs->dl_opcode = dl->opcode;
 	complete(ascb->completion);
 	asd_ascb_free(ascb);
···
 	struct asd_ascb *ascb = (void *)data;
 	struct tasklet_completion_status *tcs = ascb->uldd_task;
 
-	ASD_DPRINTK("%s: here\n", __FUNCTION__);
+	ASD_DPRINTK("%s: here\n", __func__);
 	tcs->dl_opcode = TMF_RESP_FUNC_FAILED;
 	complete(ascb->completion);
 }
···
 	DECLARE_COMPLETION_ONSTACK(completion); \
 	DECLARE_TCS(tcs); \
 	\
-	ASD_DPRINTK("%s: PRE\n", __FUNCTION__); \
+	ASD_DPRINTK("%s: PRE\n", __func__); \
 	res = 1; \
 	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \
 	if (!ascb) \
···
 	scb->header.opcode = CLEAR_NEXUS
 
 #define CLEAR_NEXUS_POST \
-	ASD_DPRINTK("%s: POST\n", __FUNCTION__); \
+	ASD_DPRINTK("%s: POST\n", __func__); \
 	res = asd_enqueue_internal(ascb, asd_clear_nexus_tasklet_complete, \
 				   asd_clear_nexus_timedout); \
 	if (res) \
 		goto out_err; \
-	ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __FUNCTION__); \
+	ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __func__); \
 	wait_for_completion(&completion); \
 	res = tcs.dl_opcode; \
 	if (res == TC_NO_ERROR) \
···
 	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
 		spin_unlock_irqrestore(&task->task_state_lock, flags);
 		res = TMF_RESP_FUNC_COMPLETE;
-		ASD_DPRINTK("%s: task 0x%p done\n", __FUNCTION__, task);
+		ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
 		goto out_done;
 	}
 	spin_unlock_irqrestore(&task->task_state_lock, flags);
···
 	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
 		spin_unlock_irqrestore(&task->task_state_lock, flags);
 		res = TMF_RESP_FUNC_COMPLETE;
-		ASD_DPRINTK("%s: task 0x%p done\n", __FUNCTION__, task);
+		ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
 		goto out_done;
 	}
 	spin_unlock_irqrestore(&task->task_state_lock, flags);
+2 -2
drivers/scsi/arm/fas216.c
···
 		panic("scsi memory space corrupted in %s", func);
 	}
 }
-#define fas216_checkmagic(info) __fas216_checkmagic((info), __FUNCTION__)
+#define fas216_checkmagic(info) __fas216_checkmagic((info), __func__)
 #else
 #define fas216_checkmagic(info)
 #endif
···
 	fas216_checkmagic(info);
 
 	printk("scsi%d.%c: %s: resetting host\n",
-		info->host->host_no, '0' + SCpnt->device->id, __FUNCTION__);
+		info->host->host_no, '0' + SCpnt->device->id, __func__);
 
 	/*
 	 * Reset the SCSI chip.
+1
drivers/scsi/ch.c
···
 	if (init)
 		ch_init_elem(ch);
 
+	dev_set_drvdata(dev, ch);
 	sdev_printk(KERN_INFO, sd, "Attached scsi changer %s\n", ch->name);
 
 	return 0;
+8
drivers/scsi/device_handler/Kconfig
···
 	depends on SCSI_DH
 	help
 	  If you have a EMC CLARiiON select y. Otherwise, say N.
+
+config SCSI_DH_ALUA
+	tristate "SPC-3 ALUA Device Handler (EXPERIMENTAL)"
+	depends on SCSI_DH && EXPERIMENTAL
+	help
+	  SCSI Device handler for generic SPC-3 Asymmetric Logical Unit
+	  Access (ALUA).
+
+1
drivers/scsi/device_handler/Makefile
···
 obj-$(CONFIG_SCSI_DH_RDAC)	+= scsi_dh_rdac.o
 obj-$(CONFIG_SCSI_DH_HP_SW)	+= scsi_dh_hp_sw.o
 obj-$(CONFIG_SCSI_DH_EMC)	+= scsi_dh_emc.o
+obj-$(CONFIG_SCSI_DH_ALUA)	+= scsi_dh_alua.o
+415 -31
drivers/scsi/device_handler/scsi_dh.c
···
 #include <scsi/scsi_dh.h>
 #include "../scsi_priv.h"
 
+struct scsi_dh_devinfo_list {
+	struct list_head node;
+	char vendor[9];
+	char model[17];
+	struct scsi_device_handler *handler;
+};
+
 static DEFINE_SPINLOCK(list_lock);
 static LIST_HEAD(scsi_dh_list);
+static LIST_HEAD(scsi_dh_dev_list);
 
 static struct scsi_device_handler *get_device_handler(const char *name)
 {
···
 
 	spin_lock(&list_lock);
 	list_for_each_entry(tmp, &scsi_dh_list, list) {
-		if (!strcmp(tmp->name, name)) {
+		if (!strncmp(tmp->name, name, strlen(tmp->name))) {
 			found = tmp;
 			break;
 		}
···
 	return found;
 }
 
+
+static struct scsi_device_handler *
+scsi_dh_cache_lookup(struct scsi_device *sdev)
+{
+	struct scsi_dh_devinfo_list *tmp;
+	struct scsi_device_handler *found_dh = NULL;
+
+	spin_lock(&list_lock);
+	list_for_each_entry(tmp, &scsi_dh_dev_list, node) {
+		if (!strncmp(sdev->vendor, tmp->vendor, strlen(tmp->vendor)) &&
+		    !strncmp(sdev->model, tmp->model, strlen(tmp->model))) {
+			found_dh = tmp->handler;
+			break;
+		}
+	}
+	spin_unlock(&list_lock);
+
+	return found_dh;
+}
+
+static int scsi_dh_handler_lookup(struct scsi_device_handler *scsi_dh,
+				  struct scsi_device *sdev)
+{
+	int i, found = 0;
+
+	for(i = 0; scsi_dh->devlist[i].vendor; i++) {
+		if (!strncmp(sdev->vendor, scsi_dh->devlist[i].vendor,
+			     strlen(scsi_dh->devlist[i].vendor)) &&
+		    !strncmp(sdev->model, scsi_dh->devlist[i].model,
+			     strlen(scsi_dh->devlist[i].model))) {
+			found = 1;
+			break;
+		}
+	}
+	return found;
+}
+
+/*
+ * device_handler_match - Attach a device handler to a device
+ * @scsi_dh - The device handler to match against or NULL
+ * @sdev - SCSI device to be tested against @scsi_dh
+ *
+ * Tests @sdev against the device handler @scsi_dh or against
+ * all registered device_handler if @scsi_dh == NULL.
+ * Returns the found device handler or NULL if not found.
+ */
+static struct scsi_device_handler *
+device_handler_match(struct scsi_device_handler *scsi_dh,
+		     struct scsi_device *sdev)
+{
+	struct scsi_device_handler *found_dh = NULL;
+	struct scsi_dh_devinfo_list *tmp;
+
+	found_dh = scsi_dh_cache_lookup(sdev);
+	if (found_dh)
+		return found_dh;
+
+	if (scsi_dh) {
+		if (scsi_dh_handler_lookup(scsi_dh, sdev))
+			found_dh = scsi_dh;
+	} else {
+		struct scsi_device_handler *tmp_dh;
+
+		spin_lock(&list_lock);
+		list_for_each_entry(tmp_dh, &scsi_dh_list, list) {
+			if (scsi_dh_handler_lookup(tmp_dh, sdev))
+				found_dh = tmp_dh;
+		}
+		spin_unlock(&list_lock);
+	}
+
+	if (found_dh) { /* If device is found, add it to the cache */
+		tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
+		if (tmp) {
+			strncpy(tmp->vendor, sdev->vendor, 8);
+			strncpy(tmp->model, sdev->model, 16);
+			tmp->vendor[8] = '\0';
+			tmp->model[16] = '\0';
+			tmp->handler = found_dh;
+			spin_lock(&list_lock);
+			list_add(&tmp->node, &scsi_dh_dev_list);
+			spin_unlock(&list_lock);
+		} else {
+			found_dh = NULL;
+		}
+	}
+
+	return found_dh;
+}
+
+/*
+ * scsi_dh_handler_attach - Attach a device handler to a device
+ * @sdev - SCSI device the device handler should attach to
+ * @scsi_dh - The device handler to attach
+ */
+static int scsi_dh_handler_attach(struct scsi_device *sdev,
+				  struct scsi_device_handler *scsi_dh)
+{
+	int err = 0;
+
+	if (sdev->scsi_dh_data) {
+		if (sdev->scsi_dh_data->scsi_dh != scsi_dh)
+			err = -EBUSY;
+	} else if (scsi_dh->attach)
+		err = scsi_dh->attach(sdev);
+
+	return err;
+}
+
+/*
+ * scsi_dh_handler_detach - Detach a device handler from a device
+ * @sdev - SCSI device the device handler should be detached from
+ * @scsi_dh - Device handler to be detached
+ *
+ * Detach from a device handler. If a device handler is specified,
+ * only detach if the currently attached handler matches @scsi_dh.
+ */
+static void scsi_dh_handler_detach(struct scsi_device *sdev,
+				   struct scsi_device_handler *scsi_dh)
+{
+	if (!sdev->scsi_dh_data)
+		return;
+
+	if (scsi_dh && scsi_dh != sdev->scsi_dh_data->scsi_dh)
+		return;
+
+	if (!scsi_dh)
+		scsi_dh = sdev->scsi_dh_data->scsi_dh;
+
+	if (scsi_dh && scsi_dh->detach)
+		scsi_dh->detach(sdev);
+}
+
+/*
+ * Functions for sysfs attribute 'dh_state'
+ */
+static ssize_t
+store_dh_state(struct device *dev, struct device_attribute *attr,
+	       const char *buf, size_t count)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct scsi_device_handler *scsi_dh;
+	int err = -EINVAL;
+
+	if (!sdev->scsi_dh_data) {
+		/*
+		 * Attach to a device handler
+		 */
+		if (!(scsi_dh = get_device_handler(buf)))
+			return err;
+		err = scsi_dh_handler_attach(sdev, scsi_dh);
+	} else {
+		scsi_dh = sdev->scsi_dh_data->scsi_dh;
+		if (!strncmp(buf, "detach", 6)) {
+			/*
+			 * Detach from a device handler
+			 */
+			scsi_dh_handler_detach(sdev, scsi_dh);
+			err = 0;
+		} else if (!strncmp(buf, "activate", 8)) {
+			/*
+			 * Activate a device handler
+			 */
+			if (scsi_dh->activate)
+				err = scsi_dh->activate(sdev);
+			else
+				err = 0;
+		}
+	}
+
+	return err<0?err:count;
+}
+
+static ssize_t
+show_dh_state(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+
+	if (!sdev->scsi_dh_data)
+		return snprintf(buf, 20, "detached\n");
+
+	return snprintf(buf, 20, "%s\n", sdev->scsi_dh_data->scsi_dh->name);
+}
+
+static struct device_attribute scsi_dh_state_attr =
+	__ATTR(dh_state, S_IRUGO | S_IWUSR, show_dh_state,
+	       store_dh_state);
+
+/*
+ * scsi_dh_sysfs_attr_add - Callback for scsi_init_dh
+ */
+static int scsi_dh_sysfs_attr_add(struct device *dev, void *data)
+{
+	struct scsi_device *sdev;
+	int err;
+
+	if (!scsi_is_sdev_device(dev))
+		return 0;
+
+	sdev = to_scsi_device(dev);
+
+	err = device_create_file(&sdev->sdev_gendev,
+				 &scsi_dh_state_attr);
+
+	return 0;
+}
+
+/*
+ * scsi_dh_sysfs_attr_remove - Callback for scsi_exit_dh
+ */
+static int scsi_dh_sysfs_attr_remove(struct device *dev, void *data)
+{
+	struct scsi_device *sdev;
+
+	if (!scsi_is_sdev_device(dev))
+		return 0;
+
+	sdev = to_scsi_device(dev);
+
+	device_remove_file(&sdev->sdev_gendev,
+			   &scsi_dh_state_attr);
+
+	return 0;
+}
+
+/*
+ * scsi_dh_notifier - notifier chain callback
+ */
+static int scsi_dh_notifier(struct notifier_block *nb,
+			    unsigned long action, void *data)
+{
+	struct device *dev = data;
+	struct scsi_device *sdev;
+	int err = 0;
+	struct scsi_device_handler *devinfo = NULL;
+
+	if (!scsi_is_sdev_device(dev))
+		return 0;
+
+	sdev = to_scsi_device(dev);
+
+	if (action == BUS_NOTIFY_ADD_DEVICE) {
+		devinfo = device_handler_match(NULL, sdev);
+		if (!devinfo)
+			goto out;
+
+		err = scsi_dh_handler_attach(sdev, devinfo);
+		if (!err)
+			err = device_create_file(dev, &scsi_dh_state_attr);
+	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
+		device_remove_file(dev, &scsi_dh_state_attr);
+		scsi_dh_handler_detach(sdev, NULL);
+	}
+out:
+	return err;
+}
+
+/*
+ * scsi_dh_notifier_add - Callback for scsi_register_device_handler
+ */
 static int scsi_dh_notifier_add(struct device *dev, void *data)
 {
 	struct scsi_device_handler *scsi_dh = data;
+	struct scsi_device *sdev;
 
-	scsi_dh->nb.notifier_call(&scsi_dh->nb, BUS_NOTIFY_ADD_DEVICE, dev);
+	if (!scsi_is_sdev_device(dev))
+		return 0;
+
+	if (!get_device(dev))
+		return 0;
+
+	sdev = to_scsi_device(dev);
+
+	if (device_handler_match(scsi_dh, sdev))
+		scsi_dh_handler_attach(sdev, scsi_dh);
+
+	put_device(dev);
+
+	return 0;
+}
+
+/*
+ * scsi_dh_notifier_remove - Callback for scsi_unregister_device_handler
+ */
+static int scsi_dh_notifier_remove(struct device *dev, void *data)
+{
+	struct scsi_device_handler *scsi_dh = data;
+	struct scsi_device *sdev;
+
+	if (!scsi_is_sdev_device(dev))
+		return 0;
+
+	if (!get_device(dev))
+		return 0;
+
+	sdev = to_scsi_device(dev);
+
+	scsi_dh_handler_detach(sdev, scsi_dh);
+
+	put_device(dev);
+
 	return 0;
 }
 
···
  */
 int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
 {
-	int ret = -EBUSY;
-	struct scsi_device_handler *tmp;
+	if (get_device_handler(scsi_dh->name))
+		return -EBUSY;
 
-	tmp = get_device_handler(scsi_dh->name);
-	if (tmp)
-		goto done;
-
-	ret = bus_register_notifier(&scsi_bus_type, &scsi_dh->nb);
-
-	bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add);
 	spin_lock(&list_lock);
 	list_add(&scsi_dh->list, &scsi_dh_list);
 	spin_unlock(&list_lock);
+	bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add);
+	printk(KERN_INFO "%s: device handler registered\n", scsi_dh->name);
 
-done:
-	return ret;
+	return SCSI_DH_OK;
 }
 EXPORT_SYMBOL_GPL(scsi_register_device_handler);
-
-static int scsi_dh_notifier_remove(struct device *dev, void *data)
-{
-	struct scsi_device_handler *scsi_dh = data;
-
-	scsi_dh->nb.notifier_call(&scsi_dh->nb, BUS_NOTIFY_DEL_DEVICE, dev);
-	return 0;
-}
 
 /*
  * scsi_unregister_device_handler - register a device handler personality
···
  */
 int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
 {
-	int ret = -ENODEV;
-	struct scsi_device_handler *tmp;
+	struct scsi_dh_devinfo_list *tmp, *pos;
 
-	tmp = get_device_handler(scsi_dh->name);
-	if (!tmp)
-		goto done;
-
-	ret = bus_unregister_notifier(&scsi_bus_type, &scsi_dh->nb);
+	if (!get_device_handler(scsi_dh->name))
+		return -ENODEV;
 
 	bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh,
-			scsi_dh_notifier_remove);
+			 scsi_dh_notifier_remove);
+
 	spin_lock(&list_lock);
 	list_del(&scsi_dh->list);
+	list_for_each_entry_safe(pos, tmp, &scsi_dh_dev_list, node) {
+		if (pos->handler == scsi_dh) {
+			list_del(&pos->node);
+			kfree(pos);
+		}
+	}
 	spin_unlock(&list_lock);
+	printk(KERN_INFO "%s: device handler unregistered\n", scsi_dh->name);
 
-done:
-	return ret;
+	return SCSI_DH_OK;
 }
 EXPORT_SYMBOL_GPL(scsi_unregister_device_handler);
 
···
 	return (get_device_handler(name) != NULL);
 }
 EXPORT_SYMBOL_GPL(scsi_dh_handler_exist);
+
+/*
+ * scsi_dh_handler_attach - Attach device handler
+ * @sdev - sdev the handler should be attached to
+ * @name - name of the handler to attach
+ */
+int scsi_dh_attach(struct request_queue *q, const char *name)
+{
+	unsigned long flags;
+	struct scsi_device *sdev;
+	struct scsi_device_handler *scsi_dh;
+	int err = 0;
+
+	scsi_dh = get_device_handler(name);
+	if (!scsi_dh)
+		return -EINVAL;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	sdev = q->queuedata;
+	if (!sdev || !get_device(&sdev->sdev_gendev))
+		err = -ENODEV;
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	if (!err) {
+		err = scsi_dh_handler_attach(sdev, scsi_dh);
+
+		put_device(&sdev->sdev_gendev);
+	}
+	return err;
+}
+EXPORT_SYMBOL_GPL(scsi_dh_attach);
+
+/*
+ * scsi_dh_handler_detach - Detach device handler
+ * @sdev - sdev the handler should be detached from
+ *
+ * This function will detach the device handler only
+ * if the sdev is not part of the internal list, ie
+ * if it has been attached manually.
+ */
+void scsi_dh_detach(struct request_queue *q)
+{
+	unsigned long flags;
+	struct scsi_device *sdev;
+	struct scsi_device_handler *scsi_dh = NULL;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	sdev = q->queuedata;
+	if (!sdev || !get_device(&sdev->sdev_gendev))
+		sdev = NULL;
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	if (!sdev)
+		return;
+
+	if (sdev->scsi_dh_data) {
+		/* if sdev is not on internal list, detach */
+		scsi_dh = sdev->scsi_dh_data->scsi_dh;
+		if (!device_handler_match(scsi_dh, sdev))
+			scsi_dh_handler_detach(sdev, scsi_dh);
+	}
+	put_device(&sdev->sdev_gendev);
+}
+EXPORT_SYMBOL_GPL(scsi_dh_detach);
+
+static struct notifier_block scsi_dh_nb = {
+	.notifier_call = scsi_dh_notifier
+};
+
+static int __init scsi_dh_init(void)
+{
+	int r;
+
+	r = bus_register_notifier(&scsi_bus_type, &scsi_dh_nb);
+
+	if (!r)
+		bus_for_each_dev(&scsi_bus_type, NULL, NULL,
+				 scsi_dh_sysfs_attr_add);
+
+	return r;
+}
+
+static void __exit scsi_dh_exit(void)
+{
+	bus_for_each_dev(&scsi_bus_type, NULL, NULL,
+			 scsi_dh_sysfs_attr_remove);
+	bus_unregister_notifier(&scsi_bus_type, &scsi_dh_nb);
+}
+
+module_init(scsi_dh_init);
+module_exit(scsi_dh_exit);
 
 MODULE_DESCRIPTION("SCSI device handler");
 MODULE_AUTHOR("Chandra Seetharaman <sekharan@us.ibm.com>");
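
Given the reworked core above, a handler module reduces to a devlist plus attach/detach/activate callbacks; registration walks the existing sdevs and attaches wherever the devlist matches. A hedged skeleton using only the fields scsi_dh.c above consumes (all example_* names are hypothetical):

    static const struct scsi_dh_devlist example_dev_list[] = {
        {"VENDOR", "MODEL"},		/* matched by prefix against the sdev */
        {NULL, NULL},
    };

    static struct scsi_device_handler example_dh = {
        .name		= "example",
        .devlist	= example_dev_list,
        .attach		= example_attach,	/* allocate sdev->scsi_dh_data */
        .detach		= example_detach,	/* free it again */
        .activate	= example_activate,	/* failover kick, may be NULL */
    };

    static int __init example_init(void)
    {
        return scsi_register_device_handler(&example_dh);
    }

    static void __exit example_exit(void)
    {
        scsi_unregister_device_handler(&example_dh);
    }

Attachment can also be driven per device through the new dh_state sysfs attribute shown in store_dh_state() above: writing a handler name attaches, "detach" detaches, and "activate" invokes the handler's activate callback.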
+802
drivers/scsi/device_handler/scsi_dh_alua.c
···
+/*
+ * Generic SCSI-3 ALUA SCSI Device Handler
+ *
+ * Copyright (C) 2007, 2008 Hannes Reinecke, SUSE Linux Products GmbH.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+#include <scsi/scsi.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_dh.h>
+
+#define ALUA_DH_NAME "alua"
+#define ALUA_DH_VER "1.2"
+
+#define TPGS_STATE_OPTIMIZED		0x0
+#define TPGS_STATE_NONOPTIMIZED		0x1
+#define TPGS_STATE_STANDBY		0x2
+#define TPGS_STATE_UNAVAILABLE		0x3
+#define TPGS_STATE_OFFLINE		0xe
+#define TPGS_STATE_TRANSITIONING	0xf
+
+#define TPGS_SUPPORT_NONE		0x00
+#define TPGS_SUPPORT_OPTIMIZED		0x01
+#define TPGS_SUPPORT_NONOPTIMIZED	0x02
+#define TPGS_SUPPORT_STANDBY		0x04
+#define TPGS_SUPPORT_UNAVAILABLE	0x08
+#define TPGS_SUPPORT_OFFLINE		0x40
+#define TPGS_SUPPORT_TRANSITION		0x80
+
+#define TPGS_MODE_UNINITIALIZED	-1
+#define TPGS_MODE_NONE			0x0
+#define TPGS_MODE_IMPLICIT		0x1
+#define TPGS_MODE_EXPLICIT		0x2
+
+#define ALUA_INQUIRY_SIZE		36
+#define ALUA_FAILOVER_TIMEOUT		(60 * HZ)
+#define ALUA_FAILOVER_RETRIES		5
+
+struct alua_dh_data {
+	int			group_id;
+	int			rel_port;
+	int			tpgs;
+	int			state;
+	unsigned char		inq[ALUA_INQUIRY_SIZE];
+	unsigned char		*buff;
+	int			bufflen;
+	unsigned char		sense[SCSI_SENSE_BUFFERSIZE];
+	int			senselen;
+};
+
+#define ALUA_POLICY_SWITCH_CURRENT	0
+#define ALUA_POLICY_SWITCH_ALL		1
+
+static inline struct alua_dh_data *get_alua_data(struct scsi_device *sdev)
+{
+	struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
+	BUG_ON(scsi_dh_data == NULL);
+	return ((struct alua_dh_data *) scsi_dh_data->buf);
+}
+
+static int realloc_buffer(struct alua_dh_data *h, unsigned len)
+{
+	if (h->buff && h->buff != h->inq)
+		kfree(h->buff);
+
+	h->buff = kmalloc(len, GFP_NOIO);
+	if (!h->buff) {
+		h->buff = h->inq;
+		h->bufflen = ALUA_INQUIRY_SIZE;
+		return 1;
+	}
+	h->bufflen = len;
+	return 0;
+}
+
+static struct request *get_alua_req(struct scsi_device *sdev,
+				    void *buffer, unsigned buflen, int rw)
+{
+	struct request *rq;
+	struct request_queue *q = sdev->request_queue;
+
+	rq = blk_get_request(q, rw, GFP_NOIO);
+
+	if (!rq) {
+		sdev_printk(KERN_INFO, sdev,
+			    "%s: blk_get_request failed\n", __func__);
+		return NULL;
+	}
+
+	if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
+		blk_put_request(rq);
+		sdev_printk(KERN_INFO, sdev,
+			    "%s: blk_rq_map_kern failed\n", __func__);
+		return NULL;
+	}
+
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
+	rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
+	rq->retries = ALUA_FAILOVER_RETRIES;
+	rq->timeout = ALUA_FAILOVER_TIMEOUT;
+
+	return rq;
+}
+
+/*
+ * submit_std_inquiry - Issue a standard INQUIRY command
+ * @sdev: sdev the command should be send to
+ */
+static int submit_std_inquiry(struct scsi_device *sdev, struct alua_dh_data *h)
+{
+	struct request *rq;
+	int err = SCSI_DH_RES_TEMP_UNAVAIL;
+
+	rq = get_alua_req(sdev, h->inq, ALUA_INQUIRY_SIZE, READ);
+	if (!rq)
+		goto done;
+
+	/* Prepare the command. */
+	rq->cmd[0] = INQUIRY;
+	rq->cmd[1] = 0;
+	rq->cmd[2] = 0;
+	rq->cmd[4] = ALUA_INQUIRY_SIZE;
+	rq->cmd_len = COMMAND_SIZE(INQUIRY);
+
+	rq->sense = h->sense;
+	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+	rq->sense_len = h->senselen = 0;
+
+	err = blk_execute_rq(rq->q, NULL, rq, 1);
+	if (err == -EIO) {
+		sdev_printk(KERN_INFO, sdev,
+			    "%s: std inquiry failed with %x\n",
+			    ALUA_DH_NAME, rq->errors);
+		h->senselen = rq->sense_len;
+		err = SCSI_DH_IO;
+	}
+	blk_put_request(rq);
+done:
+	return err;
+}
+
+/*
+ * submit_vpd_inquiry - Issue an INQUIRY VPD page 0x83 command
+ * @sdev: sdev the command should be sent to
+ */
+static int submit_vpd_inquiry(struct scsi_device *sdev, struct alua_dh_data *h)
+{
+	struct request *rq;
+	int err = SCSI_DH_RES_TEMP_UNAVAIL;
+
+	rq = get_alua_req(sdev, h->buff, h->bufflen, READ);
+	if (!rq)
+		goto done;
+
+	/* Prepare the command. */
+	rq->cmd[0] = INQUIRY;
+	rq->cmd[1] = 1;
+	rq->cmd[2] = 0x83;
+	rq->cmd[4] = h->bufflen;
+	rq->cmd_len = COMMAND_SIZE(INQUIRY);
+
+	rq->sense = h->sense;
+	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+	rq->sense_len = h->senselen = 0;
+
+	err = blk_execute_rq(rq->q, NULL, rq, 1);
+	if (err == -EIO) {
+		sdev_printk(KERN_INFO, sdev,
+			    "%s: evpd inquiry failed with %x\n",
+			    ALUA_DH_NAME, rq->errors);
+		h->senselen = rq->sense_len;
+		err = SCSI_DH_IO;
+	}
+	blk_put_request(rq);
+done:
+	return err;
+}
+
+/*
+ * submit_rtpg - Issue a REPORT TARGET GROUP STATES command
+ * @sdev: sdev the command should be sent to
+ */
+static unsigned submit_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
+{
+	struct request *rq;
+	int err = SCSI_DH_RES_TEMP_UNAVAIL;
+
+	rq = get_alua_req(sdev, h->buff, h->bufflen, READ);
+	if (!rq)
+		goto done;
+
+	/* Prepare the command. */
+	rq->cmd[0] = MAINTENANCE_IN;
+	rq->cmd[1] = MI_REPORT_TARGET_PGS;
+	rq->cmd[6] = (h->bufflen >> 24) & 0xff;
+	rq->cmd[7] = (h->bufflen >> 16) & 0xff;
+	rq->cmd[8] = (h->bufflen >>  8) & 0xff;
+	rq->cmd[9] = h->bufflen & 0xff;
+	rq->cmd_len = COMMAND_SIZE(MAINTENANCE_IN);
+
+	rq->sense = h->sense;
+	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+	rq->sense_len = h->senselen = 0;
+
+	err = blk_execute_rq(rq->q, NULL, rq, 1);
+	if (err == -EIO) {
+		sdev_printk(KERN_INFO, sdev,
+			    "%s: rtpg failed with %x\n",
+			    ALUA_DH_NAME, rq->errors);
+		h->senselen = rq->sense_len;
+		err = SCSI_DH_IO;
+	}
+	blk_put_request(rq);
+done:
+	return err;
+}
+
+/*
+ * submit_stpg - Issue a SET TARGET GROUP STATES command
+ * @sdev: sdev the command should be sent to
+ *
+ * Currently we're only setting the current target port group state
+ * to 'active/optimized' and let the array firmware figure out
+ * the states of the remaining groups.
+ */
+static unsigned submit_stpg(struct scsi_device *sdev, struct alua_dh_data *h)
+{
+	struct request *rq;
+	int err = SCSI_DH_RES_TEMP_UNAVAIL;
+	int stpg_len = 8;
+
+	/* Prepare the data buffer */
+	memset(h->buff, 0, stpg_len);
+	h->buff[4] = TPGS_STATE_OPTIMIZED & 0x0f;
+	h->buff[6] = (h->group_id >> 8) & 0x0f;
+	h->buff[7] = h->group_id & 0x0f;
+
+	rq = get_alua_req(sdev, h->buff, stpg_len, WRITE);
+	if (!rq)
+		goto done;
+
+	/* Prepare the command. */
+	rq->cmd[0] = MAINTENANCE_OUT;
+	rq->cmd[1] = MO_SET_TARGET_PGS;
+	rq->cmd[6] = (stpg_len >> 24) & 0xff;
+	rq->cmd[7] = (stpg_len >> 16) & 0xff;
+	rq->cmd[8] = (stpg_len >>  8) & 0xff;
+	rq->cmd[9] = stpg_len & 0xff;
+	rq->cmd_len = COMMAND_SIZE(MAINTENANCE_OUT);
+
+	rq->sense = h->sense;
+	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+	rq->sense_len = h->senselen = 0;
+
+	err = blk_execute_rq(rq->q, NULL, rq, 1);
+	if (err == -EIO) {
+		sdev_printk(KERN_INFO, sdev,
+			    "%s: stpg failed with %x\n",
+			    ALUA_DH_NAME, rq->errors);
+		h->senselen = rq->sense_len;
+		err = SCSI_DH_IO;
+	}
+	blk_put_request(rq);
+done:
+	return err;
+}
+
+/*
+ * alua_std_inquiry - Evaluate standard INQUIRY command
+ * @sdev: device to be checked
+ *
+ * Just extract the TPGS setting to find out if ALUA
+ * is supported.
+ */
+static int alua_std_inquiry(struct scsi_device *sdev, struct alua_dh_data *h)
+{
+	int err;
+
+	err = submit_std_inquiry(sdev, h);
+
+	if (err != SCSI_DH_OK)
+		return err;
+
+	/* Check TPGS setting */
+	h->tpgs = (h->inq[5] >> 4) & 0x3;
+	switch (h->tpgs) {
+	case TPGS_MODE_EXPLICIT|TPGS_MODE_IMPLICIT:
+		sdev_printk(KERN_INFO, sdev,
+			    "%s: supports implicit and explicit TPGS\n",
+			    ALUA_DH_NAME);
+		break;
+	case TPGS_MODE_EXPLICIT:
+		sdev_printk(KERN_INFO, sdev, "%s: supports explicit TPGS\n",
+			    ALUA_DH_NAME);
+		break;
+	case TPGS_MODE_IMPLICIT:
+		sdev_printk(KERN_INFO, sdev, "%s: supports implicit TPGS\n",
+			    ALUA_DH_NAME);
+		break;
+	default:
+		h->tpgs = TPGS_MODE_NONE;
+		sdev_printk(KERN_INFO, sdev, "%s: not supported\n",
+			    ALUA_DH_NAME);
+		err = SCSI_DH_DEV_UNSUPP;
+		break;
+	}
+
+	return err;
+}
+
+/*
+ * alua_vpd_inquiry - Evaluate INQUIRY vpd page 0x83
+ * @sdev: device to be checked
+ *
+ * Extract the relative target port and the target port group
+ * descriptor from the list of identificators.
+ */
+static int alua_vpd_inquiry(struct scsi_device *sdev, struct alua_dh_data *h)
+{
+	int len;
+	unsigned err;
+	unsigned char *d;
+
+ retry:
+	err = submit_vpd_inquiry(sdev, h);
+
+	if (err != SCSI_DH_OK)
+		return err;
+
+	/* Check if vpd page exceeds initial buffer */
+	len = (h->buff[2] << 8) + h->buff[3] + 4;
+	if (len > h->bufflen) {
+		/* Resubmit with the correct length */
+		if (realloc_buffer(h, len)) {
+			sdev_printk(KERN_WARNING, sdev,
+				    "%s: kmalloc buffer failed\n",
+				    ALUA_DH_NAME);
+			/* Temporary failure, bypass */
+			return SCSI_DH_DEV_TEMP_BUSY;
+		}
+		goto retry;
+	}
+
+	/*
+	 * Now look for the correct descriptor.
+	 */
+	d = h->buff + 4;
+	while (d < h->buff + len) {
+		switch (d[1] & 0xf) {
+		case 0x4:
+			/* Relative target port */
+			h->rel_port = (d[6] << 8) + d[7];
+			break;
+		case 0x5:
+			/* Target port group */
+			h->group_id = (d[6] << 8) + d[7];
+			break;
+		default:
+			break;
+		}
+		d += d[3] + 4;
+	}
+
+	if (h->group_id == -1) {
+		/*
+		 * Internal error; TPGS supported but required
+		 * VPD identification descriptors not present.
382 + * Disable ALUA support 383 + */ 384 + sdev_printk(KERN_INFO, sdev, 385 + "%s: No target port descriptors found\n", 386 + ALUA_DH_NAME); 387 + h->state = TPGS_STATE_OPTIMIZED; 388 + h->tpgs = TPGS_MODE_NONE; 389 + err = SCSI_DH_DEV_UNSUPP; 390 + } else { 391 + sdev_printk(KERN_INFO, sdev, 392 + "%s: port group %02x rel port %02x\n", 393 + ALUA_DH_NAME, h->group_id, h->rel_port); 394 + } 395 + 396 + return err; 397 + } 398 + 399 + static char print_alua_state(int state) 400 + { 401 + switch (state) { 402 + case TPGS_STATE_OPTIMIZED: 403 + return 'A'; 404 + case TPGS_STATE_NONOPTIMIZED: 405 + return 'N'; 406 + case TPGS_STATE_STANDBY: 407 + return 'S'; 408 + case TPGS_STATE_UNAVAILABLE: 409 + return 'U'; 410 + case TPGS_STATE_OFFLINE: 411 + return 'O'; 412 + case TPGS_STATE_TRANSITIONING: 413 + return 'T'; 414 + default: 415 + return 'X'; 416 + } 417 + } 418 + 419 + static int alua_check_sense(struct scsi_device *sdev, 420 + struct scsi_sense_hdr *sense_hdr) 421 + { 422 + switch (sense_hdr->sense_key) { 423 + case NOT_READY: 424 + if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) 425 + /* 426 + * LUN Not Accessible - ALUA state transition 427 + */ 428 + return NEEDS_RETRY; 429 + if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0b) 430 + /* 431 + * LUN Not Accessible -- Target port in standby state 432 + */ 433 + return SUCCESS; 434 + if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0c) 435 + /* 436 + * LUN Not Accessible -- Target port in unavailable state 437 + */ 438 + return SUCCESS; 439 + if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x12) 440 + /* 441 + * LUN Not Ready -- Offline 442 + */ 443 + return SUCCESS; 444 + break; 445 + case UNIT_ATTENTION: 446 + if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) 447 + /* 448 + * Power On, Reset, or Bus Device Reset, just retry. 449 + */ 450 + return NEEDS_RETRY; 451 + if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06) { 452 + /* 453 + * ALUA state changed 454 + */ 455 + return NEEDS_RETRY; 456 + } 457 + if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07) { 458 + /* 459 + * Implicit ALUA state transition failed 460 + */ 461 + return NEEDS_RETRY; 462 + } 463 + break; 464 + } 465 + 466 + return SCSI_RETURN_NOT_HANDLED; 467 + } 468 + 469 + /* 470 + * alua_stpg - Evaluate SET TARGET GROUP STATES 471 + * @sdev: the device to be evaluated 472 + * @state: the new target group state 473 + * 474 + * Send a SET TARGET GROUP STATES command to the device. 475 + * We only have to test here if we should resubmit the command; 476 + * any other error is assumed as a failure. 
477 + */ 478 + static int alua_stpg(struct scsi_device *sdev, int state, 479 + struct alua_dh_data *h) 480 + { 481 + struct scsi_sense_hdr sense_hdr; 482 + unsigned err; 483 + int retry = ALUA_FAILOVER_RETRIES; 484 + 485 + retry: 486 + err = submit_stpg(sdev, h); 487 + if (err == SCSI_DH_IO && h->senselen > 0) { 488 + err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, 489 + &sense_hdr); 490 + if (!err) 491 + return SCSI_DH_IO; 492 + err = alua_check_sense(sdev, &sense_hdr); 493 + if (retry > 0 && err == NEEDS_RETRY) { 494 + retry--; 495 + goto retry; 496 + } 497 + sdev_printk(KERN_INFO, sdev, 498 + "%s: stpg sense code: %02x/%02x/%02x\n", 499 + ALUA_DH_NAME, sense_hdr.sense_key, 500 + sense_hdr.asc, sense_hdr.ascq); 501 + err = SCSI_DH_IO; 502 + } 503 + if (err == SCSI_DH_OK) { 504 + h->state = state; 505 + sdev_printk(KERN_INFO, sdev, 506 + "%s: port group %02x switched to state %c\n", 507 + ALUA_DH_NAME, h->group_id, 508 + print_alua_state(h->state) ); 509 + } 510 + return err; 511 + } 512 + 513 + /* 514 + * alua_rtpg - Evaluate REPORT TARGET GROUP STATES 515 + * @sdev: the device to be evaluated. 516 + * 517 + * Evaluate the Target Port Group State. 518 + * Returns SCSI_DH_DEV_OFFLINED if the path is 519 + * found to be unuseable. 520 + */ 521 + static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h) 522 + { 523 + struct scsi_sense_hdr sense_hdr; 524 + int len, k, off, valid_states = 0; 525 + char *ucp; 526 + unsigned err; 527 + 528 + retry: 529 + err = submit_rtpg(sdev, h); 530 + 531 + if (err == SCSI_DH_IO && h->senselen > 0) { 532 + err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, 533 + &sense_hdr); 534 + if (!err) 535 + return SCSI_DH_IO; 536 + 537 + err = alua_check_sense(sdev, &sense_hdr); 538 + if (err == NEEDS_RETRY) 539 + goto retry; 540 + sdev_printk(KERN_INFO, sdev, 541 + "%s: rtpg sense code %02x/%02x/%02x\n", 542 + ALUA_DH_NAME, sense_hdr.sense_key, 543 + sense_hdr.asc, sense_hdr.ascq); 544 + err = SCSI_DH_IO; 545 + } 546 + if (err != SCSI_DH_OK) 547 + return err; 548 + 549 + len = (h->buff[0] << 24) + (h->buff[1] << 16) + 550 + (h->buff[2] << 8) + h->buff[3] + 4; 551 + 552 + if (len > h->bufflen) { 553 + /* Resubmit with the correct length */ 554 + if (realloc_buffer(h, len)) { 555 + sdev_printk(KERN_WARNING, sdev, 556 + "%s: kmalloc buffer failed\n",__func__); 557 + /* Temporary failure, bypass */ 558 + return SCSI_DH_DEV_TEMP_BUSY; 559 + } 560 + goto retry; 561 + } 562 + 563 + for (k = 4, ucp = h->buff + 4; k < len; k += off, ucp += off) { 564 + if (h->group_id == (ucp[2] << 8) + ucp[3]) { 565 + h->state = ucp[0] & 0x0f; 566 + valid_states = ucp[1]; 567 + } 568 + off = 8 + (ucp[7] * 4); 569 + } 570 + 571 + sdev_printk(KERN_INFO, sdev, 572 + "%s: port group %02x state %c supports %c%c%c%c%c%c\n", 573 + ALUA_DH_NAME, h->group_id, print_alua_state(h->state), 574 + valid_states&TPGS_SUPPORT_TRANSITION?'T':'t', 575 + valid_states&TPGS_SUPPORT_OFFLINE?'O':'o', 576 + valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u', 577 + valid_states&TPGS_SUPPORT_STANDBY?'S':'s', 578 + valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n', 579 + valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a'); 580 + 581 + if (h->tpgs & TPGS_MODE_EXPLICIT) { 582 + switch (h->state) { 583 + case TPGS_STATE_TRANSITIONING: 584 + /* State transition, retry */ 585 + goto retry; 586 + break; 587 + case TPGS_STATE_OFFLINE: 588 + /* Path is offline, fail */ 589 + err = SCSI_DH_DEV_OFFLINED; 590 + break; 591 + default: 592 + break; 593 + } 594 + } else { 595 + /* Only Implicit ALUA support */ 596 + 
if (h->state == TPGS_STATE_OPTIMIZED || 597 + h->state == TPGS_STATE_NONOPTIMIZED || 598 + h->state == TPGS_STATE_STANDBY) 599 + /* Useable path if active */ 600 + err = SCSI_DH_OK; 601 + else 602 + /* Path unuseable for unavailable/offline */ 603 + err = SCSI_DH_DEV_OFFLINED; 604 + } 605 + return err; 606 + } 607 + 608 + /* 609 + * alua_initialize - Initialize ALUA state 610 + * @sdev: the device to be initialized 611 + * 612 + * For the prep_fn to work correctly we have 613 + * to initialize the ALUA state for the device. 614 + */ 615 + static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h) 616 + { 617 + int err; 618 + 619 + err = alua_std_inquiry(sdev, h); 620 + if (err != SCSI_DH_OK) 621 + goto out; 622 + 623 + err = alua_vpd_inquiry(sdev, h); 624 + if (err != SCSI_DH_OK) 625 + goto out; 626 + 627 + err = alua_rtpg(sdev, h); 628 + if (err != SCSI_DH_OK) 629 + goto out; 630 + 631 + out: 632 + return err; 633 + } 634 + 635 + /* 636 + * alua_activate - activate a path 637 + * @sdev: device on the path to be activated 638 + * 639 + * We're currently switching the port group to be activated only and 640 + * let the array figure out the rest. 641 + * There may be other arrays which require us to switch all port groups 642 + * based on a certain policy. But until we actually encounter them it 643 + * should be okay. 644 + */ 645 + static int alua_activate(struct scsi_device *sdev) 646 + { 647 + struct alua_dh_data *h = get_alua_data(sdev); 648 + int err = SCSI_DH_OK; 649 + 650 + if (h->group_id != -1) { 651 + err = alua_rtpg(sdev, h); 652 + if (err != SCSI_DH_OK) 653 + goto out; 654 + } 655 + 656 + if (h->tpgs == TPGS_MODE_EXPLICIT && h->state != TPGS_STATE_OPTIMIZED) 657 + err = alua_stpg(sdev, TPGS_STATE_OPTIMIZED, h); 658 + 659 + out: 660 + return err; 661 + } 662 + 663 + /* 664 + * alua_prep_fn - request callback 665 + * 666 + * Fail I/O to all paths not in state 667 + * active/optimized or active/non-optimized. 
668 + */ 669 + static int alua_prep_fn(struct scsi_device *sdev, struct request *req) 670 + { 671 + struct alua_dh_data *h = get_alua_data(sdev); 672 + int ret = BLKPREP_OK; 673 + 674 + if (h->state != TPGS_STATE_OPTIMIZED && 675 + h->state != TPGS_STATE_NONOPTIMIZED) { 676 + ret = BLKPREP_KILL; 677 + req->cmd_flags |= REQ_QUIET; 678 + } 679 + return ret; 680 + 681 + } 682 + 683 + const struct scsi_dh_devlist alua_dev_list[] = { 684 + {"HP", "MSA VOLUME" }, 685 + {"HP", "HSV101" }, 686 + {"HP", "HSV111" }, 687 + {"HP", "HSV200" }, 688 + {"HP", "HSV210" }, 689 + {"HP", "HSV300" }, 690 + {"IBM", "2107900" }, 691 + {"IBM", "2145" }, 692 + {"Pillar", "Axiom" }, 693 + {NULL, NULL} 694 + }; 695 + 696 + static int alua_bus_attach(struct scsi_device *sdev); 697 + static void alua_bus_detach(struct scsi_device *sdev); 698 + 699 + static struct scsi_device_handler alua_dh = { 700 + .name = ALUA_DH_NAME, 701 + .module = THIS_MODULE, 702 + .devlist = alua_dev_list, 703 + .attach = alua_bus_attach, 704 + .detach = alua_bus_detach, 705 + .prep_fn = alua_prep_fn, 706 + .check_sense = alua_check_sense, 707 + .activate = alua_activate, 708 + }; 709 + 710 + /* 711 + * alua_bus_attach - Attach device handler 712 + * @sdev: device to be attached to 713 + */ 714 + static int alua_bus_attach(struct scsi_device *sdev) 715 + { 716 + struct scsi_dh_data *scsi_dh_data; 717 + struct alua_dh_data *h; 718 + unsigned long flags; 719 + int err = SCSI_DH_OK; 720 + 721 + scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *) 722 + + sizeof(*h) , GFP_KERNEL); 723 + if (!scsi_dh_data) { 724 + sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n", 725 + ALUA_DH_NAME); 726 + return -ENOMEM; 727 + } 728 + 729 + scsi_dh_data->scsi_dh = &alua_dh; 730 + h = (struct alua_dh_data *) scsi_dh_data->buf; 731 + h->tpgs = TPGS_MODE_UNINITIALIZED; 732 + h->state = TPGS_STATE_OPTIMIZED; 733 + h->group_id = -1; 734 + h->rel_port = -1; 735 + h->buff = h->inq; 736 + h->bufflen = ALUA_INQUIRY_SIZE; 737 + 738 + err = alua_initialize(sdev, h); 739 + if (err != SCSI_DH_OK) 740 + goto failed; 741 + 742 + if (!try_module_get(THIS_MODULE)) 743 + goto failed; 744 + 745 + spin_lock_irqsave(sdev->request_queue->queue_lock, flags); 746 + sdev->scsi_dh_data = scsi_dh_data; 747 + spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); 748 + 749 + return 0; 750 + 751 + failed: 752 + kfree(scsi_dh_data); 753 + sdev_printk(KERN_ERR, sdev, "%s: not attached\n", ALUA_DH_NAME); 754 + return -EINVAL; 755 + } 756 + 757 + /* 758 + * alua_bus_detach - Detach device handler 759 + * @sdev: device to be detached from 760 + */ 761 + static void alua_bus_detach(struct scsi_device *sdev) 762 + { 763 + struct scsi_dh_data *scsi_dh_data; 764 + struct alua_dh_data *h; 765 + unsigned long flags; 766 + 767 + spin_lock_irqsave(sdev->request_queue->queue_lock, flags); 768 + scsi_dh_data = sdev->scsi_dh_data; 769 + sdev->scsi_dh_data = NULL; 770 + spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); 771 + 772 + h = (struct alua_dh_data *) scsi_dh_data->buf; 773 + if (h->buff && h->inq != h->buff) 774 + kfree(h->buff); 775 + kfree(scsi_dh_data); 776 + module_put(THIS_MODULE); 777 + sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", ALUA_DH_NAME); 778 + } 779 + 780 + static int __init alua_init(void) 781 + { 782 + int r; 783 + 784 + r = scsi_register_device_handler(&alua_dh); 785 + if (r != 0) 786 + printk(KERN_ERR "%s: Failed to register scsi device handler", 787 + ALUA_DH_NAME); 788 + return r; 789 + } 790 + 791 + static void __exit alua_exit(void) 792 + { 793 
+ scsi_unregister_device_handler(&alua_dh); 794 + } 795 + 796 + module_init(alua_init); 797 + module_exit(alua_exit); 798 + 799 + MODULE_DESCRIPTION("DM Multipath ALUA support"); 800 + MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>"); 801 + MODULE_LICENSE("GPL"); 802 + MODULE_VERSION(ALUA_DH_VER);
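For reference, the REPORT TARGET GROUP STATES reply that alua_rtpg() walks above consists of a 4-byte big-endian length header followed by one descriptor per target port group: byte 0 carries the ALUA state in its low nibble, byte 1 the supported-states bitmap, bytes 2-3 the group id, and byte 7 the number of trailing 4-byte port entries. A minimal standalone sketch of that walk, in userspace C (the sample reply is hypothetical, not captured from hardware):

#include <stdio.h>

int main(void)
{
	/* Hypothetical RTPG reply: one group (id 1) in the non-optimized
	 * state, supporting A/N/S/T, with a single relative port (2). */
	unsigned char buff[] = {
		0x00, 0x00, 0x00, 0x0c,	/* returned data length: 12 */
		0x01, 0x87, 0x00, 0x01,	/* state, supported states, group id */
		0x00, 0x00, 0x00, 0x01,	/* status, vendor unique, rsvd, 1 port */
		0x00, 0x00, 0x00, 0x02,	/* port entry: relative port id 2 */
	};
	/* Same length computation as alua_rtpg() */
	int len = (buff[0] << 24) + (buff[1] << 16) +
		  (buff[2] << 8) + buff[3] + 4;
	unsigned char *ucp;
	int k, off;

	for (k = 4, ucp = buff + 4; k < len; k += off, ucp += off) {
		printf("group %04x state %x supports %02x (%d port(s))\n",
		       (ucp[2] << 8) + ucp[3], ucp[0] & 0x0f, ucp[1], ucp[7]);
		off = 8 + (ucp[7] * 4);	/* 8 fixed bytes + 4 per port */
	}
	return 0;
}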
+415 -241
drivers/scsi/device_handler/scsi_dh_emc.c
··· 25 25 #include <scsi/scsi_dh.h>
26 26 #include <scsi/scsi_device.h>
27 27
28 - #define CLARIION_NAME "emc_clariion"
28 + #define CLARIION_NAME "emc"
29 29
30 30 #define CLARIION_TRESPASS_PAGE 0x22
31 - #define CLARIION_BUFFER_SIZE 0x80
31 + #define CLARIION_BUFFER_SIZE 0xFC
32 32 #define CLARIION_TIMEOUT (60 * HZ)
33 33 #define CLARIION_RETRIES 3
34 34 #define CLARIION_UNBOUND_LU -1
35 + #define CLARIION_SP_A 0
36 + #define CLARIION_SP_B 1
37 +
38 + /* Flags */
39 + #define CLARIION_SHORT_TRESPASS 1
40 + #define CLARIION_HONOR_RESERVATIONS 2
41 +
42 + /* LUN states */
43 + #define CLARIION_LUN_UNINITIALIZED -1
44 + #define CLARIION_LUN_UNBOUND 0
45 + #define CLARIION_LUN_BOUND 1
46 + #define CLARIION_LUN_OWNED 2
35 47
36 48 static unsigned char long_trespass[] = {
37 - 0, 0, 0, 0,
49 + 0, 0, 0, 0, 0, 0, 0, 0,
38 50 CLARIION_TRESPASS_PAGE, /* Page code */
39 51 0x09, /* Page length - 2 */
40 - 0x81, /* Trespass code + Honor reservation bit */
41 - 0xff, 0xff, /* Trespass target */
42 - 0, 0, 0, 0, 0, 0 /* Reserved bytes / unknown */
43 - };
44 -
45 - static unsigned char long_trespass_hr[] = {
46 - 0, 0, 0, 0,
47 - CLARIION_TRESPASS_PAGE, /* Page code */
48 - 0x09, /* Page length - 2 */
49 - 0x01, /* Trespass code + Honor reservation bit */
52 + 0x01, /* Trespass code */
50 53 0xff, 0xff, /* Trespass target */
51 54 0, 0, 0, 0, 0, 0 /* Reserved bytes / unknown */
52 55 };
··· 58 55 0, 0, 0, 0,
59 56 CLARIION_TRESPASS_PAGE, /* Page code */
60 57 0x02, /* Page length - 2 */
61 - 0x81, /* Trespass code + Honor reservation bit */
58 + 0x01, /* Trespass code */
62 59 0xff, /* Trespass target */
63 60 };
64 61
65 - static unsigned char short_trespass_hr[] = {
66 - 0, 0, 0, 0,
67 - CLARIION_TRESPASS_PAGE, /* Page code */
68 - 0x02, /* Page length - 2 */
69 - 0x01, /* Trespass code + Honor reservation bit */
70 - 0xff, /* Trespass target */
62 + static const char * lun_state[] =
63 + {
64 + "not bound",
65 + "bound",
66 + "owned",
71 67 };
72 68
73 69 struct clariion_dh_data {
74 70 /*
71 + * Flags:
72 + * CLARIION_SHORT_TRESPASS
75 73 * Use short trespass command (FC-series) or the long version
76 74 * (default for AX/CX CLARiiON arrays).
77 - */
78 - unsigned short_trespass;
79 - /*
75 + *
76 + * CLARIION_HONOR_RESERVATIONS
80 77 * Whether or not (default) to honor SCSI reservations when
81 78 * initiating a switch-over.
82 79 */
83 - unsigned hr;
84 - /* I/O buffer for both MODE_SELECT and INQUIRY commands. */
80 + unsigned flags;
81 + /*
82 + * I/O buffer for both MODE_SELECT and INQUIRY commands.
83 + */
85 84 char buffer[CLARIION_BUFFER_SIZE];
86 85 /*
87 86 * SCSI sense buffer for commands -- assumes serial issuance
88 87 * and completion sequence of all commands for same multipath.
89 88 */
90 89 unsigned char sense[SCSI_SENSE_BUFFERSIZE];
91 - /* which SP (A=0,B=1,UNBOUND=-1) is dflt SP for path's mapped dev */
90 + unsigned int senselen;
91 + /*
92 + * LUN state
93 + */
94 + int lun_state;
95 + /*
96 + * SP Port number
97 + */
98 + int port;
99 + /*
100 + * which SP (A=0,B=1,UNBOUND=-1) is the default SP for this
101 + * path's mapped LUN
102 + */
92 103 int default_sp;
93 - /* which SP (A=0,B=1,UNBOUND=-1) is active for path's mapped dev */
104 + /*
105 + * which SP (A=0,B=1,UNBOUND=-1) is the active SP for this
106 + * path's mapped LUN
107 + */
94 108 int current_sp;
95 109 };
96 110
··· 122 102 /*
123 103 * Parse MODE_SELECT cmd reply.
124 104 */
125 - static int trespass_endio(struct scsi_device *sdev, int result)
105 + static int trespass_endio(struct scsi_device *sdev, char *sense)
126 106 {
127 - int err = SCSI_DH_OK;
107 + int err = SCSI_DH_IO;
128 108 struct scsi_sense_hdr sshdr;
129 - struct clariion_dh_data *csdev = get_clariion_data(sdev);
130 - char *sense = csdev->sense;
131 109
132 - if (status_byte(result) == CHECK_CONDITION &&
133 - scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)) {
134 - sdev_printk(KERN_ERR, sdev, "Found valid sense data 0x%2x, "
110 + if (!scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)) {
111 + sdev_printk(KERN_ERR, sdev, "%s: Found valid sense data 0x%2x, "
135 112 "0x%2x, 0x%2x while sending CLARiiON trespass "
136 - "command.\n", sshdr.sense_key, sshdr.asc,
137 - sshdr.ascq);
113 + "command.\n", CLARIION_NAME, sshdr.sense_key,
114 + sshdr.asc, sshdr.ascq);
138 115
139 116 if ((sshdr.sense_key == 0x05) && (sshdr.asc == 0x04) &&
140 117 (sshdr.ascq == 0x00)) {
··· 139 122 * Array based copy in progress -- do not send
140 123 * mode_select or copy will be aborted mid-stream.
141 124 */
142 - sdev_printk(KERN_INFO, sdev, "Array Based Copy in "
125 + sdev_printk(KERN_INFO, sdev, "%s: Array Based Copy in "
143 126 "progress while sending CLARiiON trespass "
144 - "command.\n");
127 + "command.\n", CLARIION_NAME);
145 128 err = SCSI_DH_DEV_TEMP_BUSY;
146 129 } else if ((sshdr.sense_key == 0x02) && (sshdr.asc == 0x04) &&
147 130 (sshdr.ascq == 0x03)) {
··· 149 132 * LUN Not Ready - Manual Intervention Required
150 133 * indicates in-progress ucode upgrade (NDU).
151 134 */
152 - sdev_printk(KERN_INFO, sdev, "Detected in-progress "
135 + sdev_printk(KERN_INFO, sdev, "%s: Detected in-progress "
153 136 "ucode upgrade NDU operation while sending "
154 - "CLARiiON trespass command.\n");
137 + "CLARiiON trespass command.\n", CLARIION_NAME);
155 138 err = SCSI_DH_DEV_TEMP_BUSY;
156 139 } else
157 140 err = SCSI_DH_DEV_FAILED;
158 - } else if (result) {
159 - sdev_printk(KERN_ERR, sdev, "Error 0x%x while sending "
160 - "CLARiiON trespass command.\n", result);
161 - err = SCSI_DH_IO;
141 + } else {
142 + sdev_printk(KERN_INFO, sdev,
143 + "%s: failed to send MODE SELECT, no sense available\n",
144 + CLARIION_NAME);
162 145 }
163 -
164 146 return err;
165 147 }
166 148
167 - static int parse_sp_info_reply(struct scsi_device *sdev, int result,
168 - int *default_sp, int *current_sp, int *new_current_sp)
149 + static int parse_sp_info_reply(struct scsi_device *sdev,
150 + struct clariion_dh_data *csdev)
169 151 {
170 152 int err = SCSI_DH_OK;
171 - struct clariion_dh_data *csdev = get_clariion_data(sdev);
172 153
173 - if (result == 0) {
174 - /* check for in-progress ucode upgrade (NDU) */
175 - if (csdev->buffer[48] != 0) {
176 - sdev_printk(KERN_NOTICE, sdev, "Detected in-progress "
177 - "ucode upgrade NDU operation while finding "
178 - "current active SP.");
179 - err = SCSI_DH_DEV_TEMP_BUSY;
180 - } else {
181 - *default_sp = csdev->buffer[5];
182 -
183 - if (csdev->buffer[4] == 2)
184 - /* SP for path is current */
185 - *current_sp = csdev->buffer[8];
186 - else {
187 - if (csdev->buffer[4] == 1)
188 - /* SP for this path is NOT current */
189 - if (csdev->buffer[8] == 0)
190 - *current_sp = 1;
191 - else
192 - *current_sp = 0;
193 - else
194 - /* unbound LU or LUNZ */
195 - *current_sp = CLARIION_UNBOUND_LU;
196 - }
197 - *new_current_sp = csdev->buffer[8];
198 - }
199 - } else {
200 - struct scsi_sense_hdr sshdr;
201 -
202 - err = SCSI_DH_IO;
203 -
204 - if (scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE,
205 - &sshdr))
206 - sdev_printk(KERN_ERR, sdev, "Found valid sense data "
207 - "0x%2x, 0x%2x, 0x%2x while finding current "
208 - "active SP.", sshdr.sense_key, sshdr.asc,
209 - sshdr.ascq);
210 - else
211 - sdev_printk(KERN_ERR, sdev, "Error 0x%x finding "
212 - "current active SP.", result);
154 + /* check for in-progress ucode upgrade (NDU) */
155 + if (csdev->buffer[48] != 0) {
156 + sdev_printk(KERN_NOTICE, sdev, "%s: Detected in-progress "
157 + "ucode upgrade NDU operation while finding "
158 + "current active SP.", CLARIION_NAME);
159 + err = SCSI_DH_DEV_TEMP_BUSY;
160 + goto out;
161 + }
162 + if (csdev->buffer[4] < 0 || csdev->buffer[4] > 2) {
163 + /* Invalid buffer format */
164 + sdev_printk(KERN_NOTICE, sdev,
165 + "%s: invalid VPD page 0xC0 format\n",
166 + CLARIION_NAME);
167 + err = SCSI_DH_NOSYS;
168 + goto out;
169 + }
170 + switch (csdev->buffer[28] & 0x0f) {
171 + case 6:
172 + sdev_printk(KERN_NOTICE, sdev,
173 + "%s: ALUA failover mode detected\n",
174 + CLARIION_NAME);
175 + break;
176 + case 4:
177 + /* Linux failover */
178 + break;
179 + default:
180 + sdev_printk(KERN_WARNING, sdev,
181 + "%s: Invalid failover mode %d\n",
182 + CLARIION_NAME, csdev->buffer[28] & 0x0f);
183 + err = SCSI_DH_NOSYS;
184 + goto out;
213 185 }
214 186
187 + csdev->default_sp = csdev->buffer[5];
188 + csdev->lun_state = csdev->buffer[4];
189 + csdev->current_sp = csdev->buffer[8];
190 + csdev->port = csdev->buffer[7];
191 +
192 + out:
215 193 return err;
216 194 }
217 195
218 - static int sp_info_endio(struct scsi_device *sdev, int result,
219 - int mode_select_sent, int *done)
196 + #define emc_default_str "FC (Legacy)"
197 +
198 + static char * parse_sp_model(struct scsi_device *sdev, unsigned char *buffer)
220 199 {
221 - struct clariion_dh_data *csdev = get_clariion_data(sdev);
222 - int err_flags, default_sp, current_sp, new_current_sp;
200 + unsigned char len = buffer[4] + 5;
201 + char *sp_model = NULL;
202 + unsigned char sp_len, serial_len;
223 203
224 - err_flags = parse_sp_info_reply(sdev, result, &default_sp,
225 - &current_sp, &new_current_sp);
226 -
227 - if (err_flags != SCSI_DH_OK)
228 - goto done;
229 -
230 - if (mode_select_sent) {
231 - csdev->default_sp = default_sp;
232 - csdev->current_sp = current_sp;
233 - } else {
234 - /*
235 - * Issue the actual module_selec request IFF either
236 - * (1) we do not know the identity of the current SP OR
237 - * (2) what we think we know is actually correct.
238 - */
239 - if ((current_sp != CLARIION_UNBOUND_LU) &&
240 - (new_current_sp != current_sp)) {
241 -
242 - csdev->default_sp = default_sp;
243 - csdev->current_sp = current_sp;
244 -
245 - sdev_printk(KERN_INFO, sdev, "Ignoring path group "
246 - "switch-over command for CLARiiON SP%s since "
247 - " mapped device is already initialized.",
248 - current_sp ? "B" : "A");
249 - if (done)
250 - *done = 1; /* as good as doing it */
204 + if (len < 160) {
205 + sdev_printk(KERN_WARNING, sdev,
206 + "%s: Invalid information section length %d\n",
207 + CLARIION_NAME, len);
208 + /* Check for old FC arrays */
209 + if (!strncmp(buffer + 8, "DGC", 3)) {
210 + /* Old FC array, not supporting extended information */
211 + sp_model = emc_default_str;
251 212 }
213 + goto out;
252 214 }
253 - done:
254 - return err_flags;
215 +
216 + /*
217 + * Parse extended information for SP model number
218 + */
219 + serial_len = buffer[160];
220 + if (serial_len == 0 || serial_len + 161 > len) {
221 + sdev_printk(KERN_WARNING, sdev,
222 + "%s: Invalid array serial number length %d\n",
223 + CLARIION_NAME, serial_len);
224 + goto out;
225 + }
226 + sp_len = buffer[99];
227 + if (sp_len == 0 || serial_len + sp_len + 161 > len) {
228 + sdev_printk(KERN_WARNING, sdev,
229 + "%s: Invalid model number length %d\n",
230 + CLARIION_NAME, sp_len);
231 + goto out;
232 + }
233 + sp_model = &buffer[serial_len + 161];
234 + /* Strip whitespace at the end */
235 + while (sp_len > 1 && sp_model[sp_len - 1] == ' ')
236 + sp_len--;
237 +
238 + sp_model[sp_len] = '\0';
239 +
240 + out:
241 + return sp_model;
255 242 }
256 243
257 244 /*
258 - * Get block request for REQ_BLOCK_PC command issued to path. Currently
259 - * limited to MODE_SELECT (trespass) and INQUIRY (VPD page 0xC0) commands.
260 - *
261 - * Uses data and sense buffers in hardware handler context structure and
262 - * assumes serial servicing of commands, both issuance and completion.
263 - */
264 - static struct request *get_req(struct scsi_device *sdev, int cmd)
245 + * Get block request for REQ_BLOCK_PC command issued to path. Currently
246 + * limited to MODE_SELECT (trespass) and INQUIRY (VPD page 0xC0) commands.
247 + *
248 + * Uses data and sense buffers in hardware handler context structure and
249 + * assumes serial servicing of commands, both issuance and completion.
250 + */
251 + static struct request *get_req(struct scsi_device *sdev, int cmd,
252 + unsigned char *buffer)
265 253 {
266 - struct clariion_dh_data *csdev = get_clariion_data(sdev);
267 254 struct request *rq;
268 - unsigned char *page22;
269 255 int len = 0;
270 256
271 257 rq = blk_get_request(sdev->request_queue,
272 - (cmd == MODE_SELECT) ? WRITE : READ, GFP_ATOMIC);
258 + (cmd == MODE_SELECT) ? WRITE : READ, GFP_NOIO);
273 259 if (!rq) {
274 260 sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed");
275 261 return NULL;
276 262 }
277 263
278 - memset(&rq->cmd, 0, BLK_MAX_CDB);
264 + memset(rq->cmd, 0, BLK_MAX_CDB);
265 + rq->cmd_len = COMMAND_SIZE(cmd);
279 266 rq->cmd[0] = cmd;
280 - rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
281 267
282 268 switch (cmd) {
283 269 case MODE_SELECT:
284 - if (csdev->short_trespass) {
285 - page22 = csdev->hr ? short_trespass_hr : short_trespass;
286 - len = sizeof(short_trespass);
287 - } else {
288 - page22 = csdev->hr ? long_trespass_hr : long_trespass;
289 - len = sizeof(long_trespass);
290 - }
291 - /*
292 - * Can't DMA from kernel BSS -- must copy selected trespass
293 - * command mode page contents to context buffer which is
294 - * allocated by kmalloc.
295 - */
296 - BUG_ON((len > CLARIION_BUFFER_SIZE));
297 - memcpy(csdev->buffer, page22, len);
270 + len = sizeof(short_trespass);
271 + rq->cmd_flags |= REQ_RW;
272 + rq->cmd[1] = 0x10;
273 + break;
274 + case MODE_SELECT_10:
275 + len = sizeof(long_trespass);
298 276 rq->cmd_flags |= REQ_RW;
299 277 rq->cmd[1] = 0x10;
300 278 break;
301 279 case INQUIRY:
302 - rq->cmd[1] = 0x1;
303 - rq->cmd[2] = 0xC0;
304 280 len = CLARIION_BUFFER_SIZE;
305 - memset(csdev->buffer, 0, CLARIION_BUFFER_SIZE);
281 + memset(buffer, 0, len);
306 282 break;
307 283 default:
308 284 BUG_ON(1);
··· 308 298 rq->timeout = CLARIION_TIMEOUT;
309 299 rq->retries = CLARIION_RETRIES;
310 300
311 - rq->sense = csdev->sense;
312 - memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
313 - rq->sense_len = 0;
314 -
315 - if (blk_rq_map_kern(sdev->request_queue, rq, csdev->buffer,
316 - len, GFP_ATOMIC)) {
317 - __blk_put_request(rq->q, rq);
301 + if (blk_rq_map_kern(rq->q, rq, buffer, len, GFP_NOIO)) {
302 + blk_put_request(rq);
318 303 return NULL;
319 304 }
320 305
321 306 return rq;
322 307 }
323 308
324 - static int send_cmd(struct scsi_device *sdev, int cmd)
309 + static int send_inquiry_cmd(struct scsi_device *sdev, int page,
310 + struct clariion_dh_data *csdev)
325 311 {
326 - struct request *rq = get_req(sdev, cmd);
312 + struct request *rq = get_req(sdev, INQUIRY, csdev->buffer);
313 + int err;
327 314
328 315 if (!rq)
329 316 return SCSI_DH_RES_TEMP_UNAVAIL;
330 317
331 - return blk_execute_rq(sdev->request_queue, NULL, rq, 1);
318 + rq->sense = csdev->sense;
319 + memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
320 + rq->sense_len = csdev->senselen = 0;
321 +
322 + rq->cmd[0] = INQUIRY;
323 + if (page != 0) {
324 + rq->cmd[1] = 1;
325 + rq->cmd[2] = page;
326 + }
327 + err = blk_execute_rq(sdev->request_queue, NULL, rq, 1);
328 + if (err == -EIO) {
329 + sdev_printk(KERN_INFO, sdev,
330 + "%s: failed to send %s INQUIRY: %x\n",
331 + CLARIION_NAME, page?"EVPD":"standard",
332 + rq->errors);
333 + csdev->senselen = rq->sense_len;
334 + err = SCSI_DH_IO;
335 + }
336 +
337 + blk_put_request(rq);
338 +
339 + return err;
332 340 }
333 341
334 - static int clariion_activate(struct scsi_device *sdev)
342 + static int send_trespass_cmd(struct scsi_device *sdev,
343 + struct clariion_dh_data *csdev)
335 344 {
336 - int result, done = 0;
345 + struct request *rq;
346 + unsigned char *page22;
347 + int err, len, cmd;
337 348
338 - result = send_cmd(sdev, INQUIRY);
339 - result = sp_info_endio(sdev, result, 0, &done);
340 - if (result || done)
341 - goto done;
349 + if (csdev->flags & CLARIION_SHORT_TRESPASS) {
350 + page22 = short_trespass;
351 + if (!(csdev->flags & CLARIION_HONOR_RESERVATIONS))
352 + /* Set Honor Reservations bit */
353 + page22[6] |= 0x80;
354 + len = sizeof(short_trespass);
355 + cmd = MODE_SELECT;
356 + } else {
357 + page22 = long_trespass;
358 + if (!(csdev->flags & CLARIION_HONOR_RESERVATIONS))
359 + /* Set Honor Reservations bit */
360 + page22[10] |= 0x80;
361 + len = sizeof(long_trespass);
362 + cmd = MODE_SELECT_10;
363 + }
364 + BUG_ON((len > CLARIION_BUFFER_SIZE));
365 + memcpy(csdev->buffer, page22, len);
342 366
343 - result = send_cmd(sdev, MODE_SELECT);
344 - result = trespass_endio(sdev, result);
345 - if (result)
346 - goto done;
367 + rq = get_req(sdev, cmd, csdev->buffer);
368 + if (!rq)
369 + return SCSI_DH_RES_TEMP_UNAVAIL;
347 370
348 - result = send_cmd(sdev, INQUIRY);
349 - result = sp_info_endio(sdev, result, 1, NULL);
350 - done:
351 - return result;
371 + rq->sense = csdev->sense;
372 + memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
373 + rq->sense_len = csdev->senselen = 0;
374 +
375 + err = blk_execute_rq(sdev->request_queue, NULL, rq, 1);
376 + if (err == -EIO) {
377 + if (rq->sense_len) {
378 + err = trespass_endio(sdev, csdev->sense);
379 + } else {
380 + sdev_printk(KERN_INFO, sdev,
381 + "%s: failed to send MODE SELECT: %x\n",
382 + CLARIION_NAME, rq->errors);
383 + }
384 + }
385 +
386 + blk_put_request(rq);
387 +
388 + return err;
352 389 }
353 390
354 391 static int clariion_check_sense(struct scsi_device *sdev,
··· 443 386 break;
444 387 }
445 388
446 - /* success just means we do not care what scsi-ml does */
447 - return SUCCESS;
389 + return SCSI_RETURN_NOT_HANDLED;
448 390 }
449 391
450 - static const struct {
451 - char *vendor;
452 - char *model;
453 - } clariion_dev_list[] = {
392 + static int clariion_prep_fn(struct scsi_device *sdev, struct request *req)
393 + {
394 + struct clariion_dh_data *h = get_clariion_data(sdev);
395 + int ret = BLKPREP_OK;
396 +
397 + if (h->lun_state != CLARIION_LUN_OWNED) {
398 + ret = BLKPREP_KILL;
399 + req->cmd_flags |= REQ_QUIET;
400 + }
401 + return ret;
402 +
403 + }
404 +
405 + static int clariion_std_inquiry(struct scsi_device *sdev,
406 + struct clariion_dh_data *csdev)
407 + {
408 + int err;
409 + char *sp_model;
410 +
411 + err = send_inquiry_cmd(sdev, 0, csdev);
412 + if (err != SCSI_DH_OK && csdev->senselen) {
413 + struct scsi_sense_hdr sshdr;
414 +
415 + if (scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE,
416 + &sshdr)) {
417 + sdev_printk(KERN_ERR, sdev, "%s: INQUIRY sense code "
418 + "%02x/%02x/%02x\n", CLARIION_NAME,
419 + sshdr.sense_key, sshdr.asc, sshdr.ascq);
420 + }
421 + err = SCSI_DH_IO;
422 + goto out;
423 + }
424 +
425 + sp_model = parse_sp_model(sdev, csdev->buffer);
426 + if (!sp_model) {
427 + err = SCSI_DH_DEV_UNSUPP;
428 + goto out;
429 + }
430 +
431 + /*
432 + * FC Series arrays do not support long trespass
433 + */
434 + if (!strlen(sp_model) || !strncmp(sp_model, "FC",2))
435 + csdev->flags |= CLARIION_SHORT_TRESPASS;
436 +
437 + sdev_printk(KERN_INFO, sdev,
438 + "%s: detected Clariion %s, flags %x\n",
439 + CLARIION_NAME, sp_model, csdev->flags);
440 + out:
441 + return err;
442 + }
443 +
444 + static int clariion_send_inquiry(struct scsi_device *sdev,
445 + struct clariion_dh_data *csdev)
446 + {
447 + int err, retry = CLARIION_RETRIES;
448 +
449 + retry:
450 + err = send_inquiry_cmd(sdev, 0xC0, csdev);
451 + if (err != SCSI_DH_OK && csdev->senselen) {
452 + struct scsi_sense_hdr sshdr;
453 +
454 + err = scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE,
455 + &sshdr);
456 + if (!err)
457 + return SCSI_DH_IO;
458 +
459 + err = clariion_check_sense(sdev, &sshdr);
460 + if (retry > 0 && err == NEEDS_RETRY) {
461 + retry--;
462 + goto retry;
463 + }
464 + sdev_printk(KERN_ERR, sdev, "%s: INQUIRY sense code "
465 + "%02x/%02x/%02x\n", CLARIION_NAME,
466 + sshdr.sense_key, sshdr.asc, sshdr.ascq);
467 + err = SCSI_DH_IO;
468 + } else {
469 + err = parse_sp_info_reply(sdev, csdev);
470 + }
471 + return err;
472 + }
473 +
474 + static int clariion_activate(struct scsi_device *sdev)
475 + {
476 + struct clariion_dh_data *csdev = get_clariion_data(sdev);
477 + int result;
478 +
479 + result = clariion_send_inquiry(sdev, csdev);
480 + if (result != SCSI_DH_OK)
481 + goto done;
482 +
483 + if (csdev->lun_state == CLARIION_LUN_OWNED)
484 + goto done;
485 +
486 + result = send_trespass_cmd(sdev, csdev);
487 + if (result != SCSI_DH_OK)
488 + goto done;
489 + sdev_printk(KERN_INFO, sdev,"%s: %s trespass command sent\n",
490 + CLARIION_NAME,
491 + csdev->flags&CLARIION_SHORT_TRESPASS?"short":"long" );
492 +
493 + /* Update status */
494 + result = clariion_send_inquiry(sdev, csdev);
495 + if (result != SCSI_DH_OK)
496 + goto done;
497 +
498 + done:
499 + sdev_printk(KERN_INFO, sdev,
500 + "%s: at SP %c Port %d (%s, default SP %c)\n",
501 + CLARIION_NAME, csdev->current_sp + 'A',
502 + csdev->port, lun_state[csdev->lun_state],
503 + csdev->default_sp + 'A');
504 +
505 + return result;
506 + }
507 +
508 + const struct scsi_dh_devlist clariion_dev_list[] = {
454 509 {"DGC", "RAID"},
455 510 {"DGC", "DISK"},
511 + {"DGC", "VRAID"},
456 512 {NULL, NULL},
457 513 };
458 514
459 - static int clariion_bus_notify(struct notifier_block *, unsigned long, void *);
515 + static int clariion_bus_attach(struct scsi_device *sdev);
516 + static void clariion_bus_detach(struct scsi_device *sdev);
460 517
461 518 static struct scsi_device_handler clariion_dh = {
462 519 .name = CLARIION_NAME,
463 520 .module = THIS_MODULE,
464 - .nb.notifier_call = clariion_bus_notify,
521 + .devlist = clariion_dev_list,
522 + .attach = clariion_bus_attach,
523 + .detach = clariion_bus_detach,
465 524 .check_sense = clariion_check_sense,
466 525 .activate = clariion_activate,
526 + .prep_fn = clariion_prep_fn,
467 527 };
468 528
469 529 /*
470 530 * TODO: need some interface so we can set trespass values
471 531 */
472 - static int clariion_bus_notify(struct notifier_block *nb,
473 - unsigned long action, void *data)
532 + static int clariion_bus_attach(struct scsi_device *sdev)
474 533 {
475 - struct device *dev = data;
476 - struct scsi_device *sdev;
477 534 struct scsi_dh_data *scsi_dh_data;
478 535 struct clariion_dh_data *h;
479 - int i, found = 0;
480 536 unsigned long flags;
537 + int err;
481 538
482 - if (!scsi_is_sdev_device(dev))
483 - return 0;
484 -
485 - sdev = to_scsi_device(dev);
486 -
487 - if (action == BUS_NOTIFY_ADD_DEVICE) {
488 - for (i = 0; clariion_dev_list[i].vendor; i++) {
489 - if (!strncmp(sdev->vendor, clariion_dev_list[i].vendor,
490 - strlen(clariion_dev_list[i].vendor)) &&
491 - !strncmp(sdev->model, clariion_dev_list[i].model,
492 - strlen(clariion_dev_list[i].model))) {
493 - found = 1;
494 - break;
495 - }
496 - }
497 - if (!found)
498 - goto out;
499 -
500 - scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
501 - + sizeof(*h) , GFP_KERNEL);
502 - if (!scsi_dh_data) {
503 - sdev_printk(KERN_ERR, sdev, "Attach failed %s.\n",
504 - CLARIION_NAME);
505 - goto out;
506 - }
507 -
508 - scsi_dh_data->scsi_dh = &clariion_dh;
509 - h = (struct clariion_dh_data *) scsi_dh_data->buf;
510 - h->default_sp = CLARIION_UNBOUND_LU;
511 - h->current_sp = CLARIION_UNBOUND_LU;
512 -
513 - spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
514 - sdev->scsi_dh_data = scsi_dh_data;
515 - spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
516 -
517 - sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", CLARIION_NAME);
518 - try_module_get(THIS_MODULE);
519 -
520 - } else if (action == BUS_NOTIFY_DEL_DEVICE) {
521 - if (sdev->scsi_dh_data == NULL ||
522 - sdev->scsi_dh_data->scsi_dh != &clariion_dh)
523 - goto out;
524 -
525 - spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
526 - scsi_dh_data = sdev->scsi_dh_data;
527 - sdev->scsi_dh_data = NULL;
528 - spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
529 -
530 - sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n",
539 + scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
540 + + sizeof(*h) , GFP_KERNEL);
541 + if (!scsi_dh_data) {
542 + sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
531 543 CLARIION_NAME);
532 -
533 - kfree(scsi_dh_data);
534 - module_put(THIS_MODULE);
544 + return -ENOMEM;
535 545 }
536 546
537 - out:
547 + scsi_dh_data->scsi_dh = &clariion_dh;
548 + h = (struct clariion_dh_data *) scsi_dh_data->buf;
549 + h->lun_state = CLARIION_LUN_UNINITIALIZED;
550 + h->default_sp = CLARIION_UNBOUND_LU;
551 + h->current_sp = CLARIION_UNBOUND_LU;
552 +
553 + err = clariion_std_inquiry(sdev, h);
554 + if (err != SCSI_DH_OK)
555 + goto failed;
556 +
557 + err = clariion_send_inquiry(sdev, h);
558 + if (err != SCSI_DH_OK)
559 + goto failed;
560 +
561 + if (!try_module_get(THIS_MODULE))
562 + goto failed;
563 +
564 + spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
565 + sdev->scsi_dh_data = scsi_dh_data;
566 + spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
567 +
568 + sdev_printk(KERN_INFO, sdev,
569 + "%s: connected to SP %c Port %d (%s, default SP %c)\n",
570 + CLARIION_NAME, h->current_sp + 'A',
571 + h->port, lun_state[h->lun_state],
572 + h->default_sp + 'A');
573 +
538 574 return 0;
575 +
576 + failed:
577 + kfree(scsi_dh_data);
578 + sdev_printk(KERN_ERR, sdev, "%s: not attached\n",
579 + CLARIION_NAME);
580 + return -EINVAL;
581 + }
582 +
583 + static void clariion_bus_detach(struct scsi_device *sdev)
584 + {
585 + struct scsi_dh_data *scsi_dh_data;
586 + unsigned long flags;
587 +
588 + spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
589 + scsi_dh_data = sdev->scsi_dh_data;
590 + sdev->scsi_dh_data = NULL;
591 + spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
592 +
593 + sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n",
594 + CLARIION_NAME);
595 +
596 + kfree(scsi_dh_data);
597 + module_put(THIS_MODULE);
539 598 }
540 599
541 600 static int __init clariion_init(void)
··· 660 487
661 488 r = scsi_register_device_handler(&clariion_dh);
662 489 if (r != 0)
663 - printk(KERN_ERR "Failed to register scsi device handler.");
490 + printk(KERN_ERR "%s: Failed to register scsi device handler.",
491 + CLARIION_NAME);
664 492 return r;
665 493 }
666 494
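For reference, send_trespass_cmd() above builds its MODE SELECT payload from a mode parameter header (4 bytes for MODE SELECT(6) on FC-series arrays, 8 bytes for MODE SELECT(10) on AX/CX arrays) followed by vendor page 0x22, and flips bit 7 of the trespass code when reservations are not to be honored. A standalone sketch of that payload construction in userspace C (illustrative only; the byte semantics follow the patch's tables above and are not independently verified against array documentation):

#include <stdio.h>
#include <string.h>

#define TRESPASS_PAGE	0x22

/* Fills buf and returns the total payload length. */
static size_t build_trespass(unsigned char *buf, int use_short, int honor)
{
	size_t hdr = use_short ? 4 : 8;		/* MODE SELECT(6) vs (10) header */
	size_t page = use_short ? 4 : 11;	/* page 0x22: code + length + body */

	memset(buf, 0, hdr + page);
	buf[hdr + 0] = TRESPASS_PAGE;	/* Page code */
	buf[hdr + 1] = page - 2;	/* Page length - 2 */
	buf[hdr + 2] = 0x01;		/* Trespass code */
	if (!honor)			/* same bit send_trespass_cmd() sets */
		buf[hdr + 2] |= 0x80;
	buf[hdr + 3] = 0xff;		/* Trespass target */
	if (!use_short)
		buf[hdr + 4] = 0xff;
	return hdr + page;
}

int main(void)
{
	unsigned char buf[32];
	size_t i, n = build_trespass(buf, 0, 1);	/* long form, honor resv */

	for (i = 0; i < n; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}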
+266 -92
drivers/scsi/device_handler/scsi_dh_hp_sw.c
··· 4 4 *
5 5 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
6 6 * Copyright (C) 2006 Mike Christie
7 + * Copyright (C) 2008 Hannes Reinecke <hare@suse.de>
7 8 *
8 9 * This program is free software; you can redistribute it and/or modify
9 10 * it under the terms of the GNU General Public License as published by
··· 26 25 #include <scsi/scsi_eh.h>
27 26 #include <scsi/scsi_dh.h>
28 27
29 - #define HP_SW_NAME "hp_sw"
28 + #define HP_SW_NAME "hp_sw"
30 29
31 - #define HP_SW_TIMEOUT (60 * HZ)
32 - #define HP_SW_RETRIES 3
30 + #define HP_SW_TIMEOUT (60 * HZ)
31 + #define HP_SW_RETRIES 3
32 +
33 + #define HP_SW_PATH_UNINITIALIZED -1
34 + #define HP_SW_PATH_ACTIVE 0
35 + #define HP_SW_PATH_PASSIVE 1
33 36
34 37 struct hp_sw_dh_data {
35 38 unsigned char sense[SCSI_SENSE_BUFFERSIZE];
39 + int path_state;
36 40 int retries;
37 41 };
38 42
··· 48 42 return ((struct hp_sw_dh_data *) scsi_dh_data->buf);
49 43 }
50 44
51 - static int hp_sw_done(struct scsi_device *sdev)
45 + /*
46 + * tur_done - Handle TEST UNIT READY return status
47 + * @sdev: sdev the command has been sent to
48 + * @sense: sense data returned by the command
49 + *
50 + * Returns SCSI_DH_DEV_OFFLINED if the sdev is on the passive path
51 + */
52 + static int tur_done(struct scsi_device *sdev, unsigned char *sense)
52 53 {
53 - struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
54 + struct scsi_sense_hdr sshdr;
55 + int ret;
56 +
57 + ret = scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr);
58 + if (!ret) {
59 + sdev_printk(KERN_WARNING, sdev,
60 + "%s: sending tur failed, no sense available\n",
61 + HP_SW_NAME);
62 + ret = SCSI_DH_IO;
63 + goto done;
64 + }
65 + switch (sshdr.sense_key) {
66 + case UNIT_ATTENTION:
67 + ret = SCSI_DH_IMM_RETRY;
68 + break;
69 + case NOT_READY:
70 + if ((sshdr.asc == 0x04) && (sshdr.ascq == 2)) {
71 + /*
72 + * LUN not ready - Initialization command required
73 + *
74 + * This is the passive path
75 + */
76 + ret = SCSI_DH_DEV_OFFLINED;
77 + break;
78 + }
79 + /* Fallthrough */
80 + default:
81 + sdev_printk(KERN_WARNING, sdev,
82 + "%s: sending tur failed, sense %x/%x/%x\n",
83 + HP_SW_NAME, sshdr.sense_key, sshdr.asc,
84 + sshdr.ascq);
85 + break;
86 + }
87 +
88 + done:
89 + return ret;
90 + }
91 +
92 + /*
93 + * hp_sw_tur - Send TEST UNIT READY
94 + * @sdev: sdev command should be sent to
95 + *
96 + * Use the TEST UNIT READY command to determine
97 + * the path state.
98 + */
99 + static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
100 + {
101 + struct request *req;
102 + int ret;
103 +
104 + req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
105 + if (!req)
106 + return SCSI_DH_RES_TEMP_UNAVAIL;
107 +
108 + req->cmd_type = REQ_TYPE_BLOCK_PC;
109 + req->cmd_flags |= REQ_FAILFAST;
110 + req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
111 + memset(req->cmd, 0, MAX_COMMAND_SIZE);
112 + req->cmd[0] = TEST_UNIT_READY;
113 + req->timeout = HP_SW_TIMEOUT;
114 + req->sense = h->sense;
115 + memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
116 + req->sense_len = 0;
117 +
118 + retry:
119 + ret = blk_execute_rq(req->q, NULL, req, 1);
120 + if (ret == -EIO) {
121 + if (req->sense_len > 0) {
122 + ret = tur_done(sdev, h->sense);
123 + } else {
124 + sdev_printk(KERN_WARNING, sdev,
125 + "%s: sending tur failed with %x\n",
126 + HP_SW_NAME, req->errors);
127 + ret = SCSI_DH_IO;
128 + }
129 + } else {
130 + h->path_state = HP_SW_PATH_ACTIVE;
131 + ret = SCSI_DH_OK;
132 + }
133 + if (ret == SCSI_DH_IMM_RETRY)
134 + goto retry;
135 + if (ret == SCSI_DH_DEV_OFFLINED) {
136 + h->path_state = HP_SW_PATH_PASSIVE;
137 + ret = SCSI_DH_OK;
138 + }
139 +
140 + blk_put_request(req);
141 +
142 + return ret;
143 + }
144 +
145 + /*
146 + * start_done - Handle START STOP UNIT return status
147 + * @sdev: sdev the command has been sent to
148 + * @sense: sense data returned by the command
149 + */
150 + static int start_done(struct scsi_device *sdev, unsigned char *sense)
151 + {
54 152 struct scsi_sense_hdr sshdr;
55 153 int rc;
56 154
57 - sdev_printk(KERN_INFO, sdev, "hp_sw_done\n");
58 -
59 - rc = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, &sshdr);
60 - if (!rc)
61 - goto done;
155 + rc = scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr);
156 + if (!rc) {
157 + sdev_printk(KERN_WARNING, sdev,
158 + "%s: sending start_stop_unit failed, "
159 + "no sense available\n",
160 + HP_SW_NAME);
161 + return SCSI_DH_IO;
162 + }
62 163 switch (sshdr.sense_key) {
63 164 case NOT_READY:
64 165 if ((sshdr.asc == 0x04) && (sshdr.ascq == 3)) {
166 + /*
167 + * LUN not ready - manual intervention required
168 + *
169 + * Switch-over in progress, retry.
170 + */
65 171 rc = SCSI_DH_RETRY;
66 - h->retries++;
67 172 break;
68 173 }
69 174 /* fall through */
70 175 default:
71 - h->retries++;
72 - rc = SCSI_DH_IMM_RETRY;
73 - }
74 -
75 - done:
76 - if (rc == SCSI_DH_OK || rc == SCSI_DH_IO)
77 - h->retries = 0;
78 - else if (h->retries > HP_SW_RETRIES) {
79 - h->retries = 0;
176 + sdev_printk(KERN_WARNING, sdev,
177 + "%s: sending start_stop_unit failed, sense %x/%x/%x\n",
178 + HP_SW_NAME, sshdr.sense_key, sshdr.asc,
179 + sshdr.ascq);
80 180 rc = SCSI_DH_IO;
81 181 }
182 +
82 183 return rc;
83 184 }
84 185
85 - static int hp_sw_activate(struct scsi_device *sdev)
186 + /*
187 + * hp_sw_start_stop - Send START STOP UNIT command
188 + * @sdev: sdev command should be sent to
189 + *
190 + * Sending START STOP UNIT activates the SP.
191 + */
192 + static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h)
86 193 {
87 - struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
88 194 struct request *req;
89 - int ret = SCSI_DH_RES_TEMP_UNAVAIL;
195 + int ret, retry;
90 196
91 - req = blk_get_request(sdev->request_queue, WRITE, GFP_ATOMIC);
197 + req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
92 198 if (!req)
93 - goto done;
94 -
95 - sdev_printk(KERN_INFO, sdev, "sending START_STOP.");
199 + return SCSI_DH_RES_TEMP_UNAVAIL;
96 200
97 201 req->cmd_type = REQ_TYPE_BLOCK_PC;
98 202 req->cmd_flags |= REQ_FAILFAST;
··· 214 98 req->sense = h->sense;
215 99 memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
216 100 req->sense_len = 0;
101 + retry = h->retries;
217 102
103 + retry:
218 104 ret = blk_execute_rq(req->q, NULL, req, 1);
219 - if (!ret) /* SUCCESS */
220 - ret = hp_sw_done(sdev);
221 - else
105 + if (ret == -EIO) {
106 + if (req->sense_len > 0) {
107 + ret = start_done(sdev, h->sense);
108 + } else {
109 + sdev_printk(KERN_WARNING, sdev,
110 + "%s: sending start_stop_unit failed with %x\n",
111 + HP_SW_NAME, req->errors);
112 + ret = SCSI_DH_IO;
113 + }
114 + } else
115 + ret = SCSI_DH_OK;
116 +
117 + if (ret == SCSI_DH_RETRY) {
118 + if (--retry)
119 + goto retry;
222 120 ret = SCSI_DH_IO;
223 - done:
121 + }
122 +
123 + blk_put_request(req);
124 +
224 125 return ret;
225 126 }
226 127
227 - static const struct {
228 - char *vendor;
229 - char *model;
230 - } hp_sw_dh_data_list[] = {
231 - {"COMPAQ", "MSA"},
232 - {"HP", "HSV"},
128 + static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req)
129 + {
130 + struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
131 + int ret = BLKPREP_OK;
132 +
133 + if (h->path_state != HP_SW_PATH_ACTIVE) {
134 + ret = BLKPREP_KILL;
135 + req->cmd_flags |= REQ_QUIET;
136 + }
137 + return ret;
138 +
139 + }
140 +
141 + /*
142 + * hp_sw_activate - Activate a path
143 + * @sdev: sdev on the path to be activated
144 + *
145 + * The HP Active/Passive firmware is pretty simple;
146 + * the passive path reports NOT READY with sense codes
147 + * 0x04/0x02; a START STOP UNIT command will then
148 + * activate the passive path (and deactivate the
149 + * previously active one).
150 + */
151 + static int hp_sw_activate(struct scsi_device *sdev)
152 + {
153 + int ret = SCSI_DH_OK;
154 + struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
155 +
156 + ret = hp_sw_tur(sdev, h);
157 +
158 + if (ret == SCSI_DH_OK && h->path_state == HP_SW_PATH_PASSIVE) {
159 + ret = hp_sw_start_stop(sdev, h);
160 + if (ret == SCSI_DH_OK)
161 + sdev_printk(KERN_INFO, sdev,
162 + "%s: activated path\n",
163 + HP_SW_NAME);
164 + }
165 +
166 + return ret;
167 + }
168 +
169 + const struct scsi_dh_devlist hp_sw_dh_data_list[] = {
170 + {"COMPAQ", "MSA1000 VOLUME"},
171 + {"COMPAQ", "HSV110"},
172 + {"HP", "HSV100"},
233 173 {"DEC", "HSG80"},
234 174 {NULL, NULL},
235 175 };
236 176
237 - static int hp_sw_bus_notify(struct notifier_block *, unsigned long, void *);
177 + static int hp_sw_bus_attach(struct scsi_device *sdev);
178 + static void hp_sw_bus_detach(struct scsi_device *sdev);
238 179
239 180 static struct scsi_device_handler hp_sw_dh = {
240 181 .name = HP_SW_NAME,
241 182 .module = THIS_MODULE,
242 - .nb.notifier_call = hp_sw_bus_notify,
183 + .devlist = hp_sw_dh_data_list,
184 + .attach = hp_sw_bus_attach,
185 + .detach = hp_sw_bus_detach,
243 186 .activate = hp_sw_activate,
187 + .prep_fn = hp_sw_prep_fn,
244 188 };
245 189
246 - static int hp_sw_bus_notify(struct notifier_block *nb,
247 - unsigned long action, void *data)
190 + static int hp_sw_bus_attach(struct scsi_device *sdev)
248 191 {
249 - struct device *dev = data;
250 - struct scsi_device *sdev;
251 192 struct scsi_dh_data *scsi_dh_data;
252 - int i, found = 0;
193 + struct hp_sw_dh_data *h;
253 194 unsigned long flags;
195 + int ret;
254 196
255 - if (!scsi_is_sdev_device(dev))
197 + scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
198 + + sizeof(struct hp_sw_dh_data) , GFP_KERNEL);
199 + if (!scsi_dh_data) {
200 + sdev_printk(KERN_ERR, sdev, "%s: Attach Failed\n",
201 + HP_SW_NAME);
256 202 return 0;
257 -
258 - sdev = to_scsi_device(dev);
259 -
260 - if (action == BUS_NOTIFY_ADD_DEVICE) {
261 - for (i = 0; hp_sw_dh_data_list[i].vendor; i++) {
262 - if (!strncmp(sdev->vendor, hp_sw_dh_data_list[i].vendor,
263 - strlen(hp_sw_dh_data_list[i].vendor)) &&
264 - !strncmp(sdev->model, hp_sw_dh_data_list[i].model,
265 - strlen(hp_sw_dh_data_list[i].model))) {
266 - found = 1;
267 - break;
268 - }
269 - }
270 - if (!found)
271 - goto out;
272 -
273 - scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
274 - + sizeof(struct hp_sw_dh_data) , GFP_KERNEL);
275 - if (!scsi_dh_data) {
276 - sdev_printk(KERN_ERR, sdev, "Attach Failed %s.\n",
277 - HP_SW_NAME);
278 - goto out;
279 - }
280 -
281 - scsi_dh_data->scsi_dh = &hp_sw_dh;
282 - spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
283 - sdev->scsi_dh_data = scsi_dh_data;
284 - spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
285 - try_module_get(THIS_MODULE);
286 -
287 - sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", HP_SW_NAME);
288 - } else if (action == BUS_NOTIFY_DEL_DEVICE) {
289 - if (sdev->scsi_dh_data == NULL ||
290 - sdev->scsi_dh_data->scsi_dh != &hp_sw_dh)
291 - goto out;
292 -
293 - spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
294 - scsi_dh_data = sdev->scsi_dh_data;
295 - sdev->scsi_dh_data = NULL;
296 - spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
297 - module_put(THIS_MODULE);
298 -
299 - sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n", HP_SW_NAME);
300 -
301 - kfree(scsi_dh_data);
302 204 }
303 205
304 - out:
205 + scsi_dh_data->scsi_dh = &hp_sw_dh;
206 + h = (struct hp_sw_dh_data *) scsi_dh_data->buf;
207 + h->path_state = HP_SW_PATH_UNINITIALIZED;
208 + h->retries = HP_SW_RETRIES;
209 +
210 + ret = hp_sw_tur(sdev, h);
211 + if (ret != SCSI_DH_OK || h->path_state == HP_SW_PATH_UNINITIALIZED)
212 + goto failed;
213 +
214 + if (!try_module_get(THIS_MODULE))
215 + goto failed;
216 +
217 + spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
218 + sdev->scsi_dh_data = scsi_dh_data;
219 + spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
220 +
221 + sdev_printk(KERN_INFO, sdev, "%s: attached to %s path\n",
222 + HP_SW_NAME, h->path_state == HP_SW_PATH_ACTIVE?
223 + "active":"passive");
224 +
305 225 return 0;
226 +
227 + failed:
228 + kfree(scsi_dh_data);
229 + sdev_printk(KERN_ERR, sdev, "%s: not attached\n",
230 + HP_SW_NAME);
231 + return -EINVAL;
232 + }
233 +
234 + static void hp_sw_bus_detach( struct scsi_device *sdev )
235 + {
236 + struct scsi_dh_data *scsi_dh_data;
237 + unsigned long flags;
238 +
239 + spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
240 + scsi_dh_data = sdev->scsi_dh_data;
241 + sdev->scsi_dh_data = NULL;
242 + spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
243 + module_put(THIS_MODULE);
244 +
245 + sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", HP_SW_NAME);
246 +
247 + kfree(scsi_dh_data);
306 248 }
307 249
308 250 static int __init hp_sw_init(void)
··· 376 202 module_init(hp_sw_init);
377 203 module_exit(hp_sw_exit);
378 204
379 - MODULE_DESCRIPTION("HP MSA 1000");
205 + MODULE_DESCRIPTION("HP Active/Passive driver");
380 206 MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu");
381 207 MODULE_LICENSE("GPL");
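The probe logic above reduces to a small decision table: a TEST UNIT READY that completes cleanly marks the active path, NOT READY with ASC/ASCQ 0x04/0x02 ("initialization command required") marks the passive one, and UNIT ATTENTION is retried. A standalone sketch of that classification in userspace C (the SCSI_DH_* return codes are replaced by a local enum purely for illustration):

#include <stdio.h>

enum hp_sw_path { PATH_ACTIVE, PATH_PASSIVE, PATH_RETRY, PATH_ERROR };

/* Mirrors the decision tur_done()/hp_sw_tur() make from the TUR result. */
static enum hp_sw_path classify_tur(int ok, int key, int asc, int ascq)
{
	if (ok)
		return PATH_ACTIVE;	/* clean TUR: active path */
	if (key == 0x06)		/* UNIT ATTENTION: retry the TUR */
		return PATH_RETRY;
	if (key == 0x02 && asc == 0x04 && ascq == 0x02)
		return PATH_PASSIVE;	/* passive; START STOP UNIT activates */
	return PATH_ERROR;
}

int main(void)
{
	/* NOT READY, 04/02: the passive path. Prints 1 (PATH_PASSIVE). */
	printf("%d\n", classify_tur(0, 0x02, 0x04, 0x02));
	return 0;
}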
+134 -132
drivers/scsi/device_handler/scsi_dh_rdac.c
··· 173 173 #define RDAC_STATE_ACTIVE 0
174 174 #define RDAC_STATE_PASSIVE 1
175 175 unsigned char state;
176 +
177 + #define RDAC_LUN_UNOWNED 0
178 + #define RDAC_LUN_OWNED 1
179 + #define RDAC_LUN_AVT 2
180 + char lun_state;
176 181 unsigned char sense[SCSI_SENSE_BUFFERSIZE];
177 182 union {
178 183 struct c2_inquiry c2;
··· 185 180 struct c8_inquiry c8;
186 181 struct c9_inquiry c9;
187 182 } inq;
183 + };
184 +
185 + static const char *lun_state[] =
186 + {
187 + "unowned",
188 + "owned",
189 + "owned (AVT mode)",
188 190 };
189 191
190 192 static LIST_HEAD(ctlr_list);
··· 209 197 {
210 198 struct request *rq;
211 199 struct request_queue *q = sdev->request_queue;
212 - struct rdac_dh_data *h = get_rdac_data(sdev);
213 200
214 - rq = blk_get_request(q, rw, GFP_KERNEL);
201 + rq = blk_get_request(q, rw, GFP_NOIO);
215 202
216 203 if (!rq) {
217 204 sdev_printk(KERN_INFO, sdev,
··· 218 207 return NULL;
219 208 }
220 209
221 - if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_KERNEL)) {
210 + if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
222 211 blk_put_request(rq);
223 212 sdev_printk(KERN_INFO, sdev,
224 213 "get_rdac_req: blk_rq_map_kern failed.\n");
225 214 return NULL;
226 215 }
227 216
228 - memset(&rq->cmd, 0, BLK_MAX_CDB);
229 - rq->sense = h->sense;
230 - memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
231 - rq->sense_len = 0;
217 + memset(rq->cmd, 0, BLK_MAX_CDB);
232 218
233 219 rq->cmd_type = REQ_TYPE_BLOCK_PC;
234 220 rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
··· 235 227 return rq;
236 228 }
237 229
238 - static struct request *rdac_failover_get(struct scsi_device *sdev)
230 + static struct request *rdac_failover_get(struct scsi_device *sdev,
231 + struct rdac_dh_data *h)
239 232 {
240 233 struct request *rq;
241 234 struct rdac_mode_common *common;
242 235 unsigned data_size;
243 - struct rdac_dh_data *h = get_rdac_data(sdev);
244 236
245 237 if (h->ctlr->use_ms10) {
246 238 struct rdac_pg_expanded *rdac_pg;
··· 284 276 rq->cmd[4] = data_size;
285 277 }
286 278 rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
279 +
280 + rq->sense = h->sense;
281 + memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
282 + rq->sense_len = 0;
287 283
288 284 return rq;
289 285 }
··· 333 321 }
334 322
335 323 static int submit_inquiry(struct scsi_device *sdev, int page_code,
336 - unsigned int len)
324 + unsigned int len, struct rdac_dh_data *h)
337 325 {
338 326 struct request *rq;
339 327 struct request_queue *q = sdev->request_queue;
340 - struct rdac_dh_data *h = get_rdac_data(sdev);
341 328 int err = SCSI_DH_RES_TEMP_UNAVAIL;
342 329
343 330 rq = get_rdac_req(sdev, &h->inq, len, READ);
··· 349 338 rq->cmd[2] = page_code;
350 339 rq->cmd[4] = len;
351 340 rq->cmd_len = COMMAND_SIZE(INQUIRY);
341 +
342 + rq->sense = h->sense;
343 + memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
344 + rq->sense_len = 0;
345 +
352 346 err = blk_execute_rq(q, NULL, rq, 1);
353 347 if (err == -EIO)
354 348 err = SCSI_DH_IO;
349 +
350 + blk_put_request(rq);
355 351 done:
356 352 return err;
357 353 }
358 354
359 - static int get_lun(struct scsi_device *sdev)
355 + static int get_lun(struct scsi_device *sdev, struct rdac_dh_data *h)
360 356 {
361 357 int err;
362 358 struct c8_inquiry *inqp;
363 - struct rdac_dh_data *h = get_rdac_data(sdev);
364 359
365 - err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry));
360 + err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry), h);
366 361 if (err == SCSI_DH_OK) {
367 362 inqp = &h->inq.c8;
368 - h->lun = inqp->lun[7]; /* currently it uses only one byte */
363 + if (inqp->page_code != 0xc8)
364 + return SCSI_DH_NOSYS;
365 + if (inqp->page_id[0] != 'e' || inqp->page_id[1] != 'd' ||
366 + inqp->page_id[2] != 'i' || inqp->page_id[3] != 'd')
367 + return SCSI_DH_NOSYS;
368 + h->lun = scsilun_to_int((struct scsi_lun *)inqp->lun);
369 369 }
370 370 return err;
371 371 }
372 372
373 - #define RDAC_OWNED 0
374 - #define RDAC_UNOWNED 1
375 - #define RDAC_FAILED 2
376 - static int check_ownership(struct scsi_device *sdev)
373 + static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
377 374 {
378 375 int err;
379 376 struct c9_inquiry *inqp;
380 - struct rdac_dh_data *h = get_rdac_data(sdev);
381 377
382 - err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry));
378 + err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h);
383 379 if (err == SCSI_DH_OK) {
384 - err = RDAC_UNOWNED;
385 380 inqp = &h->inq.c9;
386 - /*
387 - * If in AVT mode or if the path already owns the LUN,
388 - * return RDAC_OWNED;
389 - */
390 - if (((inqp->avte_cvp >> 7) == 0x1) ||
391 - ((inqp->avte_cvp & 0x1) != 0))
392 - err = RDAC_OWNED;
393 - } else
394 - err = RDAC_FAILED;
381 + if ((inqp->avte_cvp >> 7) == 0x1) {
382 + /* LUN in AVT mode */
383 + sdev_printk(KERN_NOTICE, sdev,
384 + "%s: AVT mode detected\n",
385 + RDAC_NAME);
386 + h->lun_state = RDAC_LUN_AVT;
387 + } else if ((inqp->avte_cvp & 0x1) != 0) {
388 + /* LUN was owned by the controller */
389 + h->lun_state = RDAC_LUN_OWNED;
390 + }
391 + }
392 +
395 393 return err;
396 394 }
397 395
398 - static int initialize_controller(struct scsi_device *sdev)
396 + static int initialize_controller(struct scsi_device *sdev,
397 + struct rdac_dh_data *h)
399 398 {
400 399 int err;
401 400 struct c4_inquiry *inqp;
402 - struct rdac_dh_data *h = get_rdac_data(sdev);
403 401
404 - err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry));
402 + err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h);
405 403 if (err == SCSI_DH_OK) {
406 404 inqp = &h->inq.c4;
407 405 h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id);
··· 420 400 return err;
421 401 }
422 402
423 - static int set_mode_select(struct scsi_device *sdev)
403 + static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
424 404 {
425 405 int err;
426 406 struct c2_inquiry *inqp;
427 - struct rdac_dh_data *h = get_rdac_data(sdev);
428 407
429 - err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry));
408 + err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry), h);
430 409 if (err == SCSI_DH_OK) {
431 410 inqp = &h->inq.c2;
432 411 /*
··· 440 421 return err;
441 422 }
442 423
443 - static int mode_select_handle_sense(struct scsi_device *sdev)
424 + static int mode_select_handle_sense(struct scsi_device *sdev,
425 + unsigned char *sensebuf)
444 426 {
445 427 struct scsi_sense_hdr sense_hdr;
446 - struct rdac_dh_data *h = get_rdac_data(sdev);
447 428 int sense, err = SCSI_DH_IO, ret;
448 429
449 - ret = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
430 + ret = scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
450 431 if (!ret)
451 432 goto done;
452 433
··· 470 451 return err;
471 452 }
472 453
473 - static int send_mode_select(struct scsi_device *sdev)
454 + static int send_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
474 455 {
475 456 struct request *rq;
476 457 struct request_queue *q = sdev->request_queue;
477 - struct rdac_dh_data *h = get_rdac_data(sdev);
478 458 int err = SCSI_DH_RES_TEMP_UNAVAIL;
479 459
480 - rq = rdac_failover_get(sdev);
460 + rq = rdac_failover_get(sdev, h);
481 461 if (!rq)
482 462 goto done;
··· 484 466
485 467 err = blk_execute_rq(q, NULL, rq, 1);
486 468 if (err != SCSI_DH_OK)
487 - err = mode_select_handle_sense(sdev);
469 + err = mode_select_handle_sense(sdev, h->sense);
488 470 if (err == SCSI_DH_OK)
489 471 h->state = RDAC_STATE_ACTIVE;
472 +
473 + blk_put_request(rq);
490 474 done:
491 475 return err;
492 476 }
··· 498 478 struct rdac_dh_data *h = get_rdac_data(sdev);
499 479 int err = SCSI_DH_OK;
500 480
501 - if (h->lun == UNINITIALIZED_LUN) {
502 - err = get_lun(sdev);
503 - if (err != SCSI_DH_OK)
504 - goto done;
505 - }
506 -
507 - err = check_ownership(sdev);
508 - switch (err) {
509 - case RDAC_UNOWNED:
510 - break;
511 - case RDAC_OWNED:
512 - err = SCSI_DH_OK;
481 + err = check_ownership(sdev, h);
482 + if (err != SCSI_DH_OK)
513 483 goto done;
514 - case RDAC_FAILED:
515 - default:
516 - err = SCSI_DH_IO;
517 - goto done;
518 - }
519 484
520 485 if (!h->ctlr) {
521 - err = initialize_controller(sdev);
486 + err = initialize_controller(sdev, h);
522 487 if (err != SCSI_DH_OK)
523 488 goto done;
524 489 }
525 490
526 491 if (h->ctlr->use_ms10 == -1) {
527 - err = set_mode_select(sdev);
492 + err = set_mode_select(sdev, h);
528 493 if (err != SCSI_DH_OK)
529 494 goto done;
530 495 }
531 -
532 - err = send_mode_select(sdev);
496 + if (h->lun_state == RDAC_LUN_UNOWNED)
497 + err = send_mode_select(sdev, h);
533 498 done:
534 499 return err;
535 500 }
··· 574 569 return SCSI_RETURN_NOT_HANDLED;
575 570 }
576 571
577 - static const struct {
578 - char *vendor;
579 - char *model;
580 - } rdac_dev_list[] = {
572 + const struct scsi_dh_devlist rdac_dev_list[] = {
581 573 {"IBM", "1722"},
582 574 {"IBM", "1724"},
583 575 {"IBM", "1726"},
··· 592 590 {NULL, NULL},
593 591 };
594 592
595 - static int rdac_bus_notify(struct notifier_block *, unsigned long, void *);
593 + static int rdac_bus_attach(struct scsi_device *sdev);
594 + static void rdac_bus_detach(struct scsi_device *sdev);
596 595
597 596 static struct scsi_device_handler rdac_dh = {
598 597 .name = RDAC_NAME,
599 598 .module = THIS_MODULE,
600 - .nb.notifier_call = rdac_bus_notify,
599 + .devlist = rdac_dev_list,
601 600 .prep_fn = rdac_prep_fn,
602 601 .check_sense = rdac_check_sense,
602 + .attach = rdac_bus_attach,
603 + .detach = rdac_bus_detach,
603 604 .activate = rdac_activate,
604 605 };
605 606
606 - /*
607 - * TODO: need some interface so we can set trespass values
608 - */
609 - static int rdac_bus_notify(struct notifier_block *nb,
610 - unsigned long action, void *data)
607 + static int rdac_bus_attach(struct scsi_device *sdev)
611 608 {
612 - struct device *dev = data;
613 - struct scsi_device *sdev;
614 609 struct scsi_dh_data *scsi_dh_data;
615 610 struct rdac_dh_data *h;
616 - int i, found = 0;
617 611 unsigned long flags;
612 + int err;
618 613
619 - if (!scsi_is_sdev_device(dev))
614 + scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
615 + + sizeof(*h) , GFP_KERNEL);
616 + if (!scsi_dh_data) {
617 + sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
618 + RDAC_NAME);
620 619 return 0;
621 -
622 - sdev = to_scsi_device(dev);
623 -
624 - if (action == BUS_NOTIFY_ADD_DEVICE) {
625 - for (i = 0; rdac_dev_list[i].vendor; i++) {
626 - if (!strncmp(sdev->vendor, rdac_dev_list[i].vendor,
627 - strlen(rdac_dev_list[i].vendor)) &&
628 - !strncmp(sdev->model, rdac_dev_list[i].model,
629 - strlen(rdac_dev_list[i].model))) {
630 - found = 1;
631 - break;
632 - }
633 - }
634 - if (!found)
635 - goto out; 636 - 637 - scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *) 638 - + sizeof(*h) , GFP_KERNEL); 639 - if (!scsi_dh_data) { 640 - sdev_printk(KERN_ERR, sdev, "Attach failed %s.\n", 641 - RDAC_NAME); 642 - goto out; 643 - } 644 - 645 - scsi_dh_data->scsi_dh = &rdac_dh; 646 - h = (struct rdac_dh_data *) scsi_dh_data->buf; 647 - h->lun = UNINITIALIZED_LUN; 648 - h->state = RDAC_STATE_ACTIVE; 649 - spin_lock_irqsave(sdev->request_queue->queue_lock, flags); 650 - sdev->scsi_dh_data = scsi_dh_data; 651 - spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); 652 - try_module_get(THIS_MODULE); 653 - 654 - sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", RDAC_NAME); 655 - 656 - } else if (action == BUS_NOTIFY_DEL_DEVICE) { 657 - if (sdev->scsi_dh_data == NULL || 658 - sdev->scsi_dh_data->scsi_dh != &rdac_dh) 659 - goto out; 660 - 661 - spin_lock_irqsave(sdev->request_queue->queue_lock, flags); 662 - scsi_dh_data = sdev->scsi_dh_data; 663 - sdev->scsi_dh_data = NULL; 664 - spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); 665 - 666 - h = (struct rdac_dh_data *) scsi_dh_data->buf; 667 - if (h->ctlr) 668 - kref_put(&h->ctlr->kref, release_controller); 669 - kfree(scsi_dh_data); 670 - module_put(THIS_MODULE); 671 - sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n", RDAC_NAME); 672 620 } 673 621 674 - out: 622 + scsi_dh_data->scsi_dh = &rdac_dh; 623 + h = (struct rdac_dh_data *) scsi_dh_data->buf; 624 + h->lun = UNINITIALIZED_LUN; 625 + h->state = RDAC_STATE_ACTIVE; 626 + 627 + err = get_lun(sdev, h); 628 + if (err != SCSI_DH_OK) 629 + goto failed; 630 + 631 + err = check_ownership(sdev, h); 632 + if (err != SCSI_DH_OK) 633 + goto failed; 634 + 635 + if (!try_module_get(THIS_MODULE)) 636 + goto failed; 637 + 638 + spin_lock_irqsave(sdev->request_queue->queue_lock, flags); 639 + sdev->scsi_dh_data = scsi_dh_data; 640 + spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); 641 + 642 + sdev_printk(KERN_NOTICE, sdev, 643 + "%s: LUN %d (%s)\n", 644 + RDAC_NAME, h->lun, lun_state[(int)h->lun_state]); 645 + 675 646 return 0; 647 + 648 + failed: 649 + kfree(scsi_dh_data); 650 + sdev_printk(KERN_ERR, sdev, "%s: not attached\n", 651 + RDAC_NAME); 652 + return -EINVAL; 676 653 } 654 + 655 + static void rdac_bus_detach( struct scsi_device *sdev ) 656 + { 657 + struct scsi_dh_data *scsi_dh_data; 658 + struct rdac_dh_data *h; 659 + unsigned long flags; 660 + 661 + spin_lock_irqsave(sdev->request_queue->queue_lock, flags); 662 + scsi_dh_data = sdev->scsi_dh_data; 663 + sdev->scsi_dh_data = NULL; 664 + spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); 665 + 666 + h = (struct rdac_dh_data *) scsi_dh_data->buf; 667 + if (h->ctlr) 668 + kref_put(&h->ctlr->kref, release_controller); 669 + kfree(scsi_dh_data); 670 + module_put(THIS_MODULE); 671 + sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", RDAC_NAME); 672 + } 673 + 674 + 677 675 678 676 static int __init rdac_init(void) 679 677 {
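The scsi_dh_rdac changes above do two things: the handler moves from a bus-notifier to the devlist/attach/detach interface of the scsi_dh framework, and get_lun() stops assuming the LUN fits in the last byte of the C8 inquiry page, decoding it with scsilun_to_int() instead. As a reference for that second change, here is a sketch approximating what the in-tree scsilun_to_int() helper does (the name lun_to_int_sketch and the standalone framing are illustrative; struct scsi_lun is the 8-byte array from <scsi/scsi.h>):

    /* Fold the first two big-endian 16-bit LUN levels (SAM-2 addressing)
     * into a 32-bit value; level 0 lands in the low 16 bits. */
    static unsigned int lun_to_int_sketch(const struct scsi_lun *slun)
    {
            unsigned int lun = 0;
            int i;

            for (i = 0; i < sizeof(lun); i += 2)
                    lun |= ((slun->scsi_lun[i] << 8) |
                            slun->scsi_lun[i + 1]) << (i * 8);
            return lun;
    }

Decoding the structured LUN, together with the new page_code/page_id validation, means a malformed or multi-level C8 response can no longer be silently misread as a small LUN number.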
+177 -27
drivers/scsi/ibmvscsi/ibmvfc.c
··· 521 521 static void ibmvfc_reinit_host(struct ibmvfc_host *vhost) 522 522 { 523 523 if (vhost->action == IBMVFC_HOST_ACTION_NONE) { 524 - scsi_block_requests(vhost->host); 525 - ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING); 526 - ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY); 524 + if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) { 525 + scsi_block_requests(vhost->host); 526 + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY); 527 + } 527 528 } else 528 529 vhost->reinit = 1; 529 530 ··· 855 854 } 856 855 857 856 /** 858 - * __ibmvfc_find_target - Find the specified scsi_target (no locking) 857 + * __ibmvfc_get_target - Find the specified scsi_target (no locking) 859 858 * @starget: scsi target struct 860 859 * 861 860 * Return value: 862 861 * ibmvfc_target struct / NULL if not found 863 862 **/ 864 - static struct ibmvfc_target *__ibmvfc_find_target(struct scsi_target *starget) 863 + static struct ibmvfc_target *__ibmvfc_get_target(struct scsi_target *starget) 865 864 { 866 865 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 867 866 struct ibmvfc_host *vhost = shost_priv(shost); 868 867 struct ibmvfc_target *tgt; 869 868 870 869 list_for_each_entry(tgt, &vhost->targets, queue) 871 - if (tgt->target_id == starget->id) 870 + if (tgt->target_id == starget->id) { 871 + kref_get(&tgt->kref); 872 872 return tgt; 873 + } 873 874 return NULL; 874 875 } 875 876 876 877 /** 877 - * ibmvfc_find_target - Find the specified scsi_target 878 + * ibmvfc_get_target - Find the specified scsi_target 878 879 * @starget: scsi target struct 879 880 * 880 881 * Return value: 881 882 * ibmvfc_target struct / NULL if not found 882 883 **/ 883 - static struct ibmvfc_target *ibmvfc_find_target(struct scsi_target *starget) 884 + static struct ibmvfc_target *ibmvfc_get_target(struct scsi_target *starget) 884 885 { 885 886 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 886 887 struct ibmvfc_target *tgt; 887 888 unsigned long flags; 888 889 889 890 spin_lock_irqsave(shost->host_lock, flags); 890 - tgt = __ibmvfc_find_target(starget); 891 + tgt = __ibmvfc_get_target(starget); 891 892 spin_unlock_irqrestore(shost->host_lock, flags); 892 893 return tgt; 893 894 } ··· 966 963 case IBMVFC_HALTED: 967 964 fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED; 968 965 break; 966 + case IBMVFC_NO_CRQ: 967 + fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; 968 + break; 969 969 default: 970 970 ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state); 971 971 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; ··· 994 988 } 995 989 996 990 /** 991 + * ibmvfc_release_tgt - Free memory allocated for a target 992 + * @kref: kref struct 993 + * 994 + **/ 995 + static void ibmvfc_release_tgt(struct kref *kref) 996 + { 997 + struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref); 998 + kfree(tgt); 999 + } 1000 + 1001 + /** 997 1002 * ibmvfc_get_starget_node_name - Get SCSI target's node name 998 1003 * @starget: scsi target struct 999 1004 * ··· 1013 996 **/ 1014 997 static void ibmvfc_get_starget_node_name(struct scsi_target *starget) 1015 998 { 1016 - struct ibmvfc_target *tgt = ibmvfc_find_target(starget); 999 + struct ibmvfc_target *tgt = ibmvfc_get_target(starget); 1017 1000 fc_starget_port_name(starget) = tgt ? 
tgt->ids.node_name : 0; 1001 + if (tgt) 1002 + kref_put(&tgt->kref, ibmvfc_release_tgt); 1018 1003 } 1019 1004 1020 1005 /** ··· 1028 1009 **/ 1029 1010 static void ibmvfc_get_starget_port_name(struct scsi_target *starget) 1030 1011 { 1031 - struct ibmvfc_target *tgt = ibmvfc_find_target(starget); 1012 + struct ibmvfc_target *tgt = ibmvfc_get_target(starget); 1032 1013 fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0; 1014 + if (tgt) 1015 + kref_put(&tgt->kref, ibmvfc_release_tgt); 1033 1016 } 1034 1017 1035 1018 /** ··· 1043 1022 **/ 1044 1023 static void ibmvfc_get_starget_port_id(struct scsi_target *starget) 1045 1024 { 1046 - struct ibmvfc_target *tgt = ibmvfc_find_target(starget); 1025 + struct ibmvfc_target *tgt = ibmvfc_get_target(starget); 1047 1026 fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1; 1027 + if (tgt) 1028 + kref_put(&tgt->kref, ibmvfc_release_tgt); 1048 1029 } 1049 1030 1050 1031 /** ··· 1136 1113 login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ; 1137 1114 login_info->capabilities = IBMVFC_CAN_MIGRATE; 1138 1115 login_info->async.va = vhost->async_crq.msg_token; 1139 - login_info->async.len = vhost->async_crq.size; 1116 + login_info->async.len = vhost->async_crq.size * sizeof(*vhost->async_crq.msgs); 1140 1117 strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME); 1141 1118 strncpy(login_info->device_name, 1142 1119 vhost->host->shost_gendev.bus_id, IBMVFC_MAX_NAME); ··· 1427 1404 err = cmd_status[index].name; 1428 1405 } 1429 1406 1430 - if (!logerr && (vhost->log_level <= IBMVFC_DEFAULT_LOG_LEVEL)) 1407 + if (!logerr && (vhost->log_level <= (IBMVFC_DEFAULT_LOG_LEVEL + 1))) 1431 1408 return; 1432 1409 1433 1410 if (rsp->flags & FCP_RSP_LEN_VALID) ··· 2077 2054 { 2078 2055 const char *desc = ibmvfc_get_ae_desc(crq->event); 2079 2056 2080 - ibmvfc_log(vhost, 2, "%s event received\n", desc); 2057 + ibmvfc_log(vhost, 3, "%s event received\n", desc); 2081 2058 2082 2059 switch (crq->event) { 2083 2060 case IBMVFC_AE_LINK_UP: ··· 2671 2648 } 2672 2649 2673 2650 /** 2674 - * ibmvfc_release_tgt - Free memory allocated for a target 2675 - * @kref: kref struct 2676 - * 2677 - **/ 2678 - static void ibmvfc_release_tgt(struct kref *kref) 2679 - { 2680 - struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref); 2681 - kfree(tgt); 2682 - } 2683 - 2684 - /** 2685 2651 * ibmvfc_tgt_prli_done - Completion handler for Process Login 2686 2652 * @evt: ibmvfc event struct 2687 2653 * ··· 2914 2902 } 2915 2903 2916 2904 /** 2905 + * ibmvfc_adisc_needs_plogi - Does device need PLOGI? 
2906 + * @mad: ibmvfc passthru mad struct 2907 + * @tgt: ibmvfc target struct 2908 + * 2909 + * Returns: 2910 + * 1 if PLOGI needed / 0 if PLOGI not needed 2911 + **/ 2912 + static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad, 2913 + struct ibmvfc_target *tgt) 2914 + { 2915 + if (memcmp(&mad->fc_iu.response[2], &tgt->ids.port_name, 2916 + sizeof(tgt->ids.port_name))) 2917 + return 1; 2918 + if (memcmp(&mad->fc_iu.response[4], &tgt->ids.node_name, 2919 + sizeof(tgt->ids.node_name))) 2920 + return 1; 2921 + if (mad->fc_iu.response[6] != tgt->scsi_id) 2922 + return 1; 2923 + return 0; 2924 + } 2925 + 2926 + /** 2927 + * ibmvfc_tgt_adisc_done - Completion handler for ADISC 2928 + * @evt: ibmvfc event struct 2929 + * 2930 + **/ 2931 + static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt) 2932 + { 2933 + struct ibmvfc_target *tgt = evt->tgt; 2934 + struct ibmvfc_host *vhost = evt->vhost; 2935 + struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru; 2936 + u32 status = mad->common.status; 2937 + u8 fc_reason, fc_explain; 2938 + 2939 + vhost->discovery_threads--; 2940 + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); 2941 + 2942 + switch (status) { 2943 + case IBMVFC_MAD_SUCCESS: 2944 + tgt_dbg(tgt, "ADISC succeeded\n"); 2945 + if (ibmvfc_adisc_needs_plogi(mad, tgt)) 2946 + tgt->need_login = 1; 2947 + break; 2948 + case IBMVFC_MAD_DRIVER_FAILED: 2949 + break; 2950 + case IBMVFC_MAD_FAILED: 2951 + default: 2952 + tgt->need_login = 1; 2953 + fc_reason = (mad->fc_iu.response[1] & 0x00ff0000) >> 16; 2954 + fc_explain = (mad->fc_iu.response[1] & 0x0000ff00) >> 8; 2955 + tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", 2956 + ibmvfc_get_cmd_error(mad->iu.status, mad->iu.error), 2957 + mad->iu.status, mad->iu.error, 2958 + ibmvfc_get_fc_type(fc_reason), fc_reason, 2959 + ibmvfc_get_ls_explain(fc_explain), fc_explain, status); 2960 + break; 2961 + }; 2962 + 2963 + kref_put(&tgt->kref, ibmvfc_release_tgt); 2964 + ibmvfc_free_event(evt); 2965 + wake_up(&vhost->work_wait_q); 2966 + } 2967 + 2968 + /** 2969 + * ibmvfc_init_passthru - Initialize an event struct for FC passthru 2970 + * @evt: ibmvfc event struct 2971 + * 2972 + **/ 2973 + static void ibmvfc_init_passthru(struct ibmvfc_event *evt) 2974 + { 2975 + struct ibmvfc_passthru_mad *mad = &evt->iu.passthru; 2976 + 2977 + memset(mad, 0, sizeof(*mad)); 2978 + mad->common.version = 1; 2979 + mad->common.opcode = IBMVFC_PASSTHRU; 2980 + mad->common.length = sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu); 2981 + mad->cmd_ioba.va = (u64)evt->crq.ioba + 2982 + offsetof(struct ibmvfc_passthru_mad, iu); 2983 + mad->cmd_ioba.len = sizeof(mad->iu); 2984 + mad->iu.cmd_len = sizeof(mad->fc_iu.payload); 2985 + mad->iu.rsp_len = sizeof(mad->fc_iu.response); 2986 + mad->iu.cmd.va = (u64)evt->crq.ioba + 2987 + offsetof(struct ibmvfc_passthru_mad, fc_iu) + 2988 + offsetof(struct ibmvfc_passthru_fc_iu, payload); 2989 + mad->iu.cmd.len = sizeof(mad->fc_iu.payload); 2990 + mad->iu.rsp.va = (u64)evt->crq.ioba + 2991 + offsetof(struct ibmvfc_passthru_mad, fc_iu) + 2992 + offsetof(struct ibmvfc_passthru_fc_iu, response); 2993 + mad->iu.rsp.len = sizeof(mad->fc_iu.response); 2994 + } 2995 + 2996 + /** 2997 + * ibmvfc_tgt_adisc - Initiate an ADISC for specified target 2998 + * @tgt: ibmvfc target struct 2999 + * 3000 + **/ 3001 + static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt) 3002 + { 3003 + struct ibmvfc_passthru_mad *mad; 3004 + struct ibmvfc_host *vhost = tgt->vhost; 3005 + struct ibmvfc_event *evt; 3006 + 3007 + 
if (vhost->discovery_threads >= disc_threads) 3008 + return; 3009 + 3010 + kref_get(&tgt->kref); 3011 + evt = ibmvfc_get_event(vhost); 3012 + vhost->discovery_threads++; 3013 + ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT); 3014 + evt->tgt = tgt; 3015 + 3016 + ibmvfc_init_passthru(evt); 3017 + mad = &evt->iu.passthru; 3018 + mad->iu.flags = IBMVFC_FC_ELS; 3019 + mad->iu.scsi_id = tgt->scsi_id; 3020 + 3021 + mad->fc_iu.payload[0] = IBMVFC_ADISC; 3022 + memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name, 3023 + sizeof(vhost->login_buf->resp.port_name)); 3024 + memcpy(&mad->fc_iu.payload[4], &vhost->login_buf->resp.node_name, 3025 + sizeof(vhost->login_buf->resp.node_name)); 3026 + mad->fc_iu.payload[6] = vhost->login_buf->resp.scsi_id & 0x00ffffff; 3027 + 3028 + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); 3029 + if (ibmvfc_send_event(evt, vhost, default_timeout)) { 3030 + vhost->discovery_threads--; 3031 + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); 3032 + kref_put(&tgt->kref, ibmvfc_release_tgt); 3033 + } else 3034 + tgt_dbg(tgt, "Sent ADISC\n"); 3035 + } 3036 + 3037 + /** 2917 3038 * ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD 2918 3039 * @evt: ibmvfc event struct 2919 3040 * ··· 3066 2921 tgt->new_scsi_id = rsp->scsi_id; 3067 2922 if (rsp->scsi_id != tgt->scsi_id) 3068 2923 ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout); 2924 + else 2925 + ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc); 3069 2926 break; 3070 2927 case IBMVFC_MAD_DRIVER_FAILED: 3071 2928 break; ··· 3483 3336 tgt_dbg(tgt, "rport add succeeded\n"); 3484 3337 rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff; 3485 3338 rport->supported_classes = 0; 3339 + tgt->target_id = rport->scsi_target_id; 3486 3340 if (tgt->service_parms.class1_parms[0] & 0x80000000) 3487 3341 rport->supported_classes |= FC_COS_CLASS1; 3488 3342 if (tgt->service_parms.class2_parms[0] & 0x80000000) ··· 3948 3800 3949 3801 ENTER; 3950 3802 ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr); 3803 + ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE); 3804 + ibmvfc_wait_while_resetting(vhost); 3805 + ibmvfc_release_crq_queue(vhost); 3951 3806 kthread_stop(vhost->work_thread); 3952 3807 fc_remove_host(vhost->host); 3953 3808 scsi_remove_host(vhost->host); 3954 - ibmvfc_release_crq_queue(vhost); 3955 3809 3956 3810 spin_lock_irqsave(vhost->host->host_lock, flags); 3957 3811 ibmvfc_purge_requests(vhost, DID_ERROR);
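The __ibmvfc_find_target/__ibmvfc_get_target rename above carries the substance of the "Target refcounting fixes" commit from the shortlog: a successful lookup now elevates tgt->kref under the host lock, so the FC transport attribute callbacks cannot race with target teardown. A minimal sketch of the contract callers follow (example_read_port_name is a hypothetical caller; the other names come from the hunks above):

    /* Look up, use, release: ibmvfc_get_target() returns with the kref
     * held (or NULL), and the caller must drop it when done. */
    static u64 example_read_port_name(struct scsi_target *starget)
    {
            struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
            u64 wwpn = 0;

            if (tgt) {
                    wwpn = tgt->ids.port_name;
                    kref_put(&tgt->kref, ibmvfc_release_tgt);
            }
            return wwpn;
    }

This is also why ibmvfc_release_tgt() moves earlier in the file: the new ADISC completion path needs it before its old definition point.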
+40 -4
drivers/scsi/ibmvscsi/ibmvfc.h
··· 29 29 #include "viosrp.h" 30 30 31 31 #define IBMVFC_NAME "ibmvfc" 32 - #define IBMVFC_DRIVER_VERSION "1.0.0" 33 - #define IBMVFC_DRIVER_DATE "(July 1, 2008)" 32 + #define IBMVFC_DRIVER_VERSION "1.0.1" 33 + #define IBMVFC_DRIVER_DATE "(July 11, 2008)" 34 34 35 35 #define IBMVFC_DEFAULT_TIMEOUT 15 36 36 #define IBMVFC_INIT_TIMEOUT 30 ··· 119 119 IBMVFC_PROCESS_LOGIN = 0x0008, 120 120 IBMVFC_QUERY_TARGET = 0x0010, 121 121 IBMVFC_IMPLICIT_LOGOUT = 0x0040, 122 + IBMVFC_PASSTHRU = 0x0200, 122 123 IBMVFC_TMF_MAD = 0x0100, 123 124 }; 124 125 ··· 440 439 struct ibmvfc_fcp_rsp rsp; 441 440 }__attribute__((packed, aligned (8))); 442 441 442 + struct ibmvfc_passthru_fc_iu { 443 + u32 payload[7]; 444 + #define IBMVFC_ADISC 0x52000000 445 + u32 response[7]; 446 + }; 447 + 448 + struct ibmvfc_passthru_iu { 449 + u64 task_tag; 450 + u32 cmd_len; 451 + u32 rsp_len; 452 + u16 status; 453 + u16 error; 454 + u32 flags; 455 + #define IBMVFC_FC_ELS 0x01 456 + u32 cancel_key; 457 + u32 reserved; 458 + struct srp_direct_buf cmd; 459 + struct srp_direct_buf rsp; 460 + u64 correlation; 461 + u64 scsi_id; 462 + u64 tag; 463 + u64 reserved2[2]; 464 + }__attribute__((packed, aligned (8))); 465 + 466 + struct ibmvfc_passthru_mad { 467 + struct ibmvfc_mad_common common; 468 + struct srp_direct_buf cmd_ioba; 469 + struct ibmvfc_passthru_iu iu; 470 + struct ibmvfc_passthru_fc_iu fc_iu; 471 + }__attribute__((packed, aligned (8))); 472 + 443 473 struct ibmvfc_trace_start_entry { 444 474 u32 xfer_len; 445 475 }__attribute__((packed)); ··· 563 531 struct ibmvfc_implicit_logout implicit_logout; 564 532 struct ibmvfc_tmf tmf; 565 533 struct ibmvfc_cmd cmd; 534 + struct ibmvfc_passthru_mad passthru; 566 535 }__attribute__((packed, aligned (8))); 567 536 568 537 enum ibmvfc_target_action { ··· 689 656 #define tgt_dbg(t, fmt, ...) \ 690 657 DBG_CMD(dev_info((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__)) 691 658 659 + #define tgt_info(t, fmt, ...) \ 660 + dev_info((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__) 661 + 692 662 #define tgt_err(t, fmt, ...) \ 693 663 dev_err((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__) 694 664 ··· 704 668 dev_err((vhost)->dev, ##__VA_ARGS__); \ 705 669 } while (0) 706 670 707 - #define ENTER DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Entering %s\n", __FUNCTION__)) 708 - #define LEAVE DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Leaving %s\n", __FUNCTION__)) 671 + #define ENTER DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Entering %s\n", __func__)) 672 + #define LEAVE DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Leaving %s\n", __func__)) 709 673 710 674 #ifdef CONFIG_SCSI_IBMVFC_TRACE 711 675 #define ibmvfc_create_trace_file(kobj, attr) sysfs_create_bin_file(kobj, attr)
+1 -1
drivers/scsi/ibmvscsi/ibmvstgt.c
··· 55 55 /* tmp - will replace with SCSI logging stuff */ 56 56 #define eprintk(fmt, args...) \ 57 57 do { \ 58 - printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args); \ 58 + printk("%s(%d) " fmt, __func__, __LINE__, ##args); \ 59 59 } while (0) 60 60 /* #define dprintk eprintk */ 61 61 #define dprintk(fmt, args...)
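This one-line hunk, like the imm, ipr, libsas, libsrp, lpfc, megaraid, nsp, ppa and qla1280 hunks that follow, belongs to the tree-wide "replace __FUNCTION__ with __func__" commit from the shortlog. __func__ is the predefined identifier required by C99, whereas __FUNCTION__ is a GCC extension retained only for backward compatibility. Inside any function body, __func__ behaves as if it were declared

    static const char __func__[] = "current-function-name";

so all of these printk-style uses are behaviorally unchanged; for example, this hypothetical snippet prints its own name either way:

    static void demo(void)
    {
            pr_debug("%s: entered\n", __func__);    /* "demo: entered" */
    }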
+1 -1
drivers/scsi/imm.c
··· 163 163 164 164 #if IMM_DEBUG > 0 165 165 #define imm_fail(x,y) printk("imm: imm_fail(%i) from %s at line %d\n",\ 166 - y, __FUNCTION__, __LINE__); imm_fail_func(x,y); 166 + y, __func__, __LINE__); imm_fail_func(x,y); 167 167 static inline void 168 168 imm_fail_func(imm_struct *dev, int error_code) 169 169 #else
+3 -3
drivers/scsi/ipr.h
··· 1403 1403 } 1404 1404 1405 1405 #define ipr_trace ipr_dbg("%s: %s: Line: %d\n",\ 1406 - __FILE__, __FUNCTION__, __LINE__) 1406 + __FILE__, __func__, __LINE__) 1407 1407 1408 - #define ENTER IPR_DBG_CMD(printk(KERN_INFO IPR_NAME": Entering %s\n", __FUNCTION__)) 1409 - #define LEAVE IPR_DBG_CMD(printk(KERN_INFO IPR_NAME": Leaving %s\n", __FUNCTION__)) 1408 + #define ENTER IPR_DBG_CMD(printk(KERN_INFO IPR_NAME": Entering %s\n", __func__)) 1409 + #define LEAVE IPR_DBG_CMD(printk(KERN_INFO IPR_NAME": Leaving %s\n", __func__)) 1410 1410 1411 1411 #define ipr_err_separator \ 1412 1412 ipr_err("----------------------------------------------------------\n")
+8 -8
drivers/scsi/libsas/sas_ata.c
··· 74 74 case SAS_OPEN_TO: 75 75 case SAS_OPEN_REJECT: 76 76 SAS_DPRINTK("%s: Saw error %d. What to do?\n", 77 - __FUNCTION__, ts->stat); 77 + __func__, ts->stat); 78 78 return AC_ERR_OTHER; 79 79 80 80 case SAS_ABORTED_TASK: ··· 115 115 } else if (stat->stat != SAM_STAT_GOOD) { 116 116 ac = sas_to_ata_err(stat); 117 117 if (ac) { 118 - SAS_DPRINTK("%s: SAS error %x\n", __FUNCTION__, 118 + SAS_DPRINTK("%s: SAS error %x\n", __func__, 119 119 stat->stat); 120 120 /* We saw a SAS error. Send a vague error. */ 121 121 qc->err_mask = ac; ··· 244 244 res = i->dft->lldd_I_T_nexus_reset(dev); 245 245 246 246 if (res != TMF_RESP_FUNC_COMPLETE) 247 - SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __FUNCTION__); 247 + SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __func__); 248 248 249 249 switch (dev->sata_dev.command_set) { 250 250 case ATA_COMMAND_SET: 251 - SAS_DPRINTK("%s: Found ATA device.\n", __FUNCTION__); 251 + SAS_DPRINTK("%s: Found ATA device.\n", __func__); 252 252 ap->link.device[0].class = ATA_DEV_ATA; 253 253 break; 254 254 case ATAPI_COMMAND_SET: 255 - SAS_DPRINTK("%s: Found ATAPI device.\n", __FUNCTION__); 255 + SAS_DPRINTK("%s: Found ATAPI device.\n", __func__); 256 256 ap->link.device[0].class = ATA_DEV_ATAPI; 257 257 break; 258 258 default: 259 259 SAS_DPRINTK("%s: Unknown SATA command set: %d.\n", 260 - __FUNCTION__, 260 + __func__, 261 261 dev->sata_dev.command_set); 262 262 ap->link.device[0].class = ATA_DEV_UNKNOWN; 263 263 break; ··· 299 299 { 300 300 struct domain_device *dev = ap->private_data; 301 301 302 - SAS_DPRINTK("STUB %s\n", __FUNCTION__); 302 + SAS_DPRINTK("STUB %s\n", __func__); 303 303 switch (sc_reg_in) { 304 304 case SCR_STATUS: 305 305 dev->sata_dev.sstatus = val; ··· 324 324 { 325 325 struct domain_device *dev = ap->private_data; 326 326 327 - SAS_DPRINTK("STUB %s\n", __FUNCTION__); 327 + SAS_DPRINTK("STUB %s\n", __func__); 328 328 switch (sc_reg_in) { 329 329 case SCR_STATUS: 330 330 *val = dev->sata_dev.sstatus;
+6 -6
drivers/scsi/libsas/sas_expander.c
··· 121 121 break; 122 122 } else { 123 123 SAS_DPRINTK("%s: task to dev %016llx response: 0x%x " 124 - "status 0x%x\n", __FUNCTION__, 124 + "status 0x%x\n", __func__, 125 125 SAS_ADDR(dev->sas_addr), 126 126 task->task_status.resp, 127 127 task->task_status.stat); ··· 1279 1279 goto out; 1280 1280 } else if (res != SMP_RESP_FUNC_ACC) { 1281 1281 SAS_DPRINTK("%s: dev %016llx phy 0x%x index 0x%x " 1282 - "result 0x%x\n", __FUNCTION__, 1282 + "result 0x%x\n", __func__, 1283 1283 SAS_ADDR(dev->sas_addr), phy_id, i, res); 1284 1284 goto out; 1285 1285 } ··· 1901 1901 1902 1902 if (!rsp) { 1903 1903 printk("%s: space for a smp response is missing\n", 1904 - __FUNCTION__); 1904 + __func__); 1905 1905 return -EINVAL; 1906 1906 } 1907 1907 ··· 1914 1914 if (type != SAS_EDGE_EXPANDER_DEVICE && 1915 1915 type != SAS_FANOUT_EXPANDER_DEVICE) { 1916 1916 printk("%s: can we send a smp request to a device?\n", 1917 - __FUNCTION__); 1917 + __func__); 1918 1918 return -EINVAL; 1919 1919 } 1920 1920 1921 1921 dev = sas_find_dev_by_rphy(rphy); 1922 1922 if (!dev) { 1923 - printk("%s: fail to find a domain_device?\n", __FUNCTION__); 1923 + printk("%s: fail to find a domain_device?\n", __func__); 1924 1924 return -EINVAL; 1925 1925 } 1926 1926 1927 1927 /* do we need to support multiple segments? */ 1928 1928 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) { 1929 1929 printk("%s: multiple segments req %u %u, rsp %u %u\n", 1930 - __FUNCTION__, req->bio->bi_vcnt, req->data_len, 1930 + __func__, req->bio->bi_vcnt, req->data_len, 1931 1931 rsp->bio->bi_vcnt, rsp->data_len); 1932 1932 return -EINVAL; 1933 1933 }
+2 -2
drivers/scsi/libsas/sas_port.c
··· 50 50 sas_deform_port(phy); 51 51 else { 52 52 SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n", 53 - __FUNCTION__, phy->id, phy->port->id, 53 + __func__, phy->id, phy->port->id, 54 54 phy->port->num_phys); 55 55 return; 56 56 } ··· 78 78 79 79 if (i >= sas_ha->num_phys) { 80 80 printk(KERN_NOTICE "%s: couldn't find a free port, bug?\n", 81 - __FUNCTION__); 81 + __func__); 82 82 spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags); 83 83 return; 84 84 }
+15 -15
drivers/scsi/libsas/sas_scsi_host.c
··· 343 343 flags); 344 344 SAS_DPRINTK("%s: task 0x%p aborted from " 345 345 "task_queue\n", 346 - __FUNCTION__, task); 346 + __func__, task); 347 347 return TASK_IS_ABORTED; 348 348 } 349 349 } ··· 351 351 } 352 352 353 353 for (i = 0; i < 5; i++) { 354 - SAS_DPRINTK("%s: aborting task 0x%p\n", __FUNCTION__, task); 354 + SAS_DPRINTK("%s: aborting task 0x%p\n", __func__, task); 355 355 res = si->dft->lldd_abort_task(task); 356 356 357 357 spin_lock_irqsave(&task->task_state_lock, flags); 358 358 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 359 359 spin_unlock_irqrestore(&task->task_state_lock, flags); 360 - SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__, 360 + SAS_DPRINTK("%s: task 0x%p is done\n", __func__, 361 361 task); 362 362 return TASK_IS_DONE; 363 363 } ··· 365 365 366 366 if (res == TMF_RESP_FUNC_COMPLETE) { 367 367 SAS_DPRINTK("%s: task 0x%p is aborted\n", 368 - __FUNCTION__, task); 368 + __func__, task); 369 369 return TASK_IS_ABORTED; 370 370 } else if (si->dft->lldd_query_task) { 371 371 SAS_DPRINTK("%s: querying task 0x%p\n", 372 - __FUNCTION__, task); 372 + __func__, task); 373 373 res = si->dft->lldd_query_task(task); 374 374 switch (res) { 375 375 case TMF_RESP_FUNC_SUCC: 376 376 SAS_DPRINTK("%s: task 0x%p at LU\n", 377 - __FUNCTION__, task); 377 + __func__, task); 378 378 return TASK_IS_AT_LU; 379 379 case TMF_RESP_FUNC_COMPLETE: 380 380 SAS_DPRINTK("%s: task 0x%p not at LU\n", 381 - __FUNCTION__, task); 381 + __func__, task); 382 382 return TASK_IS_NOT_AT_LU; 383 383 case TMF_RESP_FUNC_FAILED: 384 384 SAS_DPRINTK("%s: task 0x%p failed to abort\n", 385 - __FUNCTION__, task); 385 + __func__, task); 386 386 return TASK_ABORT_FAILED; 387 387 } 388 388 ··· 545 545 546 546 if (need_reset) { 547 547 SAS_DPRINTK("%s: task 0x%p requests reset\n", 548 - __FUNCTION__, task); 548 + __func__, task); 549 549 goto reset; 550 550 } 551 551 ··· 556 556 557 557 switch (res) { 558 558 case TASK_IS_DONE: 559 - SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__, 559 + SAS_DPRINTK("%s: task 0x%p is done\n", __func__, 560 560 task); 561 561 sas_eh_finish_cmd(cmd); 562 562 continue; 563 563 case TASK_IS_ABORTED: 564 564 SAS_DPRINTK("%s: task 0x%p is aborted\n", 565 - __FUNCTION__, task); 565 + __func__, task); 566 566 sas_eh_finish_cmd(cmd); 567 567 continue; 568 568 case TASK_IS_AT_LU: ··· 633 633 } 634 634 return list_empty(work_q); 635 635 clear_q: 636 - SAS_DPRINTK("--- Exit %s -- clear_q\n", __FUNCTION__); 636 + SAS_DPRINTK("--- Exit %s -- clear_q\n", __func__); 637 637 list_for_each_entry_safe(cmd, n, work_q, eh_entry) 638 638 sas_eh_finish_cmd(cmd); 639 639 ··· 650 650 list_splice_init(&shost->eh_cmd_q, &eh_work_q); 651 651 spin_unlock_irqrestore(shost->host_lock, flags); 652 652 653 - SAS_DPRINTK("Enter %s\n", __FUNCTION__); 653 + SAS_DPRINTK("Enter %s\n", __func__); 654 654 /* 655 655 * Deal with commands that still have SAS tasks (i.e. they didn't 656 656 * complete via the normal sas_task completion mechanism) ··· 669 669 670 670 out: 671 671 scsi_eh_flush_done_q(&ha->eh_done_q); 672 - SAS_DPRINTK("--- Exit %s\n", __FUNCTION__); 672 + SAS_DPRINTK("--- Exit %s\n", __func__); 673 673 return; 674 674 } 675 675 ··· 990 990 if (task->task_state_flags & SAS_TASK_STATE_ABORTED || 991 991 task->task_state_flags & SAS_TASK_STATE_DONE) { 992 992 spin_unlock_irqrestore(&task->task_state_lock, flags); 993 - SAS_DPRINTK("%s: Task %p already finished.\n", __FUNCTION__, 993 + SAS_DPRINTK("%s: Task %p already finished.\n", __func__, 994 994 task); 995 995 return 0; 996 996 }
+1 -1
drivers/scsi/libsrp.c
··· 39 39 /* tmp - will replace with SCSI logging stuff */ 40 40 #define eprintk(fmt, args...) \ 41 41 do { \ 42 - printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args); \ 42 + printk("%s(%d) " fmt, __func__, __LINE__, ##args); \ 43 43 } while (0) 44 44 /* #define dprintk eprintk */ 45 45 #define dprintk(fmt, args...)
+2 -2
drivers/scsi/lpfc/lpfc_init.c
··· 2083 2083 if (iocbq_entry == NULL) { 2084 2084 printk(KERN_ERR "%s: only allocated %d iocbs of " 2085 2085 "expected %d count. Unloading driver.\n", 2086 - __FUNCTION__, i, LPFC_IOCB_LIST_CNT); 2086 + __func__, i, LPFC_IOCB_LIST_CNT); 2087 2087 error = -ENOMEM; 2088 2088 goto out_free_iocbq; 2089 2089 } ··· 2093 2093 kfree (iocbq_entry); 2094 2094 printk(KERN_ERR "%s: failed to allocate IOTAG. " 2095 2095 "Unloading driver.\n", 2096 - __FUNCTION__); 2096 + __func__); 2097 2097 error = -ENOMEM; 2098 2098 goto out_free_iocbq; 2099 2099 }
+1 -1
drivers/scsi/lpfc/lpfc_scsi.c
··· 341 341 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 342 342 printk(KERN_ERR "%s: Too many sg segments from " 343 343 "dma_map_sg. Config %d, seg_cnt %d", 344 - __FUNCTION__, phba->cfg_sg_seg_cnt, 344 + __func__, phba->cfg_sg_seg_cnt, 345 345 lpfc_cmd->seg_cnt); 346 346 scsi_dma_unmap(scsi_cmnd); 347 347 return 1;
+3 -3
drivers/scsi/lpfc/lpfc_sli.c
··· 219 219 case CMD_IOCB_LOGENTRY_CN: 220 220 case CMD_IOCB_LOGENTRY_ASYNC_CN: 221 221 printk("%s - Unhandled SLI-3 Command x%x\n", 222 - __FUNCTION__, iocb_cmnd); 222 + __func__, iocb_cmnd); 223 223 type = LPFC_UNKNOWN_IOCB; 224 224 break; 225 225 default: ··· 1715 1715 rspiocbp = __lpfc_sli_get_iocbq(phba); 1716 1716 if (rspiocbp == NULL) { 1717 1717 printk(KERN_ERR "%s: out of buffers! Failing " 1718 - "completion.\n", __FUNCTION__); 1718 + "completion.\n", __func__); 1719 1719 break; 1720 1720 } 1721 1721 ··· 3793 3793 break; 3794 3794 default: 3795 3795 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n", 3796 - __FUNCTION__, ctx_cmd); 3796 + __func__, ctx_cmd); 3797 3797 break; 3798 3798 } 3799 3799
+1 -1
drivers/scsi/megaraid/mega_common.h
··· 265 265 #define ASSERT(expression) \ 266 266 if (!(expression)) { \ 267 267 ASSERT_ACTION("assertion failed:(%s), file: %s, line: %d:%s\n", \ 268 - #expression, __FILE__, __LINE__, __FUNCTION__); \ 268 + #expression, __FILE__, __LINE__, __func__); \ 269 269 } 270 270 #else 271 271 #define ASSERT(expression)
+8 -8
drivers/scsi/megaraid/megaraid_mbox.c
··· 458 458 459 459 if (adapter == NULL) { 460 460 con_log(CL_ANN, (KERN_WARNING 461 - "megaraid: out of memory, %s %d.\n", __FUNCTION__, __LINE__)); 461 + "megaraid: out of memory, %s %d.\n", __func__, __LINE__)); 462 462 463 463 goto out_probe_one; 464 464 } ··· 1002 1002 1003 1003 if (!raid_dev->una_mbox64) { 1004 1004 con_log(CL_ANN, (KERN_WARNING 1005 - "megaraid: out of memory, %s %d\n", __FUNCTION__, 1005 + "megaraid: out of memory, %s %d\n", __func__, 1006 1006 __LINE__)); 1007 1007 return -1; 1008 1008 } ··· 1030 1030 if (!adapter->ibuf) { 1031 1031 1032 1032 con_log(CL_ANN, (KERN_WARNING 1033 - "megaraid: out of memory, %s %d\n", __FUNCTION__, 1033 + "megaraid: out of memory, %s %d\n", __func__, 1034 1034 __LINE__)); 1035 1035 1036 1036 goto out_free_common_mbox; ··· 1052 1052 1053 1053 if (adapter->kscb_list == NULL) { 1054 1054 con_log(CL_ANN, (KERN_WARNING 1055 - "megaraid: out of memory, %s %d\n", __FUNCTION__, 1055 + "megaraid: out of memory, %s %d\n", __func__, 1056 1056 __LINE__)); 1057 1057 goto out_free_ibuf; 1058 1058 } ··· 1060 1060 // memory allocation for our command packets 1061 1061 if (megaraid_mbox_setup_dma_pools(adapter) != 0) { 1062 1062 con_log(CL_ANN, (KERN_WARNING 1063 - "megaraid: out of memory, %s %d\n", __FUNCTION__, 1063 + "megaraid: out of memory, %s %d\n", __func__, 1064 1064 __LINE__)); 1065 1065 goto out_free_scb_list; 1066 1066 } ··· 2981 2981 2982 2982 if (pinfo == NULL) { 2983 2983 con_log(CL_ANN, (KERN_WARNING 2984 - "megaraid: out of memory, %s %d\n", __FUNCTION__, 2984 + "megaraid: out of memory, %s %d\n", __func__, 2985 2985 __LINE__)); 2986 2986 2987 2987 return -1; ··· 3508 3508 3509 3509 if (adapter->uscb_list == NULL) { 3510 3510 con_log(CL_ANN, (KERN_WARNING 3511 - "megaraid: out of memory, %s %d\n", __FUNCTION__, 3511 + "megaraid: out of memory, %s %d\n", __func__, 3512 3512 __LINE__)); 3513 3513 return -1; 3514 3514 } ··· 3879 3879 !raid_dev->sysfs_buffer) { 3880 3880 3881 3881 con_log(CL_ANN, (KERN_WARNING 3882 - "megaraid: out of memory, %s %d\n", __FUNCTION__, 3882 + "megaraid: out of memory, %s %d\n", __func__, 3883 3883 __LINE__)); 3884 3884 3885 3885 rval = -ENOMEM;
+2 -2
drivers/scsi/megaraid/megaraid_mm.c
··· 929 929 !adapter->pthru_dma_pool) { 930 930 931 931 con_log(CL_ANN, (KERN_WARNING 932 - "megaraid cmm: out of memory, %s %d\n", __FUNCTION__, 932 + "megaraid cmm: out of memory, %s %d\n", __func__, 933 933 __LINE__)); 934 934 935 935 rval = (-ENOMEM); ··· 957 957 958 958 con_log(CL_ANN, (KERN_WARNING 959 959 "megaraid cmm: out of memory, %s %d\n", 960 - __FUNCTION__, __LINE__)); 960 + __func__, __LINE__)); 961 961 962 962 rval = (-ENOMEM); 963 963
+2 -2
drivers/scsi/nsp32.c
··· 299 299 #else 300 300 # define NSP32_DEBUG_MASK 0xffffff 301 301 # define nsp32_msg(type, args...) \ 302 - nsp32_message (__FUNCTION__, __LINE__, (type), args) 302 + nsp32_message (__func__, __LINE__, (type), args) 303 303 # define nsp32_dbg(mask, args...) \ 304 - nsp32_dmessage(__FUNCTION__, __LINE__, (mask), args) 304 + nsp32_dmessage(__func__, __LINE__, (mask), args) 305 305 #endif 306 306 307 307 #define NSP32_DEBUG_QUEUECOMMAND BIT(0)
+1 -1
drivers/scsi/nsp32_debug.c
··· 88 88 int i,s; 89 89 // printk(KERN_DEBUG); 90 90 print_opcodek(command[0]); 91 - /*printk(KERN_DEBUG "%s ", __FUNCTION__);*/ 91 + /*printk(KERN_DEBUG "%s ", __func__);*/ 92 92 if ((command[0] >> 5) == 6 || 93 93 (command[0] >> 5) == 7 ) { 94 94 s = 12; /* vender specific */
+2 -2
drivers/scsi/pcmcia/nsp_cs.c
··· 107 107 #else 108 108 # define NSP_DEBUG_MASK 0xffffff 109 109 # define nsp_msg(type, args...) \ 110 - nsp_cs_message (__FUNCTION__, __LINE__, (type), args) 110 + nsp_cs_message (__func__, __LINE__, (type), args) 111 111 # define nsp_dbg(mask, args...) \ 112 - nsp_cs_dmessage(__FUNCTION__, __LINE__, (mask), args) 112 + nsp_cs_dmessage(__func__, __LINE__, (mask), args) 113 113 #endif 114 114 115 115 #define NSP_DEBUG_QUEUECOMMAND BIT(0)
+1 -1
drivers/scsi/pcmcia/nsp_debug.c
··· 90 90 int i, s; 91 91 printk(KERN_DEBUG); 92 92 print_opcodek(command[0]); 93 - /*printk(KERN_DEBUG "%s ", __FUNCTION__);*/ 93 + /*printk(KERN_DEBUG "%s ", __func__);*/ 94 94 if ((command[0] >> 5) == 6 || 95 95 (command[0] >> 5) == 7 ) { 96 96 s = 12; /* vender specific */
+1 -1
drivers/scsi/ppa.c
··· 171 171 172 172 #if PPA_DEBUG > 0 173 173 #define ppa_fail(x,y) printk("ppa: ppa_fail(%i) from %s at line %d\n",\ 174 - y, __FUNCTION__, __LINE__); ppa_fail_func(x,y); 174 + y, __func__, __LINE__); ppa_fail_func(x,y); 175 175 static inline void ppa_fail_func(ppa_struct *dev, int error_code) 176 176 #else 177 177 static inline void ppa_fail(ppa_struct *dev, int error_code)
+6 -6
drivers/scsi/qla1280.c
··· 1695 1695 risc_code_size = *ql1280_board_tbl[ha->devnum].fwlen; 1696 1696 1697 1697 dprintk(1, "%s: DMA RISC code (%i) words\n", 1698 - __FUNCTION__, risc_code_size); 1698 + __func__, risc_code_size); 1699 1699 1700 1700 num = 0; 1701 1701 while (risc_code_size > 0) { ··· 1721 1721 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff; 1722 1722 mb[6] = pci_dma_hi32(ha->request_dma) >> 16; 1723 1723 dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n", 1724 - __FUNCTION__, mb[0], 1724 + __func__, mb[0], 1725 1725 (void *)(long)ha->request_dma, 1726 1726 mb[6], mb[7], mb[2], mb[3]); 1727 1727 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 | ··· 1753 1753 if (tbuf[i] != sp[i] && warn++ < 10) { 1754 1754 printk(KERN_ERR "%s: FW compare error @ " 1755 1755 "byte(0x%x) loop#=%x\n", 1756 - __FUNCTION__, i, num); 1756 + __func__, i, num); 1757 1757 printk(KERN_ERR "%s: FWbyte=%x " 1758 1758 "FWfromChip=%x\n", 1759 - __FUNCTION__, sp[i], tbuf[i]); 1759 + __func__, sp[i], tbuf[i]); 1760 1760 /*break; */ 1761 1761 } 1762 1762 } ··· 1781 1781 int err; 1782 1782 1783 1783 dprintk(1, "%s: Verifying checksum of loaded RISC code.\n", 1784 - __FUNCTION__); 1784 + __func__); 1785 1785 1786 1786 /* Verify checksum of loaded RISC code. */ 1787 1787 mb[0] = MBC_VERIFY_CHECKSUM; ··· 1794 1794 } 1795 1795 1796 1796 /* Start firmware execution. */ 1797 - dprintk(1, "%s: start firmware running.\n", __FUNCTION__); 1797 + dprintk(1, "%s: start firmware running.\n", __func__); 1798 1798 mb[0] = MBC_EXECUTE_FIRMWARE; 1799 1799 mb[1] = *ql1280_board_tbl[ha->devnum].fwstart; 1800 1800 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
+61 -57
drivers/scsi/qla2xxx/qla_attr.c
··· 20 20 { 21 21 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 22 22 struct device, kobj))); 23 - char *rbuf = (char *)ha->fw_dump; 24 23 25 24 if (ha->fw_dump_reading == 0) 26 25 return 0; 27 - if (off > ha->fw_dump_len) 28 - return 0; 29 - if (off + count > ha->fw_dump_len) 30 - count = ha->fw_dump_len - off; 31 26 32 - memcpy(buf, &rbuf[off], count); 33 - 34 - return (count); 27 + return memory_read_from_buffer(buf, count, &off, ha->fw_dump, 28 + ha->fw_dump_len); 35 29 } 36 30 37 31 static ssize_t ··· 88 94 { 89 95 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 90 96 struct device, kobj))); 91 - int size = ha->nvram_size; 92 - char *nvram_cache = ha->nvram; 93 97 94 - if (!capable(CAP_SYS_ADMIN) || off > size || count == 0) 98 + if (!capable(CAP_SYS_ADMIN)) 95 99 return 0; 96 - if (off + count > size) { 97 - size -= off; 98 - count = size; 99 - } 100 100 101 101 /* Read NVRAM data from cache. */ 102 - memcpy(buf, &nvram_cache[off], count); 103 - 104 - return count; 102 + return memory_read_from_buffer(buf, count, &off, ha->nvram, 103 + ha->nvram_size); 105 104 } 106 105 107 106 static ssize_t ··· 162 175 163 176 if (ha->optrom_state != QLA_SREADING) 164 177 return 0; 165 - if (off > ha->optrom_region_size) 166 - return 0; 167 - if (off + count > ha->optrom_region_size) 168 - count = ha->optrom_region_size - off; 169 178 170 - memcpy(buf, &ha->optrom_buffer[off], count); 171 - 172 - return count; 179 + return memory_read_from_buffer(buf, count, &off, ha->optrom_buffer, 180 + ha->optrom_region_size); 173 181 } 174 182 175 183 static ssize_t ··· 356 374 { 357 375 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 358 376 struct device, kobj))); 359 - int size = ha->vpd_size; 360 - char *vpd_cache = ha->vpd; 361 377 362 - if (!capable(CAP_SYS_ADMIN) || off > size || count == 0) 378 + if (!capable(CAP_SYS_ADMIN)) 363 379 return 0; 364 - if (off + count > size) { 365 - size -= off; 366 - count = size; 367 - } 368 380 369 381 /* Read NVRAM data from cache. 
*/ 370 - memcpy(buf, &vpd_cache[off], count); 371 - 372 - return count; 382 + return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size); 373 383 } 374 384 375 385 static ssize_t ··· 531 557 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 532 558 uint32_t sn; 533 559 534 - if (IS_FWI2_CAPABLE(ha)) 535 - return snprintf(buf, PAGE_SIZE, "\n"); 560 + if (IS_FWI2_CAPABLE(ha)) { 561 + qla2xxx_get_vpd_field(ha, "SN", buf, PAGE_SIZE); 562 + return snprintf(buf, PAGE_SIZE, "%s\n", buf); 563 + } 536 564 537 565 sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1; 538 566 return snprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000, ··· 785 809 ha->fw_revision[3]); 786 810 } 787 811 812 + static ssize_t 813 + qla2x00_total_isp_aborts_show(struct device *dev, 814 + struct device_attribute *attr, char *buf) 815 + { 816 + scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 817 + 818 + return snprintf(buf, PAGE_SIZE, "%d\n", 819 + ha->qla_stats.total_isp_aborts); 820 + } 821 + 788 822 static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL); 789 823 static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); 790 824 static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL); ··· 817 831 qla2x00_optrom_fcode_version_show, NULL); 818 832 static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show, 819 833 NULL); 834 + static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show, 835 + NULL); 820 836 821 837 struct device_attribute *qla2x00_host_attrs[] = { 822 838 &dev_attr_driver_version, ··· 837 849 &dev_attr_optrom_efi_version, 838 850 &dev_attr_optrom_fcode_version, 839 851 &dev_attr_optrom_fw_version, 852 + &dev_attr_total_isp_aborts, 840 853 NULL, 841 854 }; 842 855 ··· 961 972 } 962 973 963 974 static void 964 - qla2x00_get_rport_loss_tmo(struct fc_rport *rport) 975 + qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) 965 976 { 966 - struct Scsi_Host *host = rport_to_shost(rport); 967 - scsi_qla_host_t *ha = shost_priv(host); 968 - 969 - rport->dev_loss_tmo = ha->port_down_retry_count + 5; 977 + if (timeout) 978 + rport->dev_loss_tmo = timeout; 979 + else 980 + rport->dev_loss_tmo = 1; 970 981 } 971 982 972 983 static void 973 - qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) 984 + qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport) 974 985 { 975 986 struct Scsi_Host *host = rport_to_shost(rport); 976 - scsi_qla_host_t *ha = shost_priv(host); 987 + fc_port_t *fcport = *(fc_port_t **)rport->dd_data; 977 988 978 - if (timeout) 979 - ha->port_down_retry_count = timeout; 980 - else 981 - ha->port_down_retry_count = 1; 989 + qla2x00_abort_fcport_cmds(fcport); 982 990 983 - rport->dev_loss_tmo = ha->port_down_retry_count + 5; 991 + /* 992 + * Transport has effectively 'deleted' the rport, clear 993 + * all local references. 
994 + */ 995 + spin_lock_irq(host->host_lock); 996 + fcport->rport = NULL; 997 + *((fc_port_t **)rport->dd_data) = NULL; 998 + spin_unlock_irq(host->host_lock); 999 + } 1000 + 1001 + static void 1002 + qla2x00_terminate_rport_io(struct fc_rport *rport) 1003 + { 1004 + fc_port_t *fcport = *(fc_port_t **)rport->dd_data; 1005 + 1006 + qla2x00_abort_fcport_cmds(fcport); 1007 + scsi_target_unblock(&rport->dev); 984 1008 } 985 1009 986 1010 static int ··· 1047 1045 pfc_host_stat->invalid_tx_word_count = stats->inval_xmit_word_cnt; 1048 1046 pfc_host_stat->invalid_crc_count = stats->inval_crc_cnt; 1049 1047 if (IS_FWI2_CAPABLE(ha)) { 1048 + pfc_host_stat->lip_count = stats->lip_cnt; 1050 1049 pfc_host_stat->tx_frames = stats->tx_frames; 1051 1050 pfc_host_stat->rx_frames = stats->rx_frames; 1052 1051 pfc_host_stat->dumped_frames = stats->dumped_frames; ··· 1176 1173 static int 1177 1174 qla24xx_vport_delete(struct fc_vport *fc_vport) 1178 1175 { 1179 - scsi_qla_host_t *ha = shost_priv(fc_vport->shost); 1180 1176 scsi_qla_host_t *vha = fc_vport->dd_data; 1177 + scsi_qla_host_t *pha = to_qla_parent(vha); 1178 + 1179 + while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) || 1180 + test_bit(FCPORT_UPDATE_NEEDED, &pha->dpc_flags)) 1181 + msleep(1000); 1181 1182 1182 1183 qla24xx_disable_vp(vha); 1183 1184 qla24xx_deallocate_vp_id(vha); 1184 - 1185 - mutex_lock(&ha->vport_lock); 1186 - ha->cur_vport_count--; 1187 - clear_bit(vha->vp_idx, ha->vp_idx_map); 1188 - mutex_unlock(&ha->vport_lock); 1189 1185 1190 1186 kfree(vha->node_name); 1191 1187 kfree(vha->port_name); ··· 1250 1248 .get_starget_port_id = qla2x00_get_starget_port_id, 1251 1249 .show_starget_port_id = 1, 1252 1250 1253 - .get_rport_dev_loss_tmo = qla2x00_get_rport_loss_tmo, 1254 1251 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo, 1255 1252 .show_rport_dev_loss_tmo = 1, 1256 1253 1257 1254 .issue_fc_host_lip = qla2x00_issue_lip, 1255 + .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk, 1256 + .terminate_rport_io = qla2x00_terminate_rport_io, 1258 1257 .get_fc_host_stats = qla2x00_get_fc_host_stats, 1259 1258 1260 1259 .vport_create = qla24xx_vport_create, ··· 1294 1291 .get_starget_port_id = qla2x00_get_starget_port_id, 1295 1292 .show_starget_port_id = 1, 1296 1293 1297 - .get_rport_dev_loss_tmo = qla2x00_get_rport_loss_tmo, 1298 1294 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo, 1299 1295 .show_rport_dev_loss_tmo = 1, 1300 1296 1301 1297 .issue_fc_host_lip = qla2x00_issue_lip, 1298 + .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk, 1299 + .terminate_rport_io = qla2x00_terminate_rport_io, 1302 1300 .get_fc_host_stats = qla2x00_get_fc_host_stats, 1303 1301 }; 1304 1302
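Four sysfs bin_attribute readers in qla_attr.c (fw_dump, nvram, optrom, vpd) collapse to a single call each because memory_read_from_buffer() centralizes the offset validation, end-of-buffer clamping and copy they previously open-coded. Roughly, the lib/string.c helper behaves like this sketch (not a verbatim copy):

    ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
                                    const void *from, size_t available)
    {
            loff_t pos = *ppos;

            if (pos < 0)
                    return -EINVAL;
            if (pos >= available)
                    return 0;                       /* EOF */
            if (count > available - pos)
                    count = available - pos;        /* clamp to buffer end */
            memcpy(to, (const char *)from + pos, count);
            *ppos = pos + count;

            return count;
    }

Besides shrinking the drivers, this keeps the bounds logic in one audited place instead of four slightly different copies.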
+1 -1
drivers/scsi/qla2xxx/qla_dbg.c
··· 216 216 217 217 static int 218 218 qla2xxx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint16_t *ram, 219 - uint16_t ram_words, void **nxt) 219 + uint32_t ram_words, void **nxt) 220 220 { 221 221 int rval; 222 222 uint32_t cnt, stat, timer, words, idx;
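Widening ram_words from uint16_t to uint32_t matters because the extended-memory region dumped here spans (fw_memory_size - 0x100000 + 1) words (see the mem_size computation in the qla_init.c hunk later in this series), which can exceed what 16 bits represent; with the old prototype an oversized count would silently wrap. A hypothetical two-liner illustrating the truncation being avoided:

    uint32_t ram_words = 0x20000;               /* 128K words to dump */
    uint16_t old_arg   = (uint16_t)ram_words;   /* wraps to 0: nothing dumped */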
+8 -4
drivers/scsi/qla2xxx/qla_def.h
··· 864 864 uint32_t prim_seq_err_cnt; 865 865 uint32_t inval_xmit_word_cnt; 866 866 uint32_t inval_crc_cnt; 867 - uint32_t unused1[0x1b]; 867 + uint32_t lip_cnt; 868 + uint32_t unused1[0x1a]; 868 869 uint32_t tx_frames; 869 870 uint32_t rx_frames; 870 871 uint32_t dumped_frames; ··· 1545 1544 int login_retry; 1546 1545 atomic_t port_down_timer; 1547 1546 1548 - spinlock_t rport_lock; 1549 1547 struct fc_rport *rport, *drport; 1550 1548 u32 supported_classes; 1551 1549 ··· 2155 2155 uint32_t gold_fw_version; 2156 2156 }; 2157 2157 2158 + struct qla_statistics { 2159 + uint32_t total_isp_aborts; 2160 + }; 2161 + 2158 2162 /* 2159 2163 * Linux Host Adapter structure 2160 2164 */ ··· 2170 2166 struct pci_dev *pdev; 2171 2167 2172 2168 unsigned long host_no; 2173 - unsigned long instance; 2174 2169 2175 2170 volatile struct { 2176 2171 uint32_t init_done :1; ··· 2518 2515 2519 2516 uint8_t model_number[16+1]; 2520 2517 #define BINZERO "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" 2521 - char *model_desc; 2518 + char model_desc[80]; 2522 2519 uint8_t adapter_id[16+1]; 2523 2520 2524 2521 uint8_t *node_name; ··· 2599 2596 int cur_vport_count; 2600 2597 2601 2598 struct qla_chip_state_84xx *cs84xx; 2599 + struct qla_statistics qla_stats; 2602 2600 } scsi_qla_host_t; 2603 2601 2604 2602
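Two details worth spelling out in the qla_def.h hunk: lip_cnt is carved out of the adjacent reserved area, so the firmware statistics layout is unchanged, and model_desc becomes an embedded array so it can hold a description copied out of VPD rather than only pointing at a static table entry. The reserved-word arithmetic:

    /* old: uint32_t unused1[0x1b];                   27 reserved words
     * new: uint32_t lip_cnt; unused1[0x1a];      1 + 26 = 27 words, same size */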
+4 -1
drivers/scsi/qla2xxx/qla_gbl.h
··· 62 62 extern int ql2xallocfwdump; 63 63 extern int ql2xextended_error_logging; 64 64 extern int ql2xqfullrampup; 65 - extern int num_hosts; 65 + extern int ql2xiidmaenable; 66 66 67 67 extern int qla2x00_loop_reset(scsi_qla_host_t *); 68 68 extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); ··· 70 70 fc_host_event_code, u32); 71 71 extern int qla2x00_post_hwe_work(struct scsi_qla_host *, uint16_t , uint16_t, 72 72 uint16_t, uint16_t); 73 + 74 + extern void qla2x00_abort_fcport_cmds(fc_port_t *); 73 75 74 76 /* 75 77 * Global Functions in qla_mid.c source file. ··· 314 312 uint16_t, uint16_t); 315 313 316 314 extern void qla2xxx_get_flash_info(scsi_qla_host_t *); 315 + extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t); 317 316 318 317 /* 319 318 * Global Function Prototypes in qla_dbg.c source file.
+6
drivers/scsi/qla2xxx/qla_gs.c
··· 1661 1661 { 1662 1662 int rval; 1663 1663 1664 + if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 1665 + DEBUG2(printk("scsi(%ld): FDMI unsupported on " 1666 + "ISP2100/ISP2200.\n", ha->host_no)); 1667 + return QLA_SUCCESS; 1668 + } 1669 + 1664 1670 rval = qla2x00_mgmt_svr_login(ha); 1665 1671 if (rval) 1666 1672 return rval;
+76 -63
drivers/scsi/qla2xxx/qla_init.c
··· 334 334 qla2x00_isp_firmware(scsi_qla_host_t *ha) 335 335 { 336 336 int rval; 337 + uint16_t loop_id, topo, sw_cap; 338 + uint8_t domain, area, al_pa; 337 339 338 340 /* Assume loading risc code */ 339 341 rval = QLA_FUNCTION_FAILED; ··· 347 345 348 346 /* Verify checksum of loaded RISC code. */ 349 347 rval = qla2x00_verify_checksum(ha, ha->fw_srisc_address); 348 + if (rval == QLA_SUCCESS) { 349 + /* And, verify we are not in ROM code. */ 350 + rval = qla2x00_get_adapter_id(ha, &loop_id, &al_pa, 351 + &area, &domain, &topo, &sw_cap); 352 + } 350 353 } 351 354 352 355 if (rval) { ··· 729 722 /* Perform RISC reset. */ 730 723 qla24xx_reset_risc(ha); 731 724 732 - ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024; 725 + ha->fw_transfer_size = REQUEST_ENTRY_SIZE * ha->request_q_length; 733 726 734 727 rval = qla2x00_mbx_reg_test(ha); 735 728 if (rval) { ··· 775 768 mem_size = (ha->fw_memory_size - 0x100000 + 1) * 776 769 sizeof(uint32_t); 777 770 771 + /* Allocate memory for Fibre Channel Event Buffer. */ 772 + if (!IS_QLA25XX(ha)) 773 + goto try_eft; 774 + 775 + tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, 776 + GFP_KERNEL); 777 + if (!tc) { 778 + qla_printk(KERN_WARNING, ha, "Unable to allocate " 779 + "(%d KB) for FCE.\n", FCE_SIZE / 1024); 780 + goto try_eft; 781 + } 782 + 783 + memset(tc, 0, FCE_SIZE); 784 + rval = qla2x00_enable_fce_trace(ha, tc_dma, FCE_NUM_BUFFERS, 785 + ha->fce_mb, &ha->fce_bufs); 786 + if (rval) { 787 + qla_printk(KERN_WARNING, ha, "Unable to initialize " 788 + "FCE (%d).\n", rval); 789 + dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, 790 + tc_dma); 791 + ha->flags.fce_enabled = 0; 792 + goto try_eft; 793 + } 794 + 795 + qla_printk(KERN_INFO, ha, "Allocated (%d KB) for FCE...\n", 796 + FCE_SIZE / 1024); 797 + 798 + fce_size = sizeof(struct qla2xxx_fce_chain) + EFT_SIZE; 799 + ha->flags.fce_enabled = 1; 800 + ha->fce_dma = tc_dma; 801 + ha->fce = tc; 802 + try_eft: 778 803 /* Allocate memory for Extended Trace Buffer. */ 779 804 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, 780 805 GFP_KERNEL); ··· 832 793 eft_size = EFT_SIZE; 833 794 ha->eft_dma = tc_dma; 834 795 ha->eft = tc; 835 - 836 - /* Allocate memory for Fibre Channel Event Buffer. 
*/ 837 - if (!IS_QLA25XX(ha)) 838 - goto cont_alloc; 839 - 840 - tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, 841 - GFP_KERNEL); 842 - if (!tc) { 843 - qla_printk(KERN_WARNING, ha, "Unable to allocate " 844 - "(%d KB) for FCE.\n", FCE_SIZE / 1024); 845 - goto cont_alloc; 846 - } 847 - 848 - memset(tc, 0, FCE_SIZE); 849 - rval = qla2x00_enable_fce_trace(ha, tc_dma, FCE_NUM_BUFFERS, 850 - ha->fce_mb, &ha->fce_bufs); 851 - if (rval) { 852 - qla_printk(KERN_WARNING, ha, "Unable to initialize " 853 - "FCE (%d).\n", rval); 854 - dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, 855 - tc_dma); 856 - ha->flags.fce_enabled = 0; 857 - goto cont_alloc; 858 - } 859 - 860 - qla_printk(KERN_INFO, ha, "Allocated (%d KB) for FCE...\n", 861 - FCE_SIZE / 1024); 862 - 863 - fce_size = sizeof(struct qla2xxx_fce_chain) + EFT_SIZE; 864 - ha->flags.fce_enabled = 1; 865 - ha->fce_dma = tc_dma; 866 - ha->fce = tc; 867 796 } 868 797 cont_alloc: 869 798 req_q_size = ha->request_q_length * sizeof(request_t); ··· 1508 1501 index = (ha->pdev->subsystem_device & 0xff); 1509 1502 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && 1510 1503 index < QLA_MODEL_NAMES) 1511 - ha->model_desc = qla2x00_model_name[index * 2 + 1]; 1504 + strncpy(ha->model_desc, 1505 + qla2x00_model_name[index * 2 + 1], 1506 + sizeof(ha->model_desc) - 1); 1512 1507 } else { 1513 1508 index = (ha->pdev->subsystem_device & 0xff); 1514 1509 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && 1515 1510 index < QLA_MODEL_NAMES) { 1516 1511 strcpy(ha->model_number, 1517 1512 qla2x00_model_name[index * 2]); 1518 - ha->model_desc = qla2x00_model_name[index * 2 + 1]; 1513 + strncpy(ha->model_desc, 1514 + qla2x00_model_name[index * 2 + 1], 1515 + sizeof(ha->model_desc) - 1); 1519 1516 } else { 1520 1517 strcpy(ha->model_number, def); 1521 1518 } 1522 1519 } 1520 + if (IS_FWI2_CAPABLE(ha)) 1521 + qla2xxx_get_vpd_field(ha, "\x82", ha->model_desc, 1522 + sizeof(ha->model_desc)); 1523 1523 } 1524 1524 1525 1525 /* On sparc systems, obtain port and node WWN from firmware ··· 1878 1864 { 1879 1865 fc_port_t *fcport = data; 1880 1866 struct fc_rport *rport; 1881 - unsigned long flags; 1882 1867 1883 - spin_lock_irqsave(&fcport->rport_lock, flags); 1868 + spin_lock_irq(fcport->ha->host->host_lock); 1884 1869 rport = fcport->drport; 1885 1870 fcport->drport = NULL; 1886 - spin_unlock_irqrestore(&fcport->rport_lock, flags); 1871 + spin_unlock_irq(fcport->ha->host->host_lock); 1887 1872 if (rport) 1888 1873 fc_remote_port_delete(rport); 1889 1874 } ··· 1911 1898 atomic_set(&fcport->state, FCS_UNCONFIGURED); 1912 1899 fcport->flags = FCF_RLC_SUPPORT; 1913 1900 fcport->supported_classes = FC_COS_UNSPECIFIED; 1914 - spin_lock_init(&fcport->rport_lock); 1915 1901 1916 1902 return fcport; 1917 1903 } ··· 2019 2007 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) { 2020 2008 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags)) 2021 2009 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 2022 - if (test_bit(RSCN_UPDATE, &save_flags)) 2010 + if (test_bit(RSCN_UPDATE, &save_flags)) { 2011 + ha->flags.rscn_queue_overflow = 1; 2023 2012 set_bit(RSCN_UPDATE, &ha->dpc_flags); 2013 + } 2024 2014 } 2025 2015 2026 2016 return (rval); ··· 2257 2243 { 2258 2244 struct fc_rport_identifiers rport_ids; 2259 2245 struct fc_rport *rport; 2260 - unsigned long flags; 2261 2246 2262 2247 if (fcport->drport) 2263 2248 qla2x00_rport_del(fcport); 2264 - if (fcport->rport) 2265 - return; 2266 2249 2267 2250 rport_ids.node_name = wwn_to_u64(fcport->node_name); 2268 2251 
rport_ids.port_name = wwn_to_u64(fcport->port_name); 2269 2252 rport_ids.port_id = fcport->d_id.b.domain << 16 | 2270 2253 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa; 2271 2254 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 2272 - rport = fc_remote_port_add(ha->host, 0, &rport_ids); 2255 + fcport->rport = rport = fc_remote_port_add(ha->host, 0, &rport_ids); 2273 2256 if (!rport) { 2274 2257 qla_printk(KERN_WARNING, ha, 2275 2258 "Unable to allocate fc remote port!\n"); 2276 2259 return; 2277 2260 } 2278 - spin_lock_irqsave(&fcport->rport_lock, flags); 2279 - fcport->rport = rport; 2261 + spin_lock_irq(fcport->ha->host->host_lock); 2280 2262 *((fc_port_t **)rport->dd_data) = fcport; 2281 - spin_unlock_irqrestore(&fcport->rport_lock, flags); 2263 + spin_unlock_irq(fcport->ha->host->host_lock); 2282 2264 2283 2265 rport->supported_classes = fcport->supported_classes; 2284 2266 ··· 2575 2565 } else if (qla2x00_gnn_id(ha, swl) != QLA_SUCCESS) { 2576 2566 kfree(swl); 2577 2567 swl = NULL; 2578 - } else if (qla2x00_gfpn_id(ha, swl) == QLA_SUCCESS) { 2568 + } else if (ql2xiidmaenable && 2569 + qla2x00_gfpn_id(ha, swl) == QLA_SUCCESS) { 2579 2570 qla2x00_gpsc(ha, swl); 2580 2571 } 2581 2572 } ··· 3231 3220 3232 3221 /* Go with deferred removal of rport references. */ 3233 3222 list_for_each_entry(fcport, &ha->fcports, list) 3234 - if (fcport->drport) 3223 + if (fcport->drport && 3224 + atomic_read(&fcport->state) != FCS_UNCONFIGURED) 3235 3225 qla2x00_rport_del(fcport); 3236 3226 } 3237 3227 ··· 3255 3243 if (ha->flags.online) { 3256 3244 ha->flags.online = 0; 3257 3245 clear_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 3246 + ha->qla_stats.total_isp_aborts++; 3258 3247 3259 3248 qla_printk(KERN_INFO, ha, 3260 3249 "Performing ISP error recovery - ha= %p.\n", ha); ··· 3296 3283 ha->isp_abort_cnt = 0; 3297 3284 clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags); 3298 3285 3299 - if (ha->eft) { 3300 - memset(ha->eft, 0, EFT_SIZE); 3301 - rval = qla2x00_enable_eft_trace(ha, 3302 - ha->eft_dma, EFT_NUM_BUFFERS); 3303 - if (rval) { 3304 - qla_printk(KERN_WARNING, ha, 3305 - "Unable to reinitialize EFT " 3306 - "(%d).\n", rval); 3307 - } 3308 - } 3309 - 3310 3286 if (ha->fce) { 3311 3287 ha->flags.fce_enabled = 1; 3312 3288 memset(ha->fce, 0, ··· 3308 3306 "Unable to reinitialize FCE " 3309 3307 "(%d).\n", rval); 3310 3308 ha->flags.fce_enabled = 0; 3309 + } 3310 + } 3311 + 3312 + if (ha->eft) { 3313 + memset(ha->eft, 0, EFT_SIZE); 3314 + rval = qla2x00_enable_eft_trace(ha, 3315 + ha->eft_dma, EFT_NUM_BUFFERS); 3316 + if (rval) { 3317 + qla_printk(KERN_WARNING, ha, 3318 + "Unable to reinitialize EFT " 3319 + "(%d).\n", rval); 3311 3320 } 3312 3321 } 3313 3322 } else { /* failed the ISP abort */ ··· 4039 4026 ret = qla2x00_stop_firmware(ha); 4040 4027 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT && 4041 4028 retries ; retries--) { 4042 - qla2x00_reset_chip(ha); 4043 - if (qla2x00_chip_diag(ha) != QLA_SUCCESS) 4029 + ha->isp_ops->reset_chip(ha); 4030 + if (ha->isp_ops->chip_diag(ha) != QLA_SUCCESS) 4044 4031 continue; 4045 4032 if (qla2x00_setup_chip(ha) != QLA_SUCCESS) 4046 4033 continue; ··· 4062 4049 rval = qla2x00_fw_ready(ha->parent); 4063 4050 if (rval == QLA_SUCCESS) { 4064 4051 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 4065 - qla2x00_marker(ha->parent, 0, 0, MK_SYNC_ALL); 4052 + qla2x00_marker(ha, 0, 0, MK_SYNC_ALL); 4066 4053 } 4067 4054 4068 4055 ha->flags.management_server_logged_in = 0;
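The model-description hunks above change ha->model_desc from a pointer into the static qla2x00_model_name[] table to an embedded buffer, so the VPD-derived description fetched via qla2xxx_get_vpd_field() can overwrite it later. A minimal sketch of the bounded-copy pattern, assuming an illustrative struct (the real layout lives in qla_def.h):

#include <string.h>

struct host_info {
        char model_desc[80];            /* now an array, not a char * */
};

static void set_model_desc(struct host_info *h, const char *desc)
{
        /* strncpy() does not NUL-terminate on truncation, hence the
         * reserved last byte and the explicit terminator. */
        strncpy(h->model_desc, desc, sizeof(h->model_desc) - 1);
        h->model_desc[sizeof(h->model_desc) - 1] = '\0';
}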
+8 -6
drivers/scsi/qla2xxx/qla_iocb.c
··· 454 454 { 455 455 int ret; 456 456 unsigned long flags = 0; 457 + scsi_qla_host_t *pha = to_qla_parent(ha); 457 458 458 - spin_lock_irqsave(&ha->hardware_lock, flags); 459 + spin_lock_irqsave(&pha->hardware_lock, flags); 459 460 ret = __qla2x00_marker(ha, loop_id, lun, type); 460 - spin_unlock_irqrestore(&ha->hardware_lock, flags); 461 + spin_unlock_irqrestore(&pha->hardware_lock, flags); 461 462 462 463 return (ret); 463 464 } ··· 673 672 { 674 673 int ret, nseg; 675 674 unsigned long flags; 676 - scsi_qla_host_t *ha; 675 + scsi_qla_host_t *ha, *pha; 677 676 struct scsi_cmnd *cmd; 678 677 uint32_t *clr_ptr; 679 678 uint32_t index; ··· 687 686 /* Setup device pointers. */ 688 687 ret = 0; 689 688 ha = sp->ha; 689 + pha = to_qla_parent(ha); 690 690 reg = &ha->iobase->isp24; 691 691 cmd = sp->cmd; 692 692 /* So we know we haven't pci_map'ed anything yet */ ··· 702 700 } 703 701 704 702 /* Acquire ring specific lock */ 705 - spin_lock_irqsave(&ha->hardware_lock, flags); 703 + spin_lock_irqsave(&pha->hardware_lock, flags); 706 704 707 705 /* Check for room in outstanding command list. */ 708 706 handle = ha->current_outstanding_cmd; ··· 797 795 ha->response_ring_ptr->signature != RESPONSE_PROCESSED) 798 796 qla24xx_process_response_queue(ha); 799 797 800 - spin_unlock_irqrestore(&ha->hardware_lock, flags); 798 + spin_unlock_irqrestore(&pha->hardware_lock, flags); 801 799 return QLA_SUCCESS; 802 800 803 801 queuing_error: 804 802 if (tot_dsds) 805 803 scsi_dma_unmap(cmd); 806 804 807 - spin_unlock_irqrestore(&ha->hardware_lock, flags); 805 + spin_unlock_irqrestore(&pha->hardware_lock, flags); 808 806 809 807 return QLA_FUNCTION_FAILED; 810 808 }
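Every ring access in these hunks now takes the physical port's hardware_lock rather than the vport's own: NPIV vports share the parent HBA's request and response rings, so serialization has to happen on the owner. to_qla_parent() resolves a vport to that owner; roughly (paraphrased from qla_def.h of this era, shown as an assumption rather than a verbatim quote):

/* A vport carries a non-NULL parent pointer; a physical port maps
 * to itself, so the macro is safe to use unconditionally. */
#define to_qla_parent(ha) (((ha)->parent) ? (ha)->parent : (ha))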
-4
drivers/scsi/qla2xxx/qla_isr.c
··· 542 542 break; 543 543 544 544 case MBA_PORT_UPDATE: /* Port database update */ 545 - /* Only handle SCNs for our Vport index. */ 546 - if (ha->parent && ha->vp_idx != (mb[3] & 0xff)) 547 - break; 548 - 549 545 /* 550 546 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET 551 547 * event etc. earlier indicating loop is down) then process
+6 -3
drivers/scsi/qla2xxx/qla_mbx.c
··· 918 918 rval = qla2x00_mailbox_command(ha, mcp); 919 919 if (mcp->mb[0] == MBS_COMMAND_ERROR) 920 920 rval = QLA_COMMAND_ERROR; 921 + else if (mcp->mb[0] == MBS_INVALID_COMMAND) 922 + rval = QLA_INVALID_COMMAND; 921 923 922 924 /* Return data. */ 923 925 *id = mcp->mb[1]; ··· 2163 2161 struct abort_entry_24xx *abt; 2164 2162 dma_addr_t abt_dma; 2165 2163 uint32_t handle; 2164 + scsi_qla_host_t *pha = to_qla_parent(ha); 2166 2165 2167 2166 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2168 2167 2169 2168 fcport = sp->fcport; 2170 2169 2171 - spin_lock_irqsave(&ha->hardware_lock, flags); 2170 + spin_lock_irqsave(&pha->hardware_lock, flags); 2172 2171 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { 2173 - if (ha->outstanding_cmds[handle] == sp) 2172 + if (pha->outstanding_cmds[handle] == sp) 2174 2173 break; 2175 2174 } 2176 - spin_unlock_irqrestore(&ha->hardware_lock, flags); 2175 + spin_unlock_irqrestore(&pha->hardware_lock, flags); 2177 2176 if (handle == MAX_OUTSTANDING_COMMANDS) { 2178 2177 /* Command not found. */ 2179 2178 return QLA_FUNCTION_FAILED;
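Note the mailbox-status mapping above is mirrored by the vport-aware abort: both fixes exist because an NPIV vport has no rings or command table of its own. Usage sketch of the ROM-code probe that consumes the new QLA_INVALID_COMMAND return ('ha' assumed to be the physical port):

/* Firmware already loaded and running? get_adapter_id() succeeds
 * only under runtime firmware; ROM code reports invalid command. */
rval = qla2x00_get_adapter_id(ha, &loop_id, &al_pa,
                              &area, &domain, &topo, &sw_cap);
if (rval == QLA_SUCCESS)
        /* skip the RISC download, firmware is alive */ ;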
+6 -10
drivers/scsi/qla2xxx/qla_mid.c
··· 43 43 44 44 set_bit(vp_id, ha->vp_idx_map); 45 45 ha->num_vhosts++; 46 + ha->cur_vport_count++; 46 47 vha->vp_idx = vp_id; 47 48 list_add_tail(&vha->vp_list, &ha->vp_list); 48 49 mutex_unlock(&ha->vport_lock); ··· 59 58 mutex_lock(&ha->vport_lock); 60 59 vp_id = vha->vp_idx; 61 60 ha->num_vhosts--; 61 + ha->cur_vport_count--; 62 62 clear_bit(vp_id, ha->vp_idx_map); 63 63 list_del(&vha->vp_list); 64 64 mutex_unlock(&ha->vport_lock); ··· 105 103 "loop_id=0x%04x :%x\n", 106 104 vha->host_no, fcport->loop_id, fcport->vp_idx)); 107 105 108 - atomic_set(&fcport->state, FCS_DEVICE_DEAD); 109 106 qla2x00_mark_device_lost(vha, fcport, 0, 0); 107 + atomic_set(&fcport->state, FCS_UNCONFIGURED); 110 108 } 111 109 } 112 110 ··· 278 276 clear_bit(RESET_ACTIVE, &vha->dpc_flags); 279 277 } 280 278 281 - if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { 279 + if (atomic_read(&vha->vp_state) == VP_ACTIVE && 280 + test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { 282 281 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) { 283 282 qla2x00_loop_resync(vha); 284 283 clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags); ··· 393 390 vha->parent = ha; 394 391 vha->fc_vport = fc_vport; 395 392 vha->device_flags = 0; 396 - vha->instance = num_hosts; 397 393 vha->vp_idx = qla24xx_allocate_vp_id(vha); 398 394 if (vha->vp_idx > ha->max_npiv_vports) { 399 395 DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n", ··· 430 428 host->max_cmd_len = MAX_CMDSZ; 431 429 host->max_channel = MAX_BUSES - 1; 432 430 host->max_lun = MAX_LUNS; 433 - host->unique_id = vha->instance; 431 + host->unique_id = host->host_no; 434 432 host->max_id = MAX_TARGETS_2200; 435 433 host->transportt = qla2xxx_transport_vport_template; 436 434 ··· 438 436 vha->host_no, vha)); 439 437 440 438 vha->flags.init_done = 1; 441 - num_hosts++; 442 - 443 - mutex_lock(&ha->vport_lock); 444 - set_bit(vha->vp_idx, ha->vp_idx_map); 445 - ha->cur_vport_count++; 446 - mutex_unlock(&ha->vport_lock); 447 439 448 440 return vha; 449 441
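cur_vport_count now moves in lockstep with num_vhosts and the vp_idx bitmap inside the allocate/deallocate helpers, all under vport_lock, instead of being bumped separately at fc_vport creation (the removed hunk at the bottom). Sketch of the invariant, using names from the hunks above (the bit search is elided):

mutex_lock(&ha->vport_lock);
/* vp_id comes from a find_first_zero_bit() scan of ha->vp_idx_map */
set_bit(vp_id, ha->vp_idx_map);
ha->num_vhosts++;
ha->cur_vport_count++;          /* kept consistent under one mutex */
list_add_tail(&vha->vp_list, &ha->vp_list);
mutex_unlock(&ha->vport_lock);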
+59 -35
drivers/scsi/qla2xxx/qla_os.c
··· 27 27 */ 28 28 static struct kmem_cache *srb_cachep; 29 29 30 - int num_hosts; 31 30 int ql2xlogintimeout = 20; 32 31 module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR); 33 32 MODULE_PARM_DESC(ql2xlogintimeout, ··· 85 86 "Number of seconds to wait to begin to ramp-up the queue " 86 87 "depth for a device after a queue-full condition has been " 87 88 "detected. Default is 120 seconds."); 89 + 90 + int ql2xiidmaenable=1; 91 + module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR); 92 + MODULE_PARM_DESC(ql2xiidmaenable, 93 + "Enables iIDMA settings " 94 + "Default is 1 - perform iIDMA. 0 - no iIDMA."); 95 + 88 96 89 97 /* 90 98 * SCSI host template entry points ··· 394 388 } 395 389 396 390 /* Close window on fcport/rport state-transitioning. */ 397 - if (!*(fc_port_t **)rport->dd_data) { 391 + if (fcport->drport) { 398 392 cmd->result = DID_IMM_RETRY << 16; 399 393 goto qc_fail_command; 400 394 } ··· 449 443 int rval; 450 444 scsi_qla_host_t *pha = to_qla_parent(ha); 451 445 452 - if (unlikely(pci_channel_offline(ha->pdev))) { 446 + if (unlikely(pci_channel_offline(pha->pdev))) { 453 447 cmd->result = DID_REQUEUE << 16; 454 448 goto qc24_fail_command; 455 449 } ··· 461 455 } 462 456 463 457 /* Close window on fcport/rport state-transitioning. */ 464 - if (!*(fc_port_t **)rport->dd_data) { 458 + if (fcport->drport) { 465 459 cmd->result = DID_IMM_RETRY << 16; 466 460 goto qc24_fail_command; 467 461 } ··· 621 615 } 622 616 } 623 617 return (return_status); 618 + } 619 + 620 + void 621 + qla2x00_abort_fcport_cmds(fc_port_t *fcport) 622 + { 623 + int cnt; 624 + unsigned long flags; 625 + srb_t *sp; 626 + scsi_qla_host_t *ha = fcport->ha; 627 + scsi_qla_host_t *pha = to_qla_parent(ha); 628 + 629 + spin_lock_irqsave(&pha->hardware_lock, flags); 630 + for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 631 + sp = pha->outstanding_cmds[cnt]; 632 + if (!sp) 633 + continue; 634 + if (sp->fcport != fcport) 635 + continue; 636 + 637 + spin_unlock_irqrestore(&pha->hardware_lock, flags); 638 + if (ha->isp_ops->abort_command(ha, sp)) { 639 + DEBUG2(qla_printk(KERN_WARNING, ha, 640 + "Abort failed -- %lx\n", sp->cmd->serial_number)); 641 + } else { 642 + if (qla2x00_eh_wait_on_command(ha, sp->cmd) != 643 + QLA_SUCCESS) 644 + DEBUG2(qla_printk(KERN_WARNING, ha, 645 + "Abort failed while waiting -- %lx\n", 646 + sp->cmd->serial_number)); 647 + 648 + } 649 + spin_lock_irqsave(&pha->hardware_lock, flags); 650 + } 651 + spin_unlock_irqrestore(&pha->hardware_lock, flags); 624 652 } 625 653 626 654 static void ··· 1113 1073 else 1114 1074 scsi_deactivate_tcq(sdev, ha->max_q_depth); 1115 1075 1116 - rport->dev_loss_tmo = ha->port_down_retry_count + 5; 1076 + rport->dev_loss_tmo = ha->port_down_retry_count; 1117 1077 1118 1078 return 0; 1119 1079 } ··· 1669 1629 } 1670 1630 host->can_queue = ha->request_q_length + 128; 1671 1631 1672 - /* load the F/W, read paramaters, and init the H/W */ 1673 - ha->instance = num_hosts; 1674 - 1675 1632 mutex_init(&ha->vport_lock); 1676 1633 init_completion(&ha->mbx_cmd_comp); 1677 1634 complete(&ha->mbx_cmd_comp); ··· 1716 1679 1717 1680 host->this_id = 255; 1718 1681 host->cmd_per_lun = 3; 1719 - host->unique_id = ha->instance; 1682 + host->unique_id = host->host_no; 1720 1683 host->max_cmd_len = MAX_CMDSZ; 1721 1684 host->max_channel = MAX_BUSES - 1; 1722 1685 host->max_lun = MAX_LUNS; ··· 1736 1699 1737 1700 ha->flags.init_done = 1; 1738 1701 ha->flags.online = 1; 1739 - 1740 - num_hosts++; 1741 1702 1742 1703 ret = scsi_add_host(host, &pdev->dev); 1743 1704 if (ret) ··· 
1848 1813 qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport, 1849 1814 int defer) 1850 1815 { 1851 - unsigned long flags; 1852 1816 struct fc_rport *rport; 1817 + scsi_qla_host_t *pha = to_qla_parent(ha); 1853 1818 1854 1819 if (!fcport->rport) 1855 1820 return; 1856 1821 1857 1822 rport = fcport->rport; 1858 1823 if (defer) { 1859 - spin_lock_irqsave(&fcport->rport_lock, flags); 1824 + spin_lock_irq(ha->host->host_lock); 1860 1825 fcport->drport = rport; 1861 - fcport->rport = NULL; 1862 - *(fc_port_t **)rport->dd_data = NULL; 1863 - spin_unlock_irqrestore(&fcport->rport_lock, flags); 1864 - set_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags); 1865 - } else { 1866 - spin_lock_irqsave(&fcport->rport_lock, flags); 1867 - fcport->rport = NULL; 1868 - *(fc_port_t **)rport->dd_data = NULL; 1869 - spin_unlock_irqrestore(&fcport->rport_lock, flags); 1826 + spin_unlock_irq(ha->host->host_lock); 1827 + set_bit(FCPORT_UPDATE_NEEDED, &pha->dpc_flags); 1828 + qla2xxx_wake_dpc(pha); 1829 + } else 1870 1830 fc_remote_port_delete(rport); 1871 - } 1872 1831 } 1873 1832 1874 1833 /* ··· 1932 1903 scsi_qla_host_t *pha = to_qla_parent(ha); 1933 1904 1934 1905 list_for_each_entry(fcport, &pha->fcports, list) { 1935 - if (ha->vp_idx != 0 && ha->vp_idx != fcport->vp_idx) 1906 + if (ha->vp_idx != fcport->vp_idx) 1936 1907 continue; 1937 1908 /* 1938 1909 * No point in marking the device as lost, if the device is ··· 1940 1911 */ 1941 1912 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD) 1942 1913 continue; 1943 - if (atomic_read(&fcport->state) == FCS_ONLINE) { 1944 - if (defer) 1945 - qla2x00_schedule_rport_del(ha, fcport, defer); 1946 - else if (ha->vp_idx == fcport->vp_idx) 1947 - qla2x00_schedule_rport_del(ha, fcport, defer); 1948 - } 1914 + if (atomic_read(&fcport->state) == FCS_ONLINE) 1915 + qla2x00_schedule_rport_del(ha, fcport, defer); 1949 1916 atomic_set(&fcport->state, FCS_DEVICE_LOST); 1950 1917 } 1951 - 1952 - if (defer) 1953 - qla2xxx_wake_dpc(ha); 1954 1918 } 1955 1919 1956 1920 /* ··· 2178 2156 static int 2179 2157 qla2x00_post_work(struct scsi_qla_host *ha, struct qla_work_evt *e, int locked) 2180 2158 { 2181 - unsigned long flags; 2159 + unsigned long uninitialized_var(flags); 2182 2160 scsi_qla_host_t *pha = to_qla_parent(ha); 2183 2161 2184 2162 if (!locked) ··· 2335 2313 ha->host_no)); 2336 2314 } 2337 2315 2338 - if (test_and_clear_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags)) 2316 + if (test_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags)) { 2339 2317 qla2x00_update_fcports(ha); 2318 + clear_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags); 2319 + } 2340 2320 2341 2321 if (test_and_clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) && 2342 2322 (!(test_and_set_bit(RESET_ACTIVE, &ha->dpc_flags)))) {
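The new qla2x00_abort_fcport_cmds() cannot hold hardware_lock across ha->isp_ops->abort_command(), which issues a mailbox command and may sleep, so it drops and retakes the lock around each abort and simply re-reads the slot on the next iteration. The drop-and-retake pattern:

spin_lock_irqsave(&pha->hardware_lock, flags);
for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
        sp = pha->outstanding_cmds[cnt];
        if (!sp || sp->fcport != fcport)
                continue;
        spin_unlock_irqrestore(&pha->hardware_lock, flags);
        /* abort_command() and the completion wait run unlocked */
        spin_lock_irqsave(&pha->hardware_lock, flags);
}
spin_unlock_irqrestore(&pha->hardware_lock, flags);

The rport teardown hunks make the same simplification as qla_init.c above: the per-fcport rport_lock is gone, and fcport->drport is protected by the midlayer's host_lock, which the queuecommand paths already hold when they test it.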
+45 -3
drivers/scsi/qla2xxx/qla_sup.c
··· 869 869 uint32_t i; 870 870 uint32_t *dwptr; 871 871 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 872 - unsigned long flags; 873 872 874 873 ret = QLA_SUCCESS; 875 874 876 - spin_lock_irqsave(&ha->hardware_lock, flags); 877 875 /* Enable flash write. */ 878 876 WRT_REG_DWORD(&reg->ctrl_status, 879 877 RD_REG_DWORD(&reg->ctrl_status) | CSRX_FLASH_ENABLE); ··· 905 907 WRT_REG_DWORD(&reg->ctrl_status, 906 908 RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE); 907 909 RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */ 908 - spin_unlock_irqrestore(&ha->hardware_lock, flags); 909 910 910 911 return ret; 911 912 } ··· 2300 2303 } 2301 2304 2302 2305 return ret; 2306 + } 2307 + 2308 + static int 2309 + qla2xxx_is_vpd_valid(uint8_t *pos, uint8_t *end) 2310 + { 2311 + if (pos >= end || *pos != 0x82) 2312 + return 0; 2313 + 2314 + pos += 3 + pos[1]; 2315 + if (pos >= end || *pos != 0x90) 2316 + return 0; 2317 + 2318 + pos += 3 + pos[1]; 2319 + if (pos >= end || *pos != 0x78) 2320 + return 0; 2321 + 2322 + return 1; 2323 + } 2324 + 2325 + int 2326 + qla2xxx_get_vpd_field(scsi_qla_host_t *ha, char *key, char *str, size_t size) 2327 + { 2328 + uint8_t *pos = ha->vpd; 2329 + uint8_t *end = pos + ha->vpd_size; 2330 + int len = 0; 2331 + 2332 + if (!IS_FWI2_CAPABLE(ha) || !qla2xxx_is_vpd_valid(pos, end)) 2333 + return 0; 2334 + 2335 + while (pos < end && *pos != 0x78) { 2336 + len = (*pos == 0x82) ? pos[1] : pos[2]; 2337 + 2338 + if (!strncmp(pos, key, strlen(key))) 2339 + break; 2340 + 2341 + if (*pos != 0x90 && *pos != 0x91) 2342 + pos += len; 2343 + 2344 + pos += 3; 2345 + } 2346 + 2347 + if (pos < end - len && *pos != 0x78) 2348 + return snprintf(str, size, "%.*s", len, pos + 3); 2349 + 2350 + return 0; 2303 2351 } 2304 2352 2305 2353 static int
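qla2xxx_get_vpd_field() walks the adapter's PCI VPD image by large-resource tag: 0x82 is the identifier string, 0x90/0x91 are the read-only/read-write field lists, and 0x78 terminates the image; qla2xxx_is_vpd_valid() only checks that these appear in that order. A self-contained sketch of the same walk, assuming the standard <tag><len_lo><len_hi> framing (the driver's parser takes a one-byte-length shortcut for the identifier string):

#include <stddef.h>
#include <stdint.h>

/* Return a pointer to the data of the first item with 'tag', or NULL.
 * Illustrative only; not the driver's exact parser. */
static const uint8_t *vpd_find(const uint8_t *pos, const uint8_t *end,
                               uint8_t tag)
{
        while (pos + 3 <= end && *pos != 0x78) {        /* end tag */
                size_t len = pos[1] | (pos[2] << 8);
                if (*pos == tag)
                        return pos + 3;                 /* item data */
                pos += 3 + len;
        }
        return NULL;
}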
+1 -1
drivers/scsi/qla2xxx/qla_version.h
··· 7 7 /* 8 8 * Driver version 9 9 */ 10 - #define QLA2XXX_VERSION "8.02.01-k4" 10 + #define QLA2XXX_VERSION "8.02.01-k6" 11 11 12 12 #define QLA_DRIVER_MAJOR_VER 8 13 13 #define QLA_DRIVER_MINOR_VER 2
+3 -1
drivers/scsi/qla4xxx/ql4_os.c
··· 46 46 47 47 int ql4_mod_unload = 0; 48 48 49 + #define QL4_DEF_QDEPTH 32 50 + 49 51 /* 50 52 * SCSI host template entry points 51 53 */ ··· 1389 1387 1390 1388 sdev->hostdata = ddb; 1391 1389 sdev->tagged_supported = 1; 1392 - scsi_activate_tcq(sdev, sdev->host->can_queue); 1390 + scsi_activate_tcq(sdev, QL4_DEF_QDEPTH); 1393 1391 return 0; 1394 1392 } 1395 1393
+48 -7
drivers/scsi/scsi.c
··· 197 197 scsi_pool_free_command(struct scsi_host_cmd_pool *pool, 198 198 struct scsi_cmnd *cmd) 199 199 { 200 + if (cmd->prot_sdb) 201 + kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb); 202 + 200 203 kmem_cache_free(pool->sense_slab, cmd->sense_buffer); 201 204 kmem_cache_free(pool->cmd_slab, cmd); 205 + } 206 + 207 + /** 208 + * scsi_host_alloc_command - internal function to allocate command 209 + * @shost: SCSI host whose pool to allocate from 210 + * @gfp_mask: mask for the allocation 211 + * 212 + * Returns a fully allocated command with sense buffer and protection 213 + * data buffer (where applicable) or NULL on failure 214 + */ 215 + static struct scsi_cmnd * 216 + scsi_host_alloc_command(struct Scsi_Host *shost, gfp_t gfp_mask) 217 + { 218 + struct scsi_cmnd *cmd; 219 + 220 + cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask); 221 + if (!cmd) 222 + return NULL; 223 + 224 + if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) { 225 + cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp_mask); 226 + 227 + if (!cmd->prot_sdb) { 228 + scsi_pool_free_command(shost->cmd_pool, cmd); 229 + return NULL; 230 + } 231 + } 232 + 233 + return cmd; 202 234 } 203 235 204 236 /** ··· 246 214 struct scsi_cmnd *cmd; 247 215 unsigned char *buf; 248 216 249 - cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask); 217 + cmd = scsi_host_alloc_command(shost, gfp_mask); 250 218 251 219 if (unlikely(!cmd)) { 252 220 unsigned long flags; ··· 489 457 /* 490 458 * Get one backup command for this host. 491 459 */ 492 460 cmd = scsi_host_alloc_command(shost, gfp_mask); 493 461 if (!cmd) { 494 462 scsi_put_host_cmd_pool(gfp_mask); 495 463 shost->cmd_pool = NULL; ··· 934 902 935 903 spin_lock_irqsave(sdev->request_queue->queue_lock, flags); 936 904 937 - /* Check to see if the queue is managed by the block layer. 938 - * If it is, and we fail to adjust the depth, exit. */ 939 - if (blk_queue_tagged(sdev->request_queue) && 940 - blk_queue_resize_tags(sdev->request_queue, tags) != 0) 941 - goto out; 905 + /* 906 + * Check to see if the queue is managed by the block layer. 907 + * If it is, and we fail to adjust the depth, exit. 908 + * 909 + * Do not resize the tag map if it is a host wide shared bqt, 910 + * because the size should be the host's can_queue. If there 911 + * is more IO than the LLD's can_queue (so there are not enough 912 + * tags) request_fn's host queue ready check will handle it. 913 + */ 914 + if (!sdev->host->bqt) { 915 + if (blk_queue_tagged(sdev->request_queue) && 916 + blk_queue_resize_tags(sdev->request_queue, tags) != 0) 917 + goto out; 918 + } 942 919 943 920 sdev->queue_depth = tags; 944 921 switch (tagged) {
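scsi_host_alloc_command() attaches a second scsi_data_buffer to each command whenever the host advertises any DIX capability, so the protection scatterlist never needs a hot-path allocation. An LLD opts in at registration time; a sketch using the capability helpers this series adds to scsi_host.h (flag names assumed from that header):

/* Advertise target-side DIF and HBA-side DIX, type 1, CRC guard. */
scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
                          SHOST_DIX_TYPE1_PROTECTION);
scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);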
+6 -6
drivers/scsi/scsi_debug.c
··· 1753 1753 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC); 1754 1754 if (!open_devip) { 1755 1755 printk(KERN_ERR "%s: out of memory at line %d\n", 1756 - __FUNCTION__, __LINE__); 1756 + __func__, __LINE__); 1757 1757 return NULL; 1758 1758 } 1759 1759 } ··· 2656 2656 sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL); 2657 2657 if (NULL == sdbg_host) { 2658 2658 printk(KERN_ERR "%s: out of memory at line %d\n", 2659 - __FUNCTION__, __LINE__); 2659 + __func__, __LINE__); 2660 2660 return -ENOMEM; 2661 2661 } 2662 2662 ··· 2667 2667 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL); 2668 2668 if (!sdbg_devinfo) { 2669 2669 printk(KERN_ERR "%s: out of memory at line %d\n", 2670 - __FUNCTION__, __LINE__); 2670 + __func__, __LINE__); 2671 2671 error = -ENOMEM; 2672 2672 goto clean; 2673 2673 } ··· 2987 2987 2988 2988 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host)); 2989 2989 if (NULL == hpnt) { 2990 - printk(KERN_ERR "%s: scsi_register failed\n", __FUNCTION__); 2990 + printk(KERN_ERR "%s: scsi_register failed\n", __func__); 2991 2991 error = -ENODEV; 2992 2992 return error; 2993 2993 } ··· 3002 3002 3003 3003 error = scsi_add_host(hpnt, &sdbg_host->dev); 3004 3004 if (error) { 3005 - printk(KERN_ERR "%s: scsi_add_host failed\n", __FUNCTION__); 3005 + printk(KERN_ERR "%s: scsi_add_host failed\n", __func__); 3006 3006 error = -ENODEV; 3007 3007 scsi_host_put(hpnt); 3008 3008 } else ··· 3021 3021 3022 3022 if (!sdbg_host) { 3023 3023 printk(KERN_ERR "%s: Unable to locate host info\n", 3024 - __FUNCTION__); 3024 + __func__); 3025 3025 return -ENODEV; 3026 3026 } 3027 3027
+3 -3
drivers/scsi/scsi_devinfo.c
··· 272 272 } 273 273 if (from_length > to_length) 274 274 printk(KERN_WARNING "%s: %s string '%s' is too long\n", 275 - __FUNCTION__, name, from); 275 + __func__, name, from); 276 276 } 277 277 278 278 /** ··· 298 298 299 299 devinfo = kmalloc(sizeof(*devinfo), GFP_KERNEL); 300 300 if (!devinfo) { 301 - printk(KERN_ERR "%s: no memory\n", __FUNCTION__); 301 + printk(KERN_ERR "%s: no memory\n", __func__); 302 302 return -ENOMEM; 303 303 } 304 304 ··· 363 363 strflags = strsep(&next, next_check); 364 364 if (!model || !strflags) { 365 365 printk(KERN_ERR "%s: bad dev info string '%s' '%s'" 366 - " '%s'\n", __FUNCTION__, vendor, model, 366 + " '%s'\n", __func__, vendor, model, 367 367 strflags); 368 368 res = -EINVAL; 369 369 } else
+21 -13
drivers/scsi/scsi_error.c
··· 139 139 scmd->eh_timeout.function = (void (*)(unsigned long)) complete; 140 140 141 141 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p, time:" 142 - " %d, (%p)\n", __FUNCTION__, 142 + " %d, (%p)\n", __func__, 143 143 scmd, timeout, complete)); 144 144 145 145 add_timer(&scmd->eh_timeout); ··· 163 163 rtn = del_timer(&scmd->eh_timeout); 164 164 165 165 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p," 166 - " rtn: %d\n", __FUNCTION__, 166 + " rtn: %d\n", __func__, 167 167 scmd, rtn)); 168 168 169 169 scmd->eh_timeout.data = (unsigned long)NULL; ··· 233 233 234 234 online = scsi_device_online(sdev); 235 235 236 - SCSI_LOG_ERROR_RECOVERY(5, printk("%s: rtn: %d\n", __FUNCTION__, 236 + SCSI_LOG_ERROR_RECOVERY(5, printk("%s: rtn: %d\n", __func__, 237 237 online)); 238 238 239 239 return online; ··· 271 271 SCSI_LOG_ERROR_RECOVERY(3, 272 272 sdev_printk(KERN_INFO, sdev, 273 273 "%s: cmds failed: %d, cancel: %d\n", 274 - __FUNCTION__, cmd_failed, 274 + __func__, cmd_failed, 275 275 cmd_cancel)); 276 276 cmd_cancel = 0; 277 277 cmd_failed = 0; ··· 344 344 return /* soft_error */ SUCCESS; 345 345 346 346 case ABORTED_COMMAND: 347 + if (sshdr.asc == 0x10) /* DIF */ 348 + return SUCCESS; 349 + 347 350 return NEEDS_RETRY; 348 351 case NOT_READY: 349 352 case UNIT_ATTENTION: ··· 473 470 474 471 SCSI_LOG_ERROR_RECOVERY(3, 475 472 printk("%s scmd: %p result: %x\n", 476 - __FUNCTION__, scmd, scmd->result)); 473 + __func__, scmd, scmd->result)); 477 474 478 475 eh_action = scmd->device->host->eh_action; 479 476 if (eh_action) ··· 490 487 int rtn; 491 488 492 489 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Host RST\n", 493 - __FUNCTION__)); 490 + __func__)); 494 491 495 492 if (!scmd->device->host->hostt->eh_host_reset_handler) 496 493 return FAILED; ··· 519 516 int rtn; 520 517 521 518 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Bus RST\n", 522 - __FUNCTION__)); 519 + __func__)); 523 520 524 521 if (!scmd->device->host->hostt->eh_bus_reset_handler) 525 522 return FAILED; ··· 667 664 ses->sdb = scmd->sdb; 668 665 ses->next_rq = scmd->request->next_rq; 669 666 ses->result = scmd->result; 667 + ses->underflow = scmd->underflow; 668 + ses->prot_op = scmd->prot_op; 670 669 670 + scmd->prot_op = SCSI_PROT_NORMAL; 671 671 scmd->cmnd = ses->eh_cmnd; 672 672 memset(scmd->cmnd, 0, BLK_MAX_CDB); 673 673 memset(&scmd->sdb, 0, sizeof(scmd->sdb)); ··· 728 722 scmd->sdb = ses->sdb; 729 723 scmd->request->next_rq = ses->next_rq; 730 724 scmd->result = ses->result; 725 + scmd->underflow = ses->underflow; 726 + scmd->prot_op = ses->prot_op; 731 727 } 732 728 EXPORT_SYMBOL(scsi_eh_restore_cmnd); 733 729 ··· 774 766 775 767 SCSI_LOG_ERROR_RECOVERY(3, 776 768 printk("%s: scmd: %p, timeleft: %ld\n", 777 - __FUNCTION__, scmd, timeleft)); 769 + __func__, scmd, timeleft)); 778 770 779 771 /* 780 772 * If there is time left scsi_eh_done got called, and we will ··· 786 778 rtn = scsi_eh_completed_normally(scmd); 787 779 SCSI_LOG_ERROR_RECOVERY(3, 788 780 printk("%s: scsi_eh_completed_normally %x\n", 789 - __FUNCTION__, rtn)); 781 + __func__, rtn)); 790 782 791 783 switch (rtn) { 792 784 case SUCCESS: ··· 921 913 rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, SENSE_TIMEOUT, 0); 922 914 923 915 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n", 924 - __FUNCTION__, scmd, rtn)); 916 + __func__, scmd, rtn)); 925 917 926 918 switch (rtn) { 927 919 case NEEDS_RETRY: ··· 1304 1296 if (!scsi_device_online(scmd->device)) { 1305 1297 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: device offline - report" 1306 1298 " as SUCCESS\n", 1307 - 
__FUNCTION__)); 1299 + __func__)); 1308 1300 return SUCCESS; 1309 1301 } 1310 1302 ··· 1519 1511 * ioctls to queued block devices. 1520 1512 */ 1521 1513 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n", 1522 - __FUNCTION__)); 1514 + __func__)); 1523 1515 1524 1516 spin_lock_irqsave(shost->host_lock, flags); 1525 1517 if (scsi_host_set_state(shost, SHOST_RUNNING)) ··· 1843 1835 */ 1844 1836 SCSI_LOG_ERROR_RECOVERY(3, 1845 1837 printk("%s: waking up host to restart after TMF\n", 1846 - __FUNCTION__)); 1838 + __func__)); 1847 1839 1848 1840 wake_up(&shost->host_wait); 1849 1841
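scsi_eh_prep_cmnd() borrows the failed command's scsi_cmnd to send its TEST UNIT READY / REQUEST SENSE probes, so every field it rewrites must round-trip through struct scsi_eh_save; this merge adds underflow and prot_op to that set, with prot_op forced to SCSI_PROT_NORMAL so no integrity processing fires on the borrowed command. The pairing looks like:

struct scsi_eh_save ses;

scsi_eh_prep_cmnd(scmd, &ses, tur_command, 6, /* sense_bytes */ 0);
/* ... issue the probe and wait for scsi_eh_done() ... */
scsi_eh_restore_cmnd(scmd, &ses);   /* underflow/prot_op put back */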
+49 -6
drivers/scsi/scsi_lib.c
··· 65 65 }; 66 66 #undef SP 67 67 68 - static struct kmem_cache *scsi_sdb_cache; 68 + struct kmem_cache *scsi_sdb_cache; 69 69 70 70 static void scsi_run_queue(struct request_queue *q); 71 71 ··· 787 787 kmem_cache_free(scsi_sdb_cache, bidi_sdb); 788 788 cmd->request->next_rq->special = NULL; 789 789 } 790 + 791 + if (scsi_prot_sg_count(cmd)) 792 + scsi_free_sgtable(cmd->prot_sdb); 790 793 } 791 794 EXPORT_SYMBOL(scsi_release_buffers); 792 795 ··· 950 947 * 6-byte command. 951 948 */ 952 949 scsi_requeue_command(q, cmd); 953 - return; 954 - } else { 950 + } else if (sshdr.asc == 0x10) /* DIX */ 951 + scsi_end_request(cmd, -EIO, this_count, 0); 952 + else 955 953 scsi_end_request(cmd, -EIO, this_count, 1); 954 + return; 955 + case ABORTED_COMMAND: 956 + if (sshdr.asc == 0x10) { /* DIF */ 957 + scsi_end_request(cmd, -EIO, this_count, 0); 956 958 return; 957 959 } 958 960 break; ··· 1078 1070 GFP_ATOMIC); 1079 1071 if (error) 1080 1072 goto err_exit; 1073 + } 1074 + 1075 + if (blk_integrity_rq(cmd->request)) { 1076 + struct scsi_data_buffer *prot_sdb = cmd->prot_sdb; 1077 + int ivecs, count; 1078 + 1079 + BUG_ON(prot_sdb == NULL); 1080 + ivecs = blk_rq_count_integrity_sg(cmd->request); 1081 + 1082 + if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) { 1083 + error = BLKPREP_DEFER; 1084 + goto err_exit; 1085 + } 1086 + 1087 + count = blk_rq_map_integrity_sg(cmd->request, 1088 + prot_sdb->table.sgl); 1089 + BUG_ON(unlikely(count > ivecs)); 1090 + 1091 + cmd->prot_sdb = prot_sdb; 1092 + cmd->prot_sdb->table.nents = count; 1081 1093 } 1082 1094 1083 1095 return BLKPREP_OK ; ··· 1395 1367 1396 1368 if (unlikely(cmd == NULL)) { 1397 1369 printk(KERN_CRIT "impossible request in %s.\n", 1398 - __FUNCTION__); 1370 + __func__); 1399 1371 BUG(); 1400 1372 } 1401 1373 ··· 1519 1491 printk(KERN_CRIT "impossible request in %s.\n" 1520 1492 "please mail a stack trace to " 1521 1493 "linux-scsi@vger.kernel.org\n", 1522 - __FUNCTION__); 1494 + __func__); 1523 1495 blk_dump_rq_flags(req, "foo"); 1524 1496 BUG(); 1525 1497 } 1526 1498 spin_lock(shost->host_lock); 1499 + 1500 + /* 1501 + * We hit this when the driver is using a host wide 1502 + * tag map. For device level tag maps the queue_depth check 1503 + * in the device ready fn would prevent us from trying 1504 + * to allocate a tag. Since the map is a shared host resource 1505 + * we add the dev to the starved list so it eventually gets 1506 + * a run when a tag is freed. 1507 + */ 1508 + if (blk_queue_tagged(q) && !blk_rq_tagged(req)) { 1509 + if (list_empty(&sdev->starved_entry)) 1510 + list_add_tail(&sdev->starved_entry, 1511 + &shost->starved_list); 1512 + goto not_ready; 1513 + } 1527 1514 1528 1515 if (!scsi_host_queue_ready(q, shost, sdev)) 1529 1516 goto not_ready; ··· 2529 2486 if (unlikely(i == sg_count)) { 2530 2487 printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, " 2531 2488 "elements %d\n", 2532 - __FUNCTION__, sg_len, *offset, sg_count); 2489 + __func__, sg_len, *offset, sg_count); 2533 2490 WARN_ON(1); 2534 2491 return NULL; 2535 2492 }
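Two separate fixes land in the request path here. First, prep now counts and maps the request's integrity segments into cmd->prot_sdb (allocated per-command in scsi.c above); second, on a host-wide shared tag map an untagged request parks on the starved list instead of dispatching, because only device-level maps are throttled earlier by the device-ready check. Sketch of the prep-time mapping, names as used above:

ivecs = blk_rq_count_integrity_sg(cmd->request);
if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask))
        return BLKPREP_DEFER;   /* out of memory, retry the request */
count = blk_rq_map_integrity_sg(cmd->request, prot_sdb->table.sgl);
cmd->prot_sdb->table.nents = count;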
+4 -4
drivers/scsi/scsi_netlink.c
··· 55 55 if ((nlh->nlmsg_len < (sizeof(*nlh) + sizeof(*hdr))) || 56 56 (skb->len < nlh->nlmsg_len)) { 57 57 printk(KERN_WARNING "%s: discarding partial skb\n", 58 - __FUNCTION__); 58 + __func__); 59 59 return; 60 60 } 61 61 ··· 82 82 83 83 if (nlh->nlmsg_len < (sizeof(*nlh) + hdr->msglen)) { 84 84 printk(KERN_WARNING "%s: discarding partial message\n", 85 - __FUNCTION__); 85 + __func__); 86 86 return; 87 87 } 88 88 ··· 139 139 error = netlink_register_notifier(&scsi_netlink_notifier); 140 140 if (error) { 141 141 printk(KERN_ERR "%s: register of event handler failed - %d\n", 142 - __FUNCTION__, error); 142 + __func__, error); 143 143 return; 144 144 } 145 145 ··· 148 148 THIS_MODULE); 149 149 if (!scsi_nl_sock) { 150 150 printk(KERN_ERR "%s: register of receive handler failed\n", 151 - __FUNCTION__); 151 + __func__); 152 152 netlink_unregister_notifier(&scsi_netlink_notifier); 153 153 } 154 154
+1
drivers/scsi/scsi_priv.h
··· 77 77 struct request_queue; 78 78 struct request; 79 79 extern int scsi_prep_fn(struct request_queue *, struct request *); 80 + extern struct kmem_cache *scsi_sdb_cache; 80 81 81 82 /* scsi_proc.c */ 82 83 #ifdef CONFIG_SCSI_PROC_FS
+2 -2
drivers/scsi/scsi_proc.c
··· 114 114 sht->proc_dir = proc_mkdir(sht->proc_name, proc_scsi); 115 115 if (!sht->proc_dir) 116 116 printk(KERN_ERR "%s: proc_mkdir failed for %s\n", 117 - __FUNCTION__, sht->proc_name); 117 + __func__, sht->proc_name); 118 118 else 119 119 sht->proc_dir->owner = sht->module; 120 120 } ··· 157 157 sht->proc_dir, proc_scsi_read, shost); 158 158 if (!p) { 159 159 printk(KERN_ERR "%s: Failed to register host %d in" 160 - "%s\n", __FUNCTION__, shost->host_no, 160 + "%s\n", __func__, shost->host_no, 161 161 sht->proc_name); 162 162 return; 163 163 }
+7 -6
drivers/scsi/scsi_scan.c
··· 318 318 put_device(&sdev->sdev_gendev); 319 319 out: 320 320 if (display_failure_msg) 321 - printk(ALLOC_FAILURE_MSG, __FUNCTION__); 321 + printk(ALLOC_FAILURE_MSG, __func__); 322 322 return NULL; 323 323 } 324 324 ··· 404 404 405 405 starget = kzalloc(size, GFP_KERNEL); 406 406 if (!starget) { 407 - printk(KERN_ERR "%s: allocation failure\n", __FUNCTION__); 407 + printk(KERN_ERR "%s: allocation failure\n", __func__); 408 408 return NULL; 409 409 } 410 410 dev = &starget->dev; ··· 1337 1337 lun_data = kmalloc(length, GFP_ATOMIC | 1338 1338 (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0)); 1339 1339 if (!lun_data) { 1340 - printk(ALLOC_FAILURE_MSG, __FUNCTION__); 1340 + printk(ALLOC_FAILURE_MSG, __func__); 1341 1341 goto out; 1342 1342 } 1343 1343 ··· 1649 1649 { 1650 1650 SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost, 1651 1651 "%s: <%u:%u:%u>\n", 1652 - __FUNCTION__, channel, id, lun)); 1652 + __func__, channel, id, lun)); 1653 1653 1654 1654 if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) || 1655 1655 ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) || ··· 1703 1703 return NULL; 1704 1704 1705 1705 if (shost->async_scan) { 1706 - printk("%s called twice for host %d", __FUNCTION__, 1706 + printk("%s called twice for host %d", __func__, 1707 1707 shost->host_no); 1708 1708 dump_stack(); 1709 1709 return NULL; ··· 1757 1757 mutex_lock(&shost->scan_mutex); 1758 1758 1759 1759 if (!shost->async_scan) { 1760 - printk("%s called twice for host %d", __FUNCTION__, 1760 + printk("%s called twice for host %d", __func__, 1761 1761 shost->host_no); 1762 1762 dump_stack(); 1763 + mutex_unlock(&shost->scan_mutex); 1763 1764 return; 1764 1765 } 1765 1766
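The scan fix here is a classic early-return lock leak: scsi_finish_async_scan() takes scan_mutex and then bailed out of the double-call path without releasing it. After the fix the error path reads:

mutex_lock(&shost->scan_mutex);
if (!shost->async_scan) {
        printk("%s called twice for host %d", __func__,
               shost->host_no);
        dump_stack();
        mutex_unlock(&shost->scan_mutex);       /* the added unlock */
        return;
}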
+4
drivers/scsi/scsi_sysfs.c
··· 249 249 shost_rd_attr(can_queue, "%hd\n"); 250 250 shost_rd_attr(sg_tablesize, "%hu\n"); 251 251 shost_rd_attr(unchecked_isa_dma, "%d\n"); 252 + shost_rd_attr(prot_capabilities, "%u\n"); 253 + shost_rd_attr(prot_guard_type, "%hd\n"); 252 254 shost_rd_attr2(proc_name, hostt->proc_name, "%s\n"); 253 255 254 256 static struct attribute *scsi_sysfs_shost_attrs[] = { ··· 265 263 &dev_attr_hstate.attr, 266 264 &dev_attr_supported_mode.attr, 267 265 &dev_attr_active_mode.attr, 266 + &dev_attr_prot_capabilities.attr, 267 + &dev_attr_prot_guard_type.attr, 268 268 NULL 269 269 }; 270 270
+1 -1
drivers/scsi/scsi_tgt_priv.h
··· 6 6 /* tmp - will replace with SCSI logging stuff */ 7 7 #define eprintk(fmt, args...) \ 8 8 do { \ 9 - printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args); \ 9 + printk("%s(%d) " fmt, __func__, __LINE__, ##args); \ 10 10 } while (0) 11 11 12 12 #define dprintk(fmt, args...)
+6 -6
drivers/scsi/scsi_transport_fc.c
··· 571 571 name = get_fc_host_event_code_name(event_code); 572 572 printk(KERN_WARNING 573 573 "%s: Dropped Event : host %d %s data 0x%08x - err %d\n", 574 - __FUNCTION__, shost->host_no, 574 + __func__, shost->host_no, 575 575 (name) ? name : "<unknown>", event_data, err); 576 576 return; 577 577 } ··· 644 644 send_vendor_fail: 645 645 printk(KERN_WARNING 646 646 "%s: Dropped Event : host %d vendor_unique - err %d\n", 647 - __FUNCTION__, shost->host_no, err); 647 + __func__, shost->host_no, err); 648 648 return; 649 649 } 650 650 EXPORT_SYMBOL(fc_host_post_vendor_event); ··· 2464 2464 size = (sizeof(struct fc_rport) + fci->f->dd_fcrport_size); 2465 2465 rport = kzalloc(size, GFP_KERNEL); 2466 2466 if (unlikely(!rport)) { 2467 - printk(KERN_ERR "%s: allocation failure\n", __FUNCTION__); 2467 + printk(KERN_ERR "%s: allocation failure\n", __func__); 2468 2468 return NULL; 2469 2469 } 2470 2470 ··· 3137 3137 size = (sizeof(struct fc_vport) + fci->f->dd_fcvport_size); 3138 3138 vport = kzalloc(size, GFP_KERNEL); 3139 3139 if (unlikely(!vport)) { 3140 - printk(KERN_ERR "%s: allocation failure\n", __FUNCTION__); 3140 + printk(KERN_ERR "%s: allocation failure\n", __func__); 3141 3141 return -ENOMEM; 3142 3142 } 3143 3143 ··· 3201 3201 printk(KERN_ERR 3202 3202 "%s: Cannot create vport symlinks for " 3203 3203 "%s, err=%d\n", 3204 - __FUNCTION__, dev->bus_id, error); 3204 + __func__, dev->bus_id, error); 3205 3205 } 3206 3206 spin_lock_irqsave(shost->host_lock, flags); 3207 3207 vport->flags &= ~FC_VPORT_CREATING; ··· 3314 3314 if (stat) 3315 3315 dev_printk(KERN_ERR, vport->dev.parent, 3316 3316 "%s: %s could not be deleted created via " 3317 - "shost%d channel %d - error %d\n", __FUNCTION__, 3317 + "shost%d channel %d - error %d\n", __func__, 3318 3318 vport->dev.bus_id, vport->shost->host_no, 3319 3319 vport->channel, stat); 3320 3320 }
+2 -2
drivers/scsi/scsi_transport_sas.c
··· 779 779 return; 780 780 err: 781 781 printk(KERN_ERR "%s: Cannot create port links, err=%d\n", 782 - __FUNCTION__, res); 782 + __func__, res); 783 783 } 784 784 785 785 static void sas_port_delete_link(struct sas_port *port, ··· 1029 1029 return; 1030 1030 err: 1031 1031 printk(KERN_ERR "%s: Cannot create port backlink, err=%d\n", 1032 - __FUNCTION__, res); 1032 + __func__, res); 1033 1033 1034 1034 } 1035 1035 EXPORT_SYMBOL(sas_port_mark_backlink);
+228 -63
drivers/scsi/sd.c
··· 99 99 static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *); 100 100 static void sd_print_result(struct scsi_disk *, int); 101 101 102 - static DEFINE_IDR(sd_index_idr); 103 - static DEFINE_SPINLOCK(sd_index_lock); 102 + static DEFINE_IDA(sd_index_ida); 104 103 105 104 /* This semaphore is used to mediate the 0->1 reference get in the 106 105 * face of object destruction (i.e. we can't allow a get on an ··· 233 234 return snprintf(buf, 40, "%d\n", sdkp->device->allow_restart); 234 235 } 235 236 237 + static ssize_t 238 + sd_show_protection_type(struct device *dev, struct device_attribute *attr, 239 + char *buf) 240 + { 241 + struct scsi_disk *sdkp = to_scsi_disk(dev); 242 + 243 + return snprintf(buf, 20, "%u\n", sdkp->protection_type); 244 + } 245 + 246 + static ssize_t 247 + sd_show_app_tag_own(struct device *dev, struct device_attribute *attr, 248 + char *buf) 249 + { 250 + struct scsi_disk *sdkp = to_scsi_disk(dev); 251 + 252 + return snprintf(buf, 20, "%u\n", sdkp->ATO); 253 + } 254 + 236 255 static struct device_attribute sd_disk_attrs[] = { 237 256 __ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type, 238 257 sd_store_cache_type), ··· 259 242 sd_store_allow_restart), 260 243 __ATTR(manage_start_stop, S_IRUGO|S_IWUSR, sd_show_manage_start_stop, 261 244 sd_store_manage_start_stop), 245 + __ATTR(protection_type, S_IRUGO, sd_show_protection_type, NULL), 246 + __ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL), 262 247 __ATTR_NULL, 263 248 }; 264 249 ··· 373 354 struct scsi_cmnd *SCpnt; 374 355 struct scsi_device *sdp = q->queuedata; 375 356 struct gendisk *disk = rq->rq_disk; 357 + struct scsi_disk *sdkp; 376 358 sector_t block = rq->sector; 359 + sector_t threshold; 377 360 unsigned int this_count = rq->nr_sectors; 378 361 unsigned int timeout = sdp->timeout; 379 362 int ret; ··· 391 370 if (ret != BLKPREP_OK) 392 371 goto out; 393 372 SCpnt = rq->special; 373 + sdkp = scsi_disk(disk); 394 374 395 375 /* from here on until we're complete, any goto out 396 376 * is used for a killable error condition */ ··· 423 401 } 424 402 425 403 /* 426 - * Some devices (some sdcards for one) don't like it if the 427 - * last sector gets read in a larger then 1 sector read. 404 + * Some SD card readers can't handle multi-sector accesses which touch 405 + * the last one or two hardware sectors. Split accesses as needed. 
428 406 */ 429 - if (unlikely(sdp->last_sector_bug && 430 - rq->nr_sectors > sdp->sector_size / 512 && 431 - block + this_count == get_capacity(disk))) 432 - this_count -= sdp->sector_size / 512; 407 + threshold = get_capacity(disk) - SD_LAST_BUGGY_SECTORS * 408 + (sdp->sector_size / 512); 409 + 410 + if (unlikely(sdp->last_sector_bug && block + this_count > threshold)) { 411 + if (block < threshold) { 412 + /* Access up to the threshold but not beyond */ 413 + this_count = threshold - block; 414 + } else { 415 + /* Access only a single hardware sector */ 416 + this_count = sdp->sector_size / 512; 417 + } 418 + } 433 419 434 420 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, "block=%llu\n", 435 421 (unsigned long long)block)); ··· 489 459 } 490 460 SCpnt->cmnd[0] = WRITE_6; 491 461 SCpnt->sc_data_direction = DMA_TO_DEVICE; 462 + 463 + if (blk_integrity_rq(rq) && 464 + sd_dif_prepare(rq, block, sdp->sector_size) == -EIO) 465 + goto out; 466 + 492 467 } else if (rq_data_dir(rq) == READ) { 493 468 SCpnt->cmnd[0] = READ_6; 494 469 SCpnt->sc_data_direction = DMA_FROM_DEVICE; ··· 508 473 "writing" : "reading", this_count, 509 474 rq->nr_sectors)); 510 475 511 - SCpnt->cmnd[1] = 0; 512 - 476 + /* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */ 477 + if (scsi_host_dif_capable(sdp->host, sdkp->protection_type)) 478 + SCpnt->cmnd[1] = 1 << 5; 479 + else 480 + SCpnt->cmnd[1] = 0; 481 + 513 482 if (block > 0xffffffff) { 514 483 SCpnt->cmnd[0] += READ_16 - READ_6; 515 484 SCpnt->cmnd[1] |= blk_fua_rq(rq) ? 0x8 : 0; ··· 531 492 SCpnt->cmnd[13] = (unsigned char) this_count & 0xff; 532 493 SCpnt->cmnd[14] = SCpnt->cmnd[15] = 0; 533 494 } else if ((this_count > 0xff) || (block > 0x1fffff) || 495 + scsi_device_protection(SCpnt->device) || 534 496 SCpnt->device->use_10_for_rw) { 535 497 if (this_count > 0xffff) 536 498 this_count = 0xffff; ··· 565 525 SCpnt->cmnd[5] = 0; 566 526 } 567 527 SCpnt->sdb.length = this_count * sdp->sector_size; 528 + 529 + /* If DIF or DIX is enabled, tell HBA how to handle request */ 530 + if (sdkp->protection_type || scsi_prot_sg_count(SCpnt)) 531 + sd_dif_op(SCpnt, sdkp->protection_type, scsi_prot_sg_count(SCpnt)); 568 532 569 533 /* 570 534 * We shouldn't disconnect in the middle of a sector, so with a dumb ··· 964 920 .revalidate_disk = sd_revalidate_disk, 965 921 }; 966 922 923 + static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd) 924 + { 925 + u64 start_lba = scmd->request->sector; 926 + u64 end_lba = scmd->request->sector + (scsi_bufflen(scmd) / 512); 927 + u64 bad_lba; 928 + int info_valid; 929 + 930 + if (!blk_fs_request(scmd->request)) 931 + return 0; 932 + 933 + info_valid = scsi_get_sense_info_fld(scmd->sense_buffer, 934 + SCSI_SENSE_BUFFERSIZE, 935 + &bad_lba); 936 + if (!info_valid) 937 + return 0; 938 + 939 + if (scsi_bufflen(scmd) <= scmd->device->sector_size) 940 + return 0; 941 + 942 + if (scmd->device->sector_size < 512) { 943 + /* only legitimate sector_size here is 256 */ 944 + start_lba <<= 1; 945 + end_lba <<= 1; 946 + } else { 947 + /* be careful ... don't want any overflows */ 948 + u64 factor = scmd->device->sector_size / 512; 949 + do_div(start_lba, factor); 950 + do_div(end_lba, factor); 951 + } 952 + 953 + /* The bad lba was reported incorrectly, we have no idea where 954 + * the error is. 955 + */ 956 + if (bad_lba < start_lba || bad_lba >= end_lba) 957 + return 0; 958 + 959 + /* This computation should always be done in terms of 960 + * the resolution of the device's medium. 
961 + */ 962 + return (bad_lba - start_lba) * scmd->device->sector_size; 963 + } 964 + 967 965 /** 968 966 * sd_done - bottom half handler: called when the lower level 969 967 * driver has completed (successfully or otherwise) a scsi command. ··· 1016 930 static int sd_done(struct scsi_cmnd *SCpnt) 1017 931 { 1018 932 int result = SCpnt->result; 1019 - unsigned int xfer_size = scsi_bufflen(SCpnt); 1020 - unsigned int good_bytes = result ? 0 : xfer_size; 1021 - u64 start_lba = SCpnt->request->sector; 1022 - u64 end_lba = SCpnt->request->sector + (xfer_size / 512); 1023 - u64 bad_lba; 933 + unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt); 1024 934 struct scsi_sense_hdr sshdr; 1025 935 int sense_valid = 0; 1026 936 int sense_deferred = 0; 1027 - int info_valid; 1028 937 1029 938 if (result) { 1030 939 sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr); ··· 1044 963 switch (sshdr.sense_key) { 1045 964 case HARDWARE_ERROR: 1046 965 case MEDIUM_ERROR: 1047 - if (!blk_fs_request(SCpnt->request)) 1048 - goto out; 1049 - info_valid = scsi_get_sense_info_fld(SCpnt->sense_buffer, 1050 - SCSI_SENSE_BUFFERSIZE, 1051 - &bad_lba); 1052 - if (!info_valid) 1053 - goto out; 1054 - if (xfer_size <= SCpnt->device->sector_size) 1055 - goto out; 1056 - if (SCpnt->device->sector_size < 512) { 1057 - /* only legitimate sector_size here is 256 */ 1058 - start_lba <<= 1; 1059 - end_lba <<= 1; 1060 - } else { 1061 - /* be careful ... don't want any overflows */ 1062 - u64 factor = SCpnt->device->sector_size / 512; 1063 - do_div(start_lba, factor); 1064 - do_div(end_lba, factor); 1065 - } 1066 - 1067 - if (bad_lba < start_lba || bad_lba >= end_lba) 1068 - /* the bad lba was reported incorrectly, we have 1069 - * no idea where the error is 1070 - */ 1071 - goto out; 1072 - 1073 - /* This computation should always be done in terms of 1074 - * the resolution of the device's medium. 1075 - */ 1076 - good_bytes = (bad_lba - start_lba)*SCpnt->device->sector_size; 966 + good_bytes = sd_completed_bytes(SCpnt); 1077 967 break; 1078 968 case RECOVERED_ERROR: 1079 969 case NO_SENSE: ··· 1054 1002 scsi_print_sense("sd", SCpnt); 1055 1003 SCpnt->result = 0; 1056 1004 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 1057 - good_bytes = xfer_size; 1005 + good_bytes = scsi_bufflen(SCpnt); 1006 + break; 1007 + case ABORTED_COMMAND: 1008 + if (sshdr.asc == 0x10) { /* DIF: Disk detected corruption */ 1009 + scsi_print_result(SCpnt); 1010 + scsi_print_sense("sd", SCpnt); 1011 + good_bytes = sd_completed_bytes(SCpnt); 1012 + } 1058 1013 break; 1059 1014 case ILLEGAL_REQUEST: 1060 - if (SCpnt->device->use_10_for_rw && 1015 + if (sshdr.asc == 0x10) { /* DIX: HBA detected corruption */ 1016 + scsi_print_result(SCpnt); 1017 + scsi_print_sense("sd", SCpnt); 1018 + good_bytes = sd_completed_bytes(SCpnt); 1019 + } 1020 + if (!scsi_device_protection(SCpnt->device) && 1021 + SCpnt->device->use_10_for_rw && 1061 1022 (SCpnt->cmnd[0] == READ_10 || 1062 1023 SCpnt->cmnd[0] == WRITE_10)) 1063 1024 SCpnt->device->use_10_for_rw = 0; ··· 1083 1018 break; 1084 1019 } 1085 1020 out: 1021 + if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt)) 1022 + sd_dif_complete(SCpnt, good_bytes); 1023 + 1086 1024 return good_bytes; 1087 1025 } 1088 1026 ··· 1233 1165 } 1234 1166 } 1235 1167 1168 + 1169 + /* 1170 + * Determine whether disk supports Data Integrity Field. 
1171 + */ 1172 + void sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer) 1173 + { 1174 + struct scsi_device *sdp = sdkp->device; 1175 + u8 type; 1176 + 1177 + if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) 1178 + type = 0; 1179 + else 1180 + type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */ 1181 + 1182 + switch (type) { 1183 + case SD_DIF_TYPE0_PROTECTION: 1184 + sdkp->protection_type = 0; 1185 + break; 1186 + 1187 + case SD_DIF_TYPE1_PROTECTION: 1188 + case SD_DIF_TYPE3_PROTECTION: 1189 + sdkp->protection_type = type; 1190 + break; 1191 + 1192 + case SD_DIF_TYPE2_PROTECTION: 1193 + sd_printk(KERN_ERR, sdkp, "formatted with DIF Type 2 " \ 1194 + "protection which is currently unsupported. " \ 1195 + "Disabling disk!\n"); 1196 + goto disable; 1197 + 1198 + default: 1199 + sd_printk(KERN_ERR, sdkp, "formatted with unknown " \ 1200 + "protection type %d. Disabling disk!\n", type); 1201 + goto disable; 1202 + } 1203 + 1204 + return; 1205 + 1206 + disable: 1207 + sdkp->protection_type = 0; 1208 + sdkp->capacity = 0; 1209 + } 1210 + 1236 1211 /* 1237 1212 * read disk capacity 1238 1213 */ ··· 1285 1174 unsigned char cmd[16]; 1286 1175 int the_result, retries; 1287 1176 int sector_size = 0; 1288 - int longrc = 0; 1177 + /* Force READ CAPACITY(16) when PROTECT=1 */ 1178 + int longrc = scsi_device_protection(sdkp->device) ? 1 : 0; 1289 1179 struct scsi_sense_hdr sshdr; 1290 1180 int sense_valid = 0; 1291 1181 struct scsi_device *sdp = sdkp->device; ··· 1298 1186 memset((void *) cmd, 0, 16); 1299 1187 cmd[0] = SERVICE_ACTION_IN; 1300 1188 cmd[1] = SAI_READ_CAPACITY_16; 1301 - cmd[13] = 12; 1302 - memset((void *) buffer, 0, 12); 1189 + cmd[13] = 13; 1190 + memset((void *) buffer, 0, 13); 1303 1191 } else { 1304 1192 cmd[0] = READ_CAPACITY; 1305 1193 memset((void *) &cmd[1], 0, 9); ··· 1307 1195 } 1308 1196 1309 1197 the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE, 1310 - buffer, longrc ? 12 : 8, &sshdr, 1198 + buffer, longrc ? 13 : 8, &sshdr, 1311 1199 SD_TIMEOUT, SD_MAX_RETRIES); 1312 1200 1313 1201 if (media_not_present(sdkp, &sshdr)) ··· 1382 1270 1383 1271 sector_size = (buffer[8] << 24) | 1384 1272 (buffer[9] << 16) | (buffer[10] << 8) | buffer[11]; 1273 + 1274 + sd_read_protection_type(sdkp, buffer); 1385 1275 } 1386 1276 1387 1277 /* Some devices return the total number of sectors, not the ··· 1645 1531 sdkp->DPOFUA = 0; 1646 1532 } 1647 1533 1534 + /* 1535 + * The ATO bit indicates whether the DIF application tag is available 1536 + * for use by the operating system. 
1537 + */ 1538 + void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer) 1539 + { 1540 + int res, offset; 1541 + struct scsi_device *sdp = sdkp->device; 1542 + struct scsi_mode_data data; 1543 + struct scsi_sense_hdr sshdr; 1544 + 1545 + if (sdp->type != TYPE_DISK) 1546 + return; 1547 + 1548 + if (sdkp->protection_type == 0) 1549 + return; 1550 + 1551 + res = scsi_mode_sense(sdp, 1, 0x0a, buffer, 36, SD_TIMEOUT, 1552 + SD_MAX_RETRIES, &data, &sshdr); 1553 + 1554 + if (!scsi_status_is_good(res) || !data.header_length || 1555 + data.length < 6) { 1556 + sd_printk(KERN_WARNING, sdkp, 1557 + "getting Control mode page failed, assume no ATO\n"); 1558 + 1559 + if (scsi_sense_valid(&sshdr)) 1560 + sd_print_sense_hdr(sdkp, &sshdr); 1561 + 1562 + return; 1563 + } 1564 + 1565 + offset = data.header_length + data.block_descriptor_length; 1566 + 1567 + if ((buffer[offset] & 0x3f) != 0x0a) { 1568 + sd_printk(KERN_ERR, sdkp, "ATO Got wrong page\n"); 1569 + return; 1570 + } 1571 + 1572 + if ((buffer[offset + 5] & 0x80) == 0) 1573 + return; 1574 + 1575 + sdkp->ATO = 1; 1576 + 1577 + return; 1578 + } 1579 + 1648 1580 /** 1649 1581 * sd_revalidate_disk - called the first time a new disk is seen, 1650 1582 * performs disk spin up, read_capacity, etc. ··· 1727 1567 sdkp->write_prot = 0; 1728 1568 sdkp->WCE = 0; 1729 1569 sdkp->RCD = 0; 1570 + sdkp->ATO = 0; 1730 1571 1731 1572 sd_spinup_disk(sdkp); 1732 1573 ··· 1739 1578 sd_read_capacity(sdkp, buffer); 1740 1579 sd_read_write_protect_flag(sdkp, buffer); 1741 1580 sd_read_cache_type(sdkp, buffer); 1581 + sd_read_app_tag_own(sdkp, buffer); 1742 1582 } 1743 1583 1744 1584 /* ··· 1805 1643 if (!gd) 1806 1644 goto out_free; 1807 1645 1808 - if (!idr_pre_get(&sd_index_idr, GFP_KERNEL)) 1809 - goto out_put; 1646 + do { 1647 + if (!ida_pre_get(&sd_index_ida, GFP_KERNEL)) 1648 + goto out_put; 1810 1649 1811 - spin_lock(&sd_index_lock); 1812 - error = idr_get_new(&sd_index_idr, NULL, &index); 1813 - spin_unlock(&sd_index_lock); 1650 + error = ida_get_new(&sd_index_ida, &index); 1651 + } while (error == -EAGAIN); 1814 1652 1815 - if (index >= SD_MAX_DISKS) 1816 - error = -EBUSY; 1817 1653 if (error) 1818 1654 goto out_put; 1655 + 1656 + error = -EBUSY; 1657 + if (index >= SD_MAX_DISKS) 1658 + goto out_free_index; 1819 1659 1820 1660 sdkp->device = sdp; 1821 1661 sdkp->driver = &sd_template; ··· 1839 1675 strncpy(sdkp->dev.bus_id, sdp->sdev_gendev.bus_id, BUS_ID_SIZE); 1840 1676 1841 1677 if (device_add(&sdkp->dev)) 1842 - goto out_put; 1678 + goto out_free_index; 1843 1679 1844 1680 get_device(&sdp->sdev_gendev); 1845 1681 ··· 1875 1711 1876 1712 dev_set_drvdata(dev, sdkp); 1877 1713 add_disk(gd); 1714 + sd_dif_config_host(sdkp); 1878 1715 1879 1716 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", 1880 1717 sdp->removable ? "removable " : ""); 1881 1718 1882 1719 return 0; 1883 1720 1721 + out_free_index: 1722 + ida_remove(&sd_index_ida, index); 1884 1723 out_put: 1885 1724 put_disk(gd); 1886 1725 out_free: ··· 1933 1766 struct scsi_disk *sdkp = to_scsi_disk(dev); 1934 1767 struct gendisk *disk = sdkp->disk; 1935 1768 1936 - spin_lock(&sd_index_lock); 1937 - idr_remove(&sd_index_idr, sdkp->index); 1938 - spin_unlock(&sd_index_lock); 1769 + ida_remove(&sd_index_ida, sdkp->index); 1939 1770 1940 1771 disk->private_data = NULL; 1941 1772 put_disk(disk);
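sd_read_protection_type() decodes byte 12 of the READ CAPACITY(16) payload, which is why the transfer length grows from 12 to 13 bytes and why a protection-capable device now forces the 16-byte variant: bit 0 is PROT_EN and bits 3:1 are P_TYPE, with the spec's Type n stored as P_TYPE n-1. Standalone decode matching that logic:

/* Returns 0 for an unprotected disk, else P_TYPE + 1 (DIF type). */
static unsigned int dif_type(const unsigned char *buf)
{
        if (!(buf[12] & 1))             /* PROT_EN clear */
                return 0;
        return ((buf[12] >> 1) & 7) + 1;  /* P_TYPE 0 => Type 1 */
}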
+54
drivers/scsi/sd.h
··· 31 31 */ 32 32 #define SD_BUF_SIZE 512 33 33 34 + /* 35 + * Number of sectors at the end of the device to avoid multi-sector 36 + * accesses in the case of last_sector_bug 37 + */ 38 + #define SD_LAST_BUGGY_SECTORS 8 39 + 34 40 struct scsi_disk { 35 41 struct scsi_driver *driver; /* always &sd_template */ 36 42 struct scsi_device *device; ··· 47 41 u32 index; 48 42 u8 media_present; 49 43 u8 write_prot; 44 + u8 protection_type; /* Data Integrity Field */ 50 45 unsigned previous_state : 1; 46 + unsigned ATO : 1; /* state of disk ATO bit */ 51 47 unsigned WCE : 1; /* state of disk WCE bit */ 52 48 unsigned RCD : 1; /* state of disk RCD bit, unused */ 53 49 unsigned DPOFUA : 1; /* state of disk DPOFUA bit */ ··· 66 58 sdev_printk(prefix, (sdsk)->device, "[%s] " fmt, \ 67 59 (sdsk)->disk->disk_name, ##a) : \ 68 60 sdev_printk(prefix, (sdsk)->device, fmt, ##a) 61 + 62 + /* 63 + * A DIF-capable target device can be formatted with different 64 + * protection schemes. Currently 0 through 3 are defined: 65 + * 66 + * Type 0 is regular (unprotected) I/O 67 + * 68 + * Type 1 defines the contents of the guard and reference tags 69 + * 70 + * Type 2 defines the contents of the guard and reference tags and 71 + * uses 32-byte commands to seed the latter 72 + * 73 + * Type 3 defines the contents of the guard tag only 74 + */ 75 + 76 + enum sd_dif_target_protection_types { 77 + SD_DIF_TYPE0_PROTECTION = 0x0, 78 + SD_DIF_TYPE1_PROTECTION = 0x1, 79 + SD_DIF_TYPE2_PROTECTION = 0x2, 80 + SD_DIF_TYPE3_PROTECTION = 0x3, 81 + }; 82 + 83 + /* 84 + * Data Integrity Field tuple. 85 + */ 86 + struct sd_dif_tuple { 87 + __be16 guard_tag; /* Checksum */ 88 + __be16 app_tag; /* Opaque storage */ 89 + __be32 ref_tag; /* Target LBA or indirect LBA */ 90 + }; 91 + 92 + #if defined(CONFIG_BLK_DEV_INTEGRITY) 93 + 94 + extern void sd_dif_op(struct scsi_cmnd *, unsigned int, unsigned int); 95 + extern void sd_dif_config_host(struct scsi_disk *); 96 + extern int sd_dif_prepare(struct request *rq, sector_t, unsigned int); 97 + extern void sd_dif_complete(struct scsi_cmnd *, unsigned int); 98 + 99 + #else /* CONFIG_BLK_DEV_INTEGRITY */ 100 + 101 + #define sd_dif_op(a, b, c) do { } while (0) 102 + #define sd_dif_config_host(a) do { } while (0) 103 + #define sd_dif_prepare(a, b, c) (0) 104 + #define sd_dif_complete(a, b) (0) 105 + 106 + #endif /* CONFIG_BLK_DEV_INTEGRITY */ 69 107 70 108 #endif /* _SCSI_DISK_H */
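Each 512-byte sector of a DIF-formatted disk carries one of these 8-byte tuples; for Type 1 the guard tag is the T10 CRC of the sector data and the reference tag is the low 32 bits of the target LBA, both big-endian on the wire. A sketch of one filled-in tuple ('data' and 'lba' are assumed locals; crc_t10dif() is the lib/crc-t10dif helper this series depends on):

struct sd_dif_tuple sdt = {
        .guard_tag = cpu_to_be16(crc_t10dif(data, 512)),
        .app_tag   = 0,                         /* opaque storage */
        .ref_tag   = cpu_to_be32(lba & 0xffffffff),
};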
+538
drivers/scsi/sd_dif.c
··· 1 + /*
2 + * sd_dif.c - SCSI Data Integrity Field
3 + *
4 + * Copyright (C) 2007, 2008 Oracle Corporation
5 + * Written by: Martin K. Petersen <martin.petersen@oracle.com>
6 + *
7 + * This program is free software; you can redistribute it and/or
8 + * modify it under the terms of the GNU General Public License version
9 + * 2 as published by the Free Software Foundation.
10 + *
11 + * This program is distributed in the hope that it will be useful, but
12 + * WITHOUT ANY WARRANTY; without even the implied warranty of
13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 + * General Public License for more details.
15 + *
16 + * You should have received a copy of the GNU General Public License
17 + * along with this program; see the file COPYING. If not, write to
18 + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
19 + * USA.
20 + *
21 + */
22 +
23 + #include <linux/blkdev.h>
24 + #include <linux/crc-t10dif.h>
25 +
26 + #include <scsi/scsi.h>
27 + #include <scsi/scsi_cmnd.h>
28 + #include <scsi/scsi_dbg.h>
29 + #include <scsi/scsi_device.h>
30 + #include <scsi/scsi_driver.h>
31 + #include <scsi/scsi_eh.h>
32 + #include <scsi/scsi_host.h>
33 + #include <scsi/scsi_ioctl.h>
34 + #include <scsi/scsicam.h>
35 +
36 + #include <net/checksum.h>
37 +
38 + #include "sd.h"
39 +
40 + typedef __u16 (csum_fn) (void *, unsigned int);
41 +
42 + static __u16 sd_dif_crc_fn(void *data, unsigned int len)
43 + {
44 + return cpu_to_be16(crc_t10dif(data, len));
45 + }
46 +
47 + static __u16 sd_dif_ip_fn(void *data, unsigned int len)
48 + {
49 + return ip_compute_csum(data, len);
50 + }
51 +
52 + /*
53 + * Type 1 and Type 2 protection use the same format: 16 bit guard tag,
54 + * 16 bit app tag, 32 bit reference tag.
55 + */
56 + static void sd_dif_type1_generate(struct blk_integrity_exchg *bix, csum_fn *fn)
57 + {
58 + void *buf = bix->data_buf;
59 + struct sd_dif_tuple *sdt = bix->prot_buf;
60 + sector_t sector = bix->sector;
61 + unsigned int i;
62 +
63 + for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
64 + sdt->guard_tag = fn(buf, bix->sector_size);
65 + sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
66 + sdt->app_tag = 0;
67 +
68 + buf += bix->sector_size;
69 + sector++;
70 + }
71 + }
72 +
73 + static void sd_dif_type1_generate_crc(struct blk_integrity_exchg *bix)
74 + {
75 + sd_dif_type1_generate(bix, sd_dif_crc_fn);
76 + }
77 +
78 + static void sd_dif_type1_generate_ip(struct blk_integrity_exchg *bix)
79 + {
80 + sd_dif_type1_generate(bix, sd_dif_ip_fn);
81 + }
82 +
83 + static int sd_dif_type1_verify(struct blk_integrity_exchg *bix, csum_fn *fn)
84 + {
85 + void *buf = bix->data_buf;
86 + struct sd_dif_tuple *sdt = bix->prot_buf;
87 + sector_t sector = bix->sector;
88 + unsigned int i;
89 + __u16 csum;
90 +
91 + for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
92 + /* Unwritten sectors */
93 + if (sdt->app_tag == 0xffff)
94 + return 0;
95 +
96 + /* Bad ref tag received from disk */
97 + if (sdt->ref_tag == 0xffffffff) {
98 + printk(KERN_ERR
99 + "%s: bad phys ref tag on sector %lu\n",
100 + bix->disk_name, (unsigned long)sector);
101 + return -EIO;
102 + }
103 +
104 + if (be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
105 + printk(KERN_ERR
106 + "%s: ref tag error on sector %lu (rcvd %u)\n",
107 + bix->disk_name, (unsigned long)sector,
108 + be32_to_cpu(sdt->ref_tag));
109 + return -EIO;
110 + }
111 +
112 + csum = fn(buf, bix->sector_size);
113 +
114 + if (sdt->guard_tag != csum) {
115 + printk(KERN_ERR "%s: guard tag error on sector %lu " \
116 + "(rcvd %04x, data %04x)\n", bix->disk_name,
117 + (unsigned long)sector,
118 + be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
119 + return -EIO;
120 + }
121 +
122 + buf += bix->sector_size;
123 + sector++;
124 + }
125 +
126 + return 0;
127 + }
128 +
129 + static int sd_dif_type1_verify_crc(struct blk_integrity_exchg *bix)
130 + {
131 + return sd_dif_type1_verify(bix, sd_dif_crc_fn);
132 + }
133 +
134 + static int sd_dif_type1_verify_ip(struct blk_integrity_exchg *bix)
135 + {
136 + return sd_dif_type1_verify(bix, sd_dif_ip_fn);
137 + }
138 +
139 + /*
140 + * Functions for interleaving and deinterleaving application tags
141 + */
142 + static void sd_dif_type1_set_tag(void *prot, void *tag_buf, unsigned int sectors)
143 + {
144 + struct sd_dif_tuple *sdt = prot;
145 + char *tag = tag_buf;
146 + unsigned int i, j;
147 +
148 + for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) {
149 + sdt->app_tag = tag[j] << 8 | tag[j+1];
150 + BUG_ON(sdt->app_tag == 0xffff);
151 + }
152 + }
153 +
154 + static void sd_dif_type1_get_tag(void *prot, void *tag_buf, unsigned int sectors)
155 + {
156 + struct sd_dif_tuple *sdt = prot;
157 + char *tag = tag_buf;
158 + unsigned int i, j;
159 +
160 + for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) {
161 + tag[j] = (sdt->app_tag & 0xff00) >> 8;
162 + tag[j+1] = sdt->app_tag & 0xff;
163 + }
164 + }
165 +
166 + static struct blk_integrity dif_type1_integrity_crc = {
167 + .name = "T10-DIF-TYPE1-CRC",
168 + .generate_fn = sd_dif_type1_generate_crc,
169 + .verify_fn = sd_dif_type1_verify_crc,
170 + .get_tag_fn = sd_dif_type1_get_tag,
171 + .set_tag_fn = sd_dif_type1_set_tag,
172 + .tuple_size = sizeof(struct sd_dif_tuple),
173 + .tag_size = 0,
174 + };
175 +
176 + static struct blk_integrity dif_type1_integrity_ip = {
177 + .name = "T10-DIF-TYPE1-IP",
178 + .generate_fn = sd_dif_type1_generate_ip,
179 + .verify_fn = sd_dif_type1_verify_ip,
180 + .get_tag_fn = sd_dif_type1_get_tag,
181 + .set_tag_fn = sd_dif_type1_set_tag,
182 + .tuple_size = sizeof(struct sd_dif_tuple),
183 + .tag_size = 0,
184 + };
185 +
186 +
187 + /*
188 + * Type 3 protection has a 16-bit guard tag and 16 + 32 bits of opaque
189 + * tag space.
190 + */
191 + static void sd_dif_type3_generate(struct blk_integrity_exchg *bix, csum_fn *fn)
192 + {
193 + void *buf = bix->data_buf;
194 + struct sd_dif_tuple *sdt = bix->prot_buf;
195 + unsigned int i;
196 +
197 + for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
198 + sdt->guard_tag = fn(buf, bix->sector_size);
199 + sdt->ref_tag = 0;
200 + sdt->app_tag = 0;
201 +
202 + buf += bix->sector_size;
203 + }
204 + }
205 +
206 + static void sd_dif_type3_generate_crc(struct blk_integrity_exchg *bix)
207 + {
208 + sd_dif_type3_generate(bix, sd_dif_crc_fn);
209 + }
210 +
211 + static void sd_dif_type3_generate_ip(struct blk_integrity_exchg *bix)
212 + {
213 + sd_dif_type3_generate(bix, sd_dif_ip_fn);
214 + }
215 +
216 + static int sd_dif_type3_verify(struct blk_integrity_exchg *bix, csum_fn *fn)
217 + {
218 + void *buf = bix->data_buf;
219 + struct sd_dif_tuple *sdt = bix->prot_buf;
220 + sector_t sector = bix->sector;
221 + unsigned int i;
222 + __u16 csum;
223 +
224 + for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
225 + /* Unwritten sectors */
226 + if (sdt->app_tag == 0xffff && sdt->ref_tag == 0xffffffff)
227 + return 0;
228 +
229 + csum = fn(buf, bix->sector_size);
230 +
231 + if (sdt->guard_tag != csum) {
232 + printk(KERN_ERR "%s: guard tag error on sector %lu " \
233 + "(rcvd %04x, data %04x)\n", bix->disk_name,
234 + (unsigned long)sector,
235 + be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
236 + return -EIO;
237 + }
238 +
239 + buf += bix->sector_size;
240 + sector++;
241 + }
242 +
243 + return 0;
244 + }
245 +
246 + static int sd_dif_type3_verify_crc(struct blk_integrity_exchg *bix)
247 + {
248 + return sd_dif_type3_verify(bix, sd_dif_crc_fn);
249 + }
250 +
251 + static int sd_dif_type3_verify_ip(struct blk_integrity_exchg *bix)
252 + {
253 + return sd_dif_type3_verify(bix, sd_dif_ip_fn);
254 + }
255 +
256 + static void sd_dif_type3_set_tag(void *prot, void *tag_buf, unsigned int sectors)
257 + {
258 + struct sd_dif_tuple *sdt = prot;
259 + char *tag = tag_buf;
260 + unsigned int i, j;
261 +
262 + for (i = 0, j = 0 ; i < sectors ; i++, j += 6, sdt++) {
263 + sdt->app_tag = tag[j] << 8 | tag[j+1];
264 + sdt->ref_tag = tag[j+2] << 24 | tag[j+3] << 16 |
265 + tag[j+4] << 8 | tag[j+5];
266 + }
267 + }
268 +
269 + static void sd_dif_type3_get_tag(void *prot, void *tag_buf, unsigned int sectors)
270 + {
271 + struct sd_dif_tuple *sdt = prot;
272 + char *tag = tag_buf;
273 + unsigned int i, j;
274 +
275 + for (i = 0, j = 0 ; i < sectors ; i++, j += 6, sdt++) {
276 + tag[j] = (sdt->app_tag & 0xff00) >> 8;
277 + tag[j+1] = sdt->app_tag & 0xff;
278 + tag[j+2] = (sdt->ref_tag & 0xff000000) >> 24;
279 + tag[j+3] = (sdt->ref_tag & 0xff0000) >> 16;
280 + tag[j+4] = (sdt->ref_tag & 0xff00) >> 8;
281 + tag[j+5] = sdt->ref_tag & 0xff;
282 + BUG_ON(sdt->app_tag == 0xffff || sdt->ref_tag == 0xffffffff);
283 + }
284 + }
285 +
286 + static struct blk_integrity dif_type3_integrity_crc = {
287 + .name = "T10-DIF-TYPE3-CRC",
288 + .generate_fn = sd_dif_type3_generate_crc,
289 + .verify_fn = sd_dif_type3_verify_crc,
290 + .get_tag_fn = sd_dif_type3_get_tag,
291 + .set_tag_fn = sd_dif_type3_set_tag,
292 + .tuple_size = sizeof(struct sd_dif_tuple),
293 + .tag_size = 0,
294 + };
295 +
296 + static struct blk_integrity dif_type3_integrity_ip = {
297 + .name = "T10-DIF-TYPE3-IP",
298 + .generate_fn = sd_dif_type3_generate_ip,
299 + .verify_fn = sd_dif_type3_verify_ip,
300 + .get_tag_fn = sd_dif_type3_get_tag,
301 + .set_tag_fn = sd_dif_type3_set_tag,
302 + .tuple_size = sizeof(struct sd_dif_tuple),
303 + .tag_size = 0,
304 + };
305 +
306 + /*
307 + * Configure exchange of protection information between OS and HBA.
308 + */
309 + void sd_dif_config_host(struct scsi_disk *sdkp)
310 + {
311 + struct scsi_device *sdp = sdkp->device;
312 + struct gendisk *disk = sdkp->disk;
313 + u8 type = sdkp->protection_type;
314 +
315 + /* If this HBA doesn't support DIX, resort to normal I/O or DIF */
316 + if (scsi_host_dix_capable(sdp->host, type) == 0) {
317 +
318 + if (type == SD_DIF_TYPE0_PROTECTION)
319 + return;
320 +
321 + if (scsi_host_dif_capable(sdp->host, type) == 0) {
322 + sd_printk(KERN_INFO, sdkp, "Type %d protection " \
323 + "unsupported by HBA. Disabling DIF.\n", type);
324 + sdkp->protection_type = 0;
325 + return;
326 + }
327 +
328 + sd_printk(KERN_INFO, sdkp, "Enabling DIF Type %d protection\n",
329 + type);
330 +
331 + return;
332 + }
333 +
334 + /* Enable DMA of protection information */
335 + if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP)
336 + if (type == SD_DIF_TYPE3_PROTECTION)
337 + blk_integrity_register(disk, &dif_type3_integrity_ip);
338 + else
339 + blk_integrity_register(disk, &dif_type1_integrity_ip);
340 + else
341 + if (type == SD_DIF_TYPE3_PROTECTION)
342 + blk_integrity_register(disk, &dif_type3_integrity_crc);
343 + else
344 + blk_integrity_register(disk, &dif_type1_integrity_crc);
345 +
346 + sd_printk(KERN_INFO, sdkp,
347 + "Enabling %s integrity protection\n", disk->integrity->name);
348 +
349 + /* Signal to block layer that we support sector tagging */
350 + if (type && sdkp->ATO) {
351 + if (type == SD_DIF_TYPE3_PROTECTION)
352 + disk->integrity->tag_size = sizeof(u16) + sizeof(u32);
353 + else
354 + disk->integrity->tag_size = sizeof(u16);
355 +
356 + sd_printk(KERN_INFO, sdkp, "DIF application tag size %u\n",
357 + disk->integrity->tag_size);
358 + }
359 + }
360 +
361 + /*
362 + * DIF DMA operation magic decoder ring.
363 + */
364 + void sd_dif_op(struct scsi_cmnd *scmd, unsigned int dif, unsigned int dix)
365 + {
366 + int csum_convert, prot_op;
367 +
368 + prot_op = 0;
369 +
370 + /* Convert checksum? */
371 + if (scsi_host_get_guard(scmd->device->host) != SHOST_DIX_GUARD_CRC)
372 + csum_convert = 1;
373 + else
374 + csum_convert = 0;
375 +
376 + switch (scmd->cmnd[0]) {
377 + case READ_10:
378 + case READ_12:
379 + case READ_16:
380 + if (dif && dix)
381 + if (csum_convert)
382 + prot_op = SCSI_PROT_READ_CONVERT;
383 + else
384 + prot_op = SCSI_PROT_READ_PASS;
385 + else if (dif && !dix)
386 + prot_op = SCSI_PROT_READ_STRIP;
387 + else if (!dif && dix)
388 + prot_op = SCSI_PROT_READ_INSERT;
389 +
390 + break;
391 +
392 + case WRITE_10:
393 + case WRITE_12:
394 + case WRITE_16:
395 + if (dif && dix)
396 + if (csum_convert)
397 + prot_op = SCSI_PROT_WRITE_CONVERT;
398 + else
399 + prot_op = SCSI_PROT_WRITE_PASS;
400 + else if (dif && !dix)
401 + prot_op = SCSI_PROT_WRITE_INSERT;
402 + else if (!dif && dix)
403 + prot_op = SCSI_PROT_WRITE_STRIP;
404 +
405 + break;
406 + }
407 +
408 + scsi_set_prot_op(scmd, prot_op);
409 + scsi_set_prot_type(scmd, dif);
410 + }
411 +
412 + /*
413 + * The virtual start sector is the one that was originally submitted
414 + * by the block layer. Due to partitioning, MD/DM cloning, etc. the
415 + * actual physical start sector is likely to be different. Remap
416 + * protection information to match the physical LBA.
417 + *
418 + * From a protocol perspective there's a slight difference between
419 + * Type 1 and 2. The latter uses 32-byte CDBs exclusively, and the
420 + * reference tag is seeded in the CDB. This gives us the potential to
421 + * avoid virt->phys remapping during write. However, at read time we
422 + * don't know whether the virt sector is the same as when we wrote it
423 + * (we could be reading from real disk as opposed to MD/DM device). So
424 + * we always remap Type 2, making it identical to Type 1.
425 + *
426 + * Type 3 does not have a reference tag so no remapping is required.
427 + */
428 + int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_sz)
429 + {
430 + const int tuple_sz = sizeof(struct sd_dif_tuple);
431 + struct bio *bio;
432 + struct scsi_disk *sdkp;
433 + struct sd_dif_tuple *sdt;
434 + unsigned int i, j;
435 + u32 phys, virt;
436 +
437 + /* Already remapped? */
438 + if (rq->cmd_flags & REQ_INTEGRITY)
439 + return 0;
440 +
441 + sdkp = rq->bio->bi_bdev->bd_disk->private_data;
442 +
443 + if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION)
444 + return 0;
445 +
446 + rq->cmd_flags |= REQ_INTEGRITY;
447 + phys = hw_sector & 0xffffffff;
448 +
449 + __rq_for_each_bio(bio, rq) {
450 + struct bio_vec *iv;
451 +
452 + virt = bio->bi_integrity->bip_sector & 0xffffffff;
453 +
454 + bip_for_each_vec(iv, bio->bi_integrity, i) {
455 + sdt = kmap_atomic(iv->bv_page, KM_USER0)
456 + + iv->bv_offset;
457 +
458 + for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
459 +
460 + if (be32_to_cpu(sdt->ref_tag) != virt)
461 + goto error;
462 +
463 + sdt->ref_tag = cpu_to_be32(phys);
464 + virt++;
465 + phys++;
466 + }
467 +
468 + kunmap_atomic(sdt, KM_USER0);
469 + }
470 + }
471 +
472 + return 0;
473 +
474 + error:
475 + kunmap_atomic(sdt, KM_USER0);
476 + sd_printk(KERN_ERR, sdkp, "%s: virt %u, phys %u, ref %u\n",
477 + __func__, virt, phys, be32_to_cpu(sdt->ref_tag));
478 +
479 + return -EIO;
480 + }
481 +
482 + /*
483 + * Remap physical sector values in the reference tag to the virtual
484 + * values expected by the block layer.
485 + */
486 + void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
487 + {
488 + const int tuple_sz = sizeof(struct sd_dif_tuple);
489 + struct scsi_disk *sdkp;
490 + struct bio *bio;
491 + struct sd_dif_tuple *sdt;
492 + unsigned int i, j, sectors, sector_sz;
493 + u32 phys, virt;
494 +
495 + sdkp = scsi_disk(scmd->request->rq_disk);
496 +
497 + if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION || good_bytes == 0)
498 + return;
499 +
500 + sector_sz = scmd->device->sector_size;
501 + sectors = good_bytes / sector_sz;
502 +
503 + phys = scmd->request->sector & 0xffffffff;
504 + if (sector_sz == 4096)
505 + phys >>= 3;
506 +
507 + __rq_for_each_bio(bio, scmd->request) {
508 + struct bio_vec *iv;
509 +
510 + virt = bio->bi_integrity->bip_sector & 0xffffffff;
511 +
512 + bip_for_each_vec(iv, bio->bi_integrity, i) {
513 + sdt = kmap_atomic(iv->bv_page, KM_USER0)
514 + + iv->bv_offset;
515 +
516 + for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
517 +
518 + if (sectors == 0) {
519 + kunmap_atomic(sdt, KM_USER0);
520 + return;
521 + }
522 +
523 + if (be32_to_cpu(sdt->ref_tag) != phys &&
524 + sdt->app_tag != 0xffff)
525 + sdt->ref_tag = 0xffffffff; /* Bad ref */
526 + else
527 + sdt->ref_tag = cpu_to_be32(virt);
528 +
529 + virt++;
530 + phys++;
531 + sectors--;
532 + }
533 +
534 + kunmap_atomic(sdt, KM_USER0);
535 + }
536 + }
537 + }
538 +
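For reference, the 8-byte tuple that sd_dif.c attaches to each sector can be modeled in plain user space. The sketch below is illustrative only: the struct, main() and the bitwise CRC routine are assumptions made for the example (the kernel uses crc_t10dif() from lib/, same 0x8bb7 polynomial). It computes the guard tag over a 512-byte sector and seeds a Type 1 reference tag with the low 32 bits of the LBA, mirroring sd_dif_type1_generate() above.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h> /* htons/htonl: DIF tags are big-endian on the wire */

struct dif_tuple {          /* models struct sd_dif_tuple */
	uint16_t guard_tag; /* checksum of the sector's data */
	uint16_t app_tag;   /* opaque application tag */
	uint32_t ref_tag;   /* low 32 bits of the target LBA (Type 1) */
};

/* Bitwise CRC-16/T10-DIF: poly 0x8bb7, init 0, no reflection */
static uint16_t crc_t10dif(const uint8_t *data, size_t len)
{
	uint16_t crc = 0;
	size_t i;
	int j;

	for (i = 0; i < len; i++) {
		crc ^= (uint16_t)data[i] << 8;
		for (j = 0; j < 8; j++)
			crc = (crc & 0x8000) ? (crc << 1) ^ 0x8bb7
					     : crc << 1;
	}
	return crc;
}

int main(void)
{
	uint8_t sector[512];
	struct dif_tuple t;
	uint32_t lba = 12345; /* arbitrary example LBA */

	memset(sector, 0xab, sizeof(sector));
	t.guard_tag = htons(crc_t10dif(sector, sizeof(sector)));
	t.app_tag = 0;
	t.ref_tag = htonl(lba); /* Type 1: ref tag seeded with the LBA */
	printf("guard %04x ref %u\n",
	       (unsigned)ntohs(t.guard_tag), (unsigned)ntohl(t.ref_tag));
	return 0;
}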
+4 -7
drivers/scsi/st.c
··· 17 17 Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
18 18 */
19 19
20 - static const char *verstr = "20080224";
20 + static const char *verstr = "20080504";
21 21
22 22 #include <linux/module.h>
23 23
··· 631 631 /* Flush the write buffer (never need to write if variable blocksize). */
632 632 static int st_flush_write_buffer(struct scsi_tape * STp)
633 633 {
634 - int offset, transfer, blks;
634 + int transfer, blks;
635 635 int result;
636 636 unsigned char cmd[MAX_COMMAND_SIZE];
637 637 struct st_request *SRpnt;
··· 644 644 result = 0;
645 645 if (STp->dirty == 1) {
646 646
647 - offset = (STp->buffer)->buffer_bytes;
648 - transfer = ((offset + STp->block_size - 1) /
649 - STp->block_size) * STp->block_size;
647 + transfer = STp->buffer->buffer_bytes;
650 648 DEBC(printk(ST_DEB_MSG "%s: Flushing %d bytes.\n",
651 649 tape_name(STp), transfer));
652 -
653 - memset((STp->buffer)->b_data + offset, 0, transfer - offset);
654 650
655 651 memset(cmd, 0, MAX_COMMAND_SIZE);
656 652 cmd[0] = WRITE_6;
··· 1666 1670 if (undone <= do_count) {
1667 1671 /* Only data from this write is not written */
1668 1672 count += undone;
1673 + b_point -= undone;
1669 1674 do_count -= undone;
1670 1675 if (STp->block_size)
1671 1676 blks = (transfer - undone) / STp->block_size;
+1 -1
drivers/scsi/stex.c
··· 467 467 /* Cheat: usually extracted from Inquiry data */
468 468 sdev->tagged_supported = 1;
469 469
470 - scsi_activate_tcq(sdev, sdev->host->can_queue);
470 + scsi_activate_tcq(sdev, ST_CMD_PER_LUN);
471 471
472 472 return 0;
473 473 }
+2
drivers/scsi/sym53c8xx_2/sym_hipd.c
··· 5741 5741
5742 5742 for (target = 0; target < SYM_CONF_MAX_TARGET ; target++) {
5743 5743 tp = &np->target[target];
5744 + if (tp->luntbl)
5745 + sym_mfree_dma(tp->luntbl, 256, "LUNTBL");
5744 5746 #if SYM_CONF_MAX_LUN > 1
5745 5747 kfree(tp->lunmp);
5746 5748 #endif
+4 -4
drivers/scsi/tmscsim.c
··· 452 452 /* TODO: error handling */
453 453 if (pSRB->SGcount != 1)
454 454 error = 1;
455 - DEBUG1(printk("%s(): Mapped sense buffer %p at %x\n", __FUNCTION__, pcmd->sense_buffer, cmdp->saved_dma_handle));
455 + DEBUG1(printk("%s(): Mapped sense buffer %p at %x\n", __func__, pcmd->sense_buffer, cmdp->saved_dma_handle));
456 456 /* Map SG list */
457 457 } else if (scsi_sg_count(pcmd)) {
458 458 int nseg;
··· 466 466 if (nseg < 0)
467 467 error = 1;
468 468 DEBUG1(printk("%s(): Mapped SG %p with %d (%d) elements\n",\
469 - __FUNCTION__, scsi_sglist(pcmd), nseg, scsi_sg_count(pcmd)));
469 + __func__, scsi_sglist(pcmd), nseg, scsi_sg_count(pcmd)));
470 470 /* Map single segment */
471 471 } else
472 472 pSRB->SGcount = 0;
··· 483 483
484 484 if (pSRB->SRBFlag) {
485 485 pci_unmap_sg(pdev, &pSRB->Segmentx, 1, DMA_FROM_DEVICE);
486 - DEBUG1(printk("%s(): Unmapped sense buffer at %x\n", __FUNCTION__, cmdp->saved_dma_handle));
486 + DEBUG1(printk("%s(): Unmapped sense buffer at %x\n", __func__, cmdp->saved_dma_handle));
487 487 } else {
488 488 scsi_dma_unmap(pcmd);
489 489 DEBUG1(printk("%s(): Unmapped SG at %p with %d elements\n",
490 - __FUNCTION__, scsi_sglist(pcmd), scsi_sg_count(pcmd)));
490 + __func__, scsi_sglist(pcmd), scsi_sg_count(pcmd)));
491 491 }
492 492 }
493 493
+4 -4
drivers/scsi/wd7000.c
··· 148 148 *
149 149 * 2002/10/04 - Alan Cox <alan@redhat.com>
150 150 *
151 - * Use dev_id for interrupts, kill __FUNCTION__ pasting
151 + * Use dev_id for interrupts, kill __func__ pasting
152 152 * Add a lock for the scb pool, clean up all other cli/sti usage stuff
153 153 * Use the adapter lock for the other places we had the cli's
154 154 *
··· 640 640 (void) get_options(str, ARRAY_SIZE(ints), ints);
641 641
642 642 if (wd7000_card_num >= NUM_CONFIGS) {
643 - printk(KERN_ERR "%s: Too many \"wd7000=\" configurations in " "command line!\n", __FUNCTION__);
643 + printk(KERN_ERR "%s: Too many \"wd7000=\" configurations in " "command line!\n", __func__);
644 644 return 0;
645 645 }
646 646
647 647 if ((ints[0] < 3) || (ints[0] > 5)) {
648 - printk(KERN_ERR "%s: Error in command line! " "Usage: wd7000=<IRQ>,<DMA>,IO>[,<BUS_ON>" "[,<BUS_OFF>]]\n", __FUNCTION__);
648 + printk(KERN_ERR "%s: Error in command line! " "Usage: wd7000=<IRQ>,<DMA>,IO>[,<BUS_ON>" "[,<BUS_OFF>]]\n", __func__);
649 649 } else {
650 650 for (i = 0; i < NUM_IRQS; i++)
651 651 if (ints[1] == wd7000_irq[i])
··· 1642 1642 ip[2] = info[2];
1643 1643
1644 1644 if (info[0] == 255)
1645 - printk(KERN_INFO "%s: current partition table is " "using extended translation.\n", __FUNCTION__);
1645 + printk(KERN_INFO "%s: current partition table is " "using extended translation.\n", __func__);
1646 1646 }
1647 1647 }
1648 1648
+4 -4
drivers/scsi/zalon.c
··· 68 68 if (status == PDC_RET_OK) {
69 69 clock = (int) pdc_result[16];
70 70 } else {
71 - printk(KERN_WARNING "%s: pdc_iodc_read returned %d\n", __FUNCTION__, status);
71 + printk(KERN_WARNING "%s: pdc_iodc_read returned %d\n", __func__, status);
72 72 clock = defaultclock;
73 73 }
74 74
75 - printk(KERN_DEBUG "%s: SCSI clock %d\n", __FUNCTION__, clock);
75 + printk(KERN_DEBUG "%s: SCSI clock %d\n", __func__, clock);
76 76 return clock;
77 77 }
78 78 #endif
··· 108 108 */
109 109 dev->irq = gsc_alloc_irq(&gsc_irq);
110 110
111 - printk(KERN_INFO "%s: Zalon version %d, IRQ %d\n", __FUNCTION__,
111 + printk(KERN_INFO "%s: Zalon version %d, IRQ %d\n", __func__,
112 112 zalon_vers, dev->irq);
113 113
114 114 __raw_writel(gsc_irq.txn_addr | gsc_irq.txn_data, zalon + IO_MODULE_EIM);
115 115
116 116 if (zalon_vers == 0)
117 - printk(KERN_WARNING "%s: Zalon 1.1 or earlier\n", __FUNCTION__);
117 + printk(KERN_WARNING "%s: Zalon 1.1 or earlier\n", __func__);
118 118
119 119 memset(&device, 0, sizeof(struct ncr_device));
120 120
+3
include/scsi/scsi.h
··· 106 106 #define VARIABLE_LENGTH_CMD 0x7f
107 107 #define REPORT_LUNS 0xa0
108 108 #define MAINTENANCE_IN 0xa3
109 + #define MAINTENANCE_OUT 0xa4
109 110 #define MOVE_MEDIUM 0xa5
110 111 #define EXCHANGE_MEDIUM 0xa6
111 112 #define READ_12 0xa8
··· 126 125 #define SAI_READ_CAPACITY_16 0x10
127 126 /* values for maintenance in */
128 127 #define MI_REPORT_TARGET_PGS 0x0a
128 + /* values for maintenance out */
129 + #define MO_SET_TARGET_PGS 0x0a
129 130
130 131 /* Values for T10/04-262r7 */
131 132 #define ATA_16 0x85 /* 16-byte pass-thru */
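The new MAINTENANCE_OUT/MO_SET_TARGET_PGS opcodes are what an ALUA device handler needs in order to change target port group states. A hedged sketch of the 12-byte CDB layout per SPC follows; build_stpg_cdb() is a hypothetical helper (not a kernel function), and the only facts assumed are the opcode/service-action bytes above and the parameter list length sitting in bytes 6-9.

#include <stdint.h>
#include <string.h>

#define MAINTENANCE_OUT   0xa4
#define MO_SET_TARGET_PGS 0x0a

static void build_stpg_cdb(uint8_t cdb[12], uint32_t param_len)
{
	memset(cdb, 0, 12);
	cdb[0] = MAINTENANCE_OUT;          /* opcode */
	cdb[1] = MO_SET_TARGET_PGS;        /* service action */
	cdb[6] = (param_len >> 24) & 0xff; /* parameter list length, MSB first */
	cdb[7] = (param_len >> 16) & 0xff;
	cdb[8] = (param_len >> 8) & 0xff;
	cdb[9] = param_len & 0xff;
	/* cdb[11] is the CONTROL byte, left zero here */
}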
+86
include/scsi/scsi_cmnd.h
··· 77 77 int allowed;
78 78 int timeout_per_command;
79 79
80 + unsigned char prot_op;
81 + unsigned char prot_type;
82 +
80 83 unsigned short cmd_len;
81 84 enum dma_data_direction sc_data_direction;
82 85
··· 90 87
91 88 /* These elements define the operation we ultimately want to perform */
92 89 struct scsi_data_buffer sdb;
90 + struct scsi_data_buffer *prot_sdb;
91 +
93 92 unsigned underflow; /* Return error if less than
94 93 this amount is transferred */
95 94
··· 212 207 return sg_copy_to_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
213 208 buf, buflen);
214 209 }
210 +
211 + /*
212 + * The operations below are hints that tell the controller driver how
213 + * to handle I/Os with DIF or similar types of protection information.
214 + */
215 + enum scsi_prot_operations {
216 + /* Normal I/O */
217 + SCSI_PROT_NORMAL = 0,
218 +
219 + /* OS-HBA: Protected, HBA-Target: Unprotected */
220 + SCSI_PROT_READ_INSERT,
221 + SCSI_PROT_WRITE_STRIP,
222 +
223 + /* OS-HBA: Unprotected, HBA-Target: Protected */
224 + SCSI_PROT_READ_STRIP,
225 + SCSI_PROT_WRITE_INSERT,
226 +
227 + /* OS-HBA: Protected, HBA-Target: Protected */
228 + SCSI_PROT_READ_PASS,
229 + SCSI_PROT_WRITE_PASS,
230 +
231 + /* OS-HBA: Protected, HBA-Target: Protected, checksum conversion */
232 + SCSI_PROT_READ_CONVERT,
233 + SCSI_PROT_WRITE_CONVERT,
234 + };
235 +
236 + static inline void scsi_set_prot_op(struct scsi_cmnd *scmd, unsigned char op)
237 + {
238 + scmd->prot_op = op;
239 + }
240 +
241 + static inline unsigned char scsi_get_prot_op(struct scsi_cmnd *scmd)
242 + {
243 + return scmd->prot_op;
244 + }
245 +
246 + /*
247 + * The controller usually does not know anything about the target it
248 + * is communicating with. However, when DIX is enabled the controller
249 + * must know the target type so it can verify the protection
250 + * information passed along with the I/O.
251 + */
252 + enum scsi_prot_target_type {
253 + SCSI_PROT_DIF_TYPE0 = 0,
254 + SCSI_PROT_DIF_TYPE1,
255 + SCSI_PROT_DIF_TYPE2,
256 + SCSI_PROT_DIF_TYPE3,
257 + };
258 +
259 + static inline void scsi_set_prot_type(struct scsi_cmnd *scmd, unsigned char type)
260 + {
261 + scmd->prot_type = type;
262 + }
263 +
264 + static inline unsigned char scsi_get_prot_type(struct scsi_cmnd *scmd)
265 + {
266 + return scmd->prot_type;
267 + }
268 +
269 + static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd)
270 + {
271 + return scmd->request->sector;
272 + }
273 +
274 + static inline unsigned scsi_prot_sg_count(struct scsi_cmnd *cmd)
275 + {
276 + return cmd->prot_sdb ? cmd->prot_sdb->table.nents : 0;
277 + }
278 +
279 + static inline struct scatterlist *scsi_prot_sglist(struct scsi_cmnd *cmd)
280 + {
281 + return cmd->prot_sdb ? cmd->prot_sdb->table.sgl : NULL;
282 + }
283 +
284 + static inline struct scsi_data_buffer *scsi_prot(struct scsi_cmnd *cmd)
285 + {
286 + return cmd->prot_sdb;
287 + }
288 +
289 + #define scsi_for_each_prot_sg(cmd, sg, nseg, __i) \
290 + for_each_sg(scsi_prot_sglist(cmd), sg, nseg, __i)
215 291
216 292 #endif /* _SCSI_SCSI_CMND_H */
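The prot_op hint falls out of whether the OS side (dix) and the target side (dif) each carry protection data. The small user-space model below mirrors the read-side branch of the decoder ring in sd_dif.c earlier in this merge; choose_read_op() and main() are illustrative names, not kernel functions, and the enum simply restates the values from the hunk above.

#include <stdio.h>

enum scsi_prot_operations {
	SCSI_PROT_NORMAL = 0,
	SCSI_PROT_READ_INSERT, SCSI_PROT_WRITE_STRIP,
	SCSI_PROT_READ_STRIP, SCSI_PROT_WRITE_INSERT,
	SCSI_PROT_READ_PASS, SCSI_PROT_WRITE_PASS,
	SCSI_PROT_READ_CONVERT, SCSI_PROT_WRITE_CONVERT,
};

static enum scsi_prot_operations choose_read_op(int dif, int dix, int convert)
{
	if (dif && dix)  /* protected on both legs: pass through or convert */
		return convert ? SCSI_PROT_READ_CONVERT : SCSI_PROT_READ_PASS;
	if (dif)         /* target protected only: HBA strips PI on the way up */
		return SCSI_PROT_READ_STRIP;
	if (dix)         /* OS protected only: HBA inserts PI for the OS */
		return SCSI_PROT_READ_INSERT;
	return SCSI_PROT_NORMAL;
}

int main(void)
{
	int dif, dix;

	/* Walk the four dif/dix combinations without checksum conversion */
	for (dif = 0; dif < 2; dif++)
		for (dix = 0; dix < 2; dix++)
			printf("dif=%d dix=%d -> op %d\n",
			       dif, dix, choose_read_op(dif, dix, 0));
	return 0;
}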
+15 -2
include/scsi/scsi_device.h
··· 140 140 unsigned fix_capacity:1; /* READ_CAPACITY is too high by 1 */
141 141 unsigned guess_capacity:1; /* READ_CAPACITY might be too high by 1 */
142 142 unsigned retry_hwerror:1; /* Retry HARDWARE_ERROR */
143 - unsigned last_sector_bug:1; /* Always read last sector in a 1 sector read */
143 + unsigned last_sector_bug:1; /* do not use multisector accesses on
144 + SD_LAST_BUGGY_SECTORS */
144 145
145 146 DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */
146 147 struct list_head event_list; /* asserted events */
··· 168 167 unsigned long sdev_data[0];
169 168 } __attribute__((aligned(sizeof(unsigned long))));
170 169
170 + struct scsi_dh_devlist {
171 + char *vendor;
172 + char *model;
173 + };
174 +
175 +
171 175 struct scsi_device_handler {
172 176 /* Used by the infrastructure */
173 177 struct list_head list; /* list of scsi_device_handlers */
174 - struct notifier_block nb;
175 178
176 179 /* Filled by the hardware handler */
177 180 struct module *module;
178 181 const char *name;
182 + const struct scsi_dh_devlist *devlist;
179 183 int (*check_sense)(struct scsi_device *, struct scsi_sense_hdr *);
184 + int (*attach)(struct scsi_device *);
185 + void (*detach)(struct scsi_device *);
180 186 int (*activate)(struct scsi_device *);
181 187 int (*prep_fn)(struct scsi_device *, struct request *);
··· 422 414 static inline int scsi_device_enclosure(struct scsi_device *sdev)
423 415 {
424 416 return sdev->inquiry[6] & (1<<6);
425 417 }
418 +
419 + static inline int scsi_device_protection(struct scsi_device *sdev)
420 + {
421 + return sdev->inquiry[5] & (1<<0);
425 422 }
426 423
427 424 #define MODULE_ALIAS_SCSI_DEVICE(type) \
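scsi_device_protection() keys off the PROTECT bit in byte 5 of the standard INQUIRY response. A self-contained sketch of the same test against a raw INQUIRY buffer; the buffer contents and the helper name are fabricated for illustration.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* PROTECT bit per SPC: byte 5, bit 0 of standard INQUIRY data */
static int inquiry_supports_protection(const uint8_t *inq, size_t len)
{
	if (len < 6)
		return 0;
	return inq[5] & 0x01;
}

int main(void)
{
	uint8_t inq[36];

	memset(inq, 0, sizeof(inq));
	inq[5] = 0x01; /* pretend the target reports PROTECT=1 */
	printf("target supports protection: %s\n",
	       inquiry_supports_protection(inq, sizeof(inq)) ? "yes" : "no");
	return 0;
}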
+11
include/scsi/scsi_dh.h
··· 32 32 */
33 33 SCSI_DH_DEV_FAILED, /* generic device error */
34 34 SCSI_DH_DEV_TEMP_BUSY,
35 + SCSI_DH_DEV_UNSUPP, /* device handler not supported */
35 36 SCSI_DH_DEVICE_MAX, /* max device blkerr definition */
36 37
37 38 /*
··· 58 57 #if defined(CONFIG_SCSI_DH) || defined(CONFIG_SCSI_DH_MODULE)
59 58 extern int scsi_dh_activate(struct request_queue *);
60 59 extern int scsi_dh_handler_exist(const char *);
60 + extern int scsi_dh_attach(struct request_queue *, const char *);
61 + extern void scsi_dh_detach(struct request_queue *);
61 62 #else
62 63 static inline int scsi_dh_activate(struct request_queue *req)
63 64 {
··· 68 65 static inline int scsi_dh_handler_exist(const char *name)
69 66 {
70 67 return 0;
68 + }
69 + static inline int scsi_dh_attach(struct request_queue *req, const char *name)
70 + {
71 + return SCSI_DH_NOSYS;
72 + }
73 + static inline void scsi_dh_detach(struct request_queue *q)
74 + {
75 + return;
71 76 }
72 77 #endif
+2
include/scsi/scsi_eh.h
··· 74 74 /* saved state */
75 75 int result;
76 76 enum dma_data_direction data_direction;
77 + unsigned underflow;
77 78 unsigned char cmd_len;
79 + unsigned char prot_op;
78 80 unsigned char *cmnd;
79 81 struct scsi_data_buffer sdb;
80 82 struct request *next_rq;
+85 -1
include/scsi/scsi_host.h
··· 547 547 unsigned int host_failed; /* commands that failed. */
548 548 unsigned int host_eh_scheduled; /* EH scheduled without command */
549 549
550 - unsigned short host_no; /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */
550 + unsigned int host_no; /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */
551 551 int resetting; /* if set, it means that last_reset is a valid value */
552 552 unsigned long last_reset;
553 553
··· 635 635 * Value host_blocked counts down from
636 636 */
637 637 unsigned int max_host_blocked;
638 +
639 + /* Protection Information */
640 + unsigned int prot_capabilities;
641 + unsigned char prot_guard_type;
638 642
639 643 /*
640 644 * q used for scsi_tgt msgs, async events or any other requests that
··· 759 755 */
760 756 extern void scsi_free_host_dev(struct scsi_device *);
761 757 extern struct scsi_device *scsi_get_host_dev(struct Scsi_Host *);
758 +
759 + /*
760 + * DIF defines the exchange of protection information between
761 + * initiator and SBC block device.
762 + *
763 + * DIX defines the exchange of protection information between OS and
764 + * initiator.
765 + */
766 + enum scsi_host_prot_capabilities {
767 + SHOST_DIF_TYPE1_PROTECTION = 1 << 0, /* T10 DIF Type 1 */
768 + SHOST_DIF_TYPE2_PROTECTION = 1 << 1, /* T10 DIF Type 2 */
769 + SHOST_DIF_TYPE3_PROTECTION = 1 << 2, /* T10 DIF Type 3 */
770 +
771 + SHOST_DIX_TYPE0_PROTECTION = 1 << 3, /* DIX between OS and HBA only */
772 + SHOST_DIX_TYPE1_PROTECTION = 1 << 4, /* DIX with DIF Type 1 */
773 + SHOST_DIX_TYPE2_PROTECTION = 1 << 5, /* DIX with DIF Type 2 */
774 + SHOST_DIX_TYPE3_PROTECTION = 1 << 6, /* DIX with DIF Type 3 */
775 + };
776 +
777 + /*
778 + * SCSI hosts which support the Data Integrity Extensions must
779 + * indicate their capabilities by setting the prot_capabilities using
780 + * this call.
781 + */
782 + static inline void scsi_host_set_prot(struct Scsi_Host *shost, unsigned int mask)
783 + {
784 + shost->prot_capabilities = mask;
785 + }
786 +
787 + static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost)
788 + {
789 + return shost->prot_capabilities;
790 + }
791 +
792 + static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type)
793 + {
794 + switch (target_type) {
795 + case 1: return shost->prot_capabilities & SHOST_DIF_TYPE1_PROTECTION;
796 + case 2: return shost->prot_capabilities & SHOST_DIF_TYPE2_PROTECTION;
797 + case 3: return shost->prot_capabilities & SHOST_DIF_TYPE3_PROTECTION;
798 + }
799 +
800 + return 0;
801 + }
802 +
803 + static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsigned int target_type)
804 + {
805 + switch (target_type) {
806 + case 0: return shost->prot_capabilities & SHOST_DIX_TYPE0_PROTECTION;
807 + case 1: return shost->prot_capabilities & SHOST_DIX_TYPE1_PROTECTION;
808 + case 2: return shost->prot_capabilities & SHOST_DIX_TYPE2_PROTECTION;
809 + case 3: return shost->prot_capabilities & SHOST_DIX_TYPE3_PROTECTION;
810 + }
811 +
812 + return 0;
813 + }
814 +
815 + /*
816 + * All DIX-capable initiators must support the T10-mandated CRC
817 + * checksum. Controllers can optionally implement the IP checksum
818 + * scheme which has much lower impact on system performance. Note
819 + * that the main rationale for the checksum is to match integrity
820 + * metadata with data. Detecting bit errors is a job for ECC memory
821 + * and buses.
822 + */
823 +
824 + enum scsi_host_guard_type {
825 + SHOST_DIX_GUARD_CRC = 1 << 0,
826 + SHOST_DIX_GUARD_IP = 1 << 1,
827 + };
828 +
829 + static inline void scsi_host_set_guard(struct Scsi_Host *shost, unsigned char type)
830 + {
831 + shost->prot_guard_type = type;
832 + }
833 +
834 + static inline unsigned char scsi_host_get_guard(struct Scsi_Host *shost)
835 + {
836 + return shost->prot_guard_type;
837 + }
762 838
763 839 /* legacy interfaces */
764 840 extern struct Scsi_Host *scsi_register(struct scsi_host_template *, int);
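Taken together, an LLD advertises its protection abilities once at host setup and the midlayer consults the mask per command via the capable() helpers. A hypothetical probe-time sketch for a DIX Type 1-capable HBA follows; only the helpers and flags come from the hunk above, while the function itself and the chosen capability set are assumptions for illustration.

#include <scsi/scsi_host.h>

/* Illustrative only: advertise DIF/DIX support after scsi_host_alloc().
 * A real driver would pick flags matching its hardware. */
static void example_announce_protection(struct Scsi_Host *shost)
{
	scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
				  SHOST_DIX_TYPE0_PROTECTION |
				  SHOST_DIX_TYPE1_PROTECTION);
	/* Claim both guard schemes: T10 CRC plus the cheaper IP checksum */
	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC | SHOST_DIX_GUARD_IP);
}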