[PATCH] USB: Support multi-LUN devices in ub

Signed-off-by: Pete Zaitcev <zaitcev@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>

diff -urp -X dontdiff linux-2.6.12-rc3/drivers/block/ub.c linux-2.6.12-rc3-lem/drivers/block/ub.c

authored by Pete Zaitcev and committed by Greg KH f4800078 aa447acb

+364 -236
drivers/block/ub.c
··· 8 * and is not licensed separately. See file COPYING for details. 9 * 10 * TODO (sorted by decreasing priority) 11 * -- Do resets with usb_device_reset (needs a thread context, use khubd) 12 * -- set readonly flag for CDs, set removable flag for CF readers 13 * -- do inquiry and verify we got a disk and not a tape (for LUN mismatch) 14 - * -- support pphaneuf's SDDR-75 with two LUNs (also broken capacity...) 15 * -- special case some senses, e.g. 3a/0 -> no media present, reduce retries 16 * -- verify the 13 conditions and do bulk resets 17 - * -- normal pool of commands instead of cmdv[]? 18 * -- kill last_pipe and simply do two-state clearing on both pipes 19 * -- verify protocol (bulk) from USB descriptors (maybe...) 20 * -- highmem and sg ··· 48 #define US_SC_SCSI 0x06 /* Transparent */ 49 50 /* 51 */ 52 #define UB_MINORS_PER_MAJOR 8 53 54 #define UB_MAX_CDB_SIZE 16 /* Corresponds to Bulk */ ··· 71 u32 Tag; /* unique per command id */ 72 __le32 DataTransferLength; /* size of data */ 73 u8 Flags; /* direction in bit 0 */ 74 - u8 Lun; /* LUN normally 0 */ 75 u8 Length; /* of of the CDB */ 76 u8 CDB[UB_MAX_CDB_SIZE]; /* max command */ 77 }; ··· 174 unsigned int len; /* Requested length */ 175 // struct scatterlist sgv[UB_MAX_REQ_SG]; 176 177 void (*done)(struct ub_dev *, struct ub_scsi_cmd *); 178 void *back; 179 }; ··· 259 }; 260 261 /* 262 - * The UB device instance. 263 */ 264 - struct ub_dev { 265 - spinlock_t lock; 266 - int id; /* Number among ub's */ 267 - atomic_t poison; /* The USB device is disconnected */ 268 - int openc; /* protected by ub_lock! */ 269 - /* kref is too implicit for our taste */ 270 - unsigned int tagcnt; 271 int changed; /* Media was changed */ 272 int removable; 273 int readonly; 274 int first_open; /* Kludge. See ub_bd_open. 
*/ 275 - char name[8]; 276 struct usb_device *dev; 277 struct usb_interface *intf; 278 279 - struct ub_capacity capacity; 280 - struct gendisk *disk; 281 282 unsigned int send_bulk_pipe; /* cached pipe values */ 283 unsigned int recv_bulk_pipe; ··· 307 unsigned int recv_ctrl_pipe; 308 309 struct tasklet_struct tasklet; 310 - 311 - /* XXX Use Ingo's mempool (once we have more than one) */ 312 - int cmda[1]; 313 - struct ub_scsi_cmd cmdv[1]; 314 315 struct ub_scsi_cmd_queue cmd_queue; 316 struct ub_scsi_cmd top_rqs_cmd; /* REQUEST SENSE */ ··· 326 /* 327 */ 328 static void ub_cleanup(struct ub_dev *sc); 329 - static int ub_bd_rq_fn_1(struct ub_dev *sc, struct request *rq); 330 - static int ub_cmd_build_block(struct ub_dev *sc, struct ub_scsi_cmd *cmd, 331 - struct request *rq); 332 static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_scsi_cmd *cmd, 333 struct request *rq); 334 static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd); ··· 345 static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd, 346 int stalled_pipe); 347 static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd); 348 - static int ub_sync_tur(struct ub_dev *sc); 349 - static int ub_sync_read_cap(struct ub_dev *sc, struct ub_capacity *ret); 350 351 /* 352 */ ··· 369 */ 370 #define UB_MAX_HOSTS 26 371 static char ub_hostv[UB_MAX_HOSTS]; 372 static DEFINE_SPINLOCK(ub_lock); /* Locks globals and ->openc */ 373 374 /* ··· 434 { 435 struct usb_interface *intf; 436 struct ub_dev *sc; 437 int cnt; 438 unsigned long flags; 439 int nc, nh; ··· 451 spin_lock_irqsave(&sc->lock, flags); 452 453 cnt += sprintf(page + cnt, 454 - "qlen %d qmax %d changed %d removable %d readonly %d\n", 455 - sc->cmd_queue.qlen, sc->cmd_queue.qmax, 456 - sc->changed, sc->removable, sc->readonly); 457 458 if ((nc = sc->tr.cur + 1) == SCMD_TRACE_SZ) nc = 0; 459 for (j = 0; j < SCMD_TRACE_SZ; j++) { ··· 559 */ 560 static void ub_cleanup(struct ub_dev *sc) 561 { 562 
request_queue_t *q; 563 564 - /* I don't think queue can be NULL. But... Stolen from sx8.c */ 565 - if ((q = sc->disk->queue) != NULL) 566 - blk_cleanup_queue(q); 567 568 - /* 569 - * If we zero disk->private_data BEFORE put_disk, we have to check 570 - * for NULL all over the place in open, release, check_media and 571 - * revalidate, because the block level semaphore is well inside the 572 - * put_disk. But we cannot zero after the call, because *disk is gone. 573 - * The sd.c is blatantly racy in this area. 574 - */ 575 - /* disk->private_data = NULL; */ 576 - put_disk(sc->disk); 577 - sc->disk = NULL; 578 579 - ub_id_put(sc->id); 580 kfree(sc); 581 } 582 583 /* 584 * The "command allocator". 585 */ 586 - static struct ub_scsi_cmd *ub_get_cmd(struct ub_dev *sc) 587 { 588 struct ub_scsi_cmd *ret; 589 590 - if (sc->cmda[0]) 591 return NULL; 592 - ret = &sc->cmdv[0]; 593 - sc->cmda[0] = 1; 594 return ret; 595 } 596 597 - static void ub_put_cmd(struct ub_dev *sc, struct ub_scsi_cmd *cmd) 598 { 599 - if (cmd != &sc->cmdv[0]) { 600 printk(KERN_WARNING "%s: releasing a foreign cmd %p\n", 601 - sc->name, cmd); 602 return; 603 } 604 - if (!sc->cmda[0]) { 605 - printk(KERN_WARNING "%s: releasing a free cmd\n", sc->name); 606 return; 607 } 608 - sc->cmda[0] = 0; 609 } 610 611 /* ··· 676 677 static void ub_bd_rq_fn(request_queue_t *q) 678 { 679 - struct ub_dev *sc = q->queuedata; 680 struct request *rq; 681 682 while ((rq = elv_next_request(q)) != NULL) { 683 - if (ub_bd_rq_fn_1(sc, rq) != 0) { 684 blk_stop_queue(q); 685 break; 686 } 687 } 688 } 689 690 - static int ub_bd_rq_fn_1(struct ub_dev *sc, struct request *rq) 691 { 692 struct ub_scsi_cmd *cmd; 693 int rc; 694 695 - if (atomic_read(&sc->poison) || sc->changed) { 696 blkdev_dequeue_request(rq); 697 ub_end_rq(rq, 0); 698 return 0; 699 } 700 701 - if ((cmd = ub_get_cmd(sc)) == NULL) 702 return -1; 703 memset(cmd, 0, sizeof(struct ub_scsi_cmd)); 704 ··· 708 if (blk_pc_request(rq)) { 709 rc = ub_cmd_build_packet(sc, cmd, 
rq); 710 } else { 711 - rc = ub_cmd_build_block(sc, cmd, rq); 712 } 713 if (rc != 0) { 714 - ub_put_cmd(sc, cmd); 715 ub_end_rq(rq, 0); 716 - blk_start_queue(sc->disk->queue); 717 return 0; 718 } 719 - 720 cmd->state = UB_CMDST_INIT; 721 cmd->done = ub_rw_cmd_done; 722 cmd->back = rq; 723 724 cmd->tag = sc->tagcnt++; 725 if ((rc = ub_submit_scsi(sc, cmd)) != 0) { 726 - ub_put_cmd(sc, cmd); 727 ub_end_rq(rq, 0); 728 - blk_start_queue(sc->disk->queue); 729 return 0; 730 } 731 732 return 0; 733 } 734 735 - static int ub_cmd_build_block(struct ub_dev *sc, struct ub_scsi_cmd *cmd, 736 - struct request *rq) 737 { 738 int ub_dir; 739 #if 0 /* We use rq->buffer for now */ ··· 752 sg = &cmd->sgv[0]; 753 n_elem = blk_rq_map_sg(q, rq, sg); 754 if (n_elem <= 0) { 755 - ub_put_cmd(sc, cmd); 756 ub_end_rq(rq, 0); 757 blk_start_queue(q); 758 return 0; /* request with no s/g entries? */ ··· 761 if (n_elem != 1) { /* Paranoia */ 762 printk(KERN_WARNING "%s: request with %d segments\n", 763 sc->name, n_elem); 764 - ub_put_cmd(sc, cmd); 765 ub_end_rq(rq, 0); 766 blk_start_queue(q); 767 return 0; ··· 793 * The call to blk_queue_hardsect_size() guarantees that request 794 * is aligned, but it is given in terms of 512 byte units, always. 795 */ 796 - block = rq->sector >> sc->capacity.bshift; 797 - nblks = rq->nr_sectors >> sc->capacity.bshift; 798 799 cmd->cdb[0] = (ub_dir == UB_DIR_READ)? READ_10: WRITE_10; 800 /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */ ··· 848 static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd) 849 { 850 struct request *rq = cmd->back; 851 - struct gendisk *disk = sc->disk; 852 request_queue_t *q = disk->queue; 853 int uptodate; 854 ··· 864 else 865 uptodate = 0; 866 867 - ub_put_cmd(sc, cmd); 868 ub_end_rq(rq, uptodate); 869 blk_start_queue(q); 870 } ··· 933 bcb->Tag = cmd->tag; /* Endianness is not important */ 934 bcb->DataTransferLength = cpu_to_le32(cmd->len); 935 bcb->Flags = (cmd->dir == UB_DIR_READ) ? 
0x80 : 0; 936 - bcb->Lun = 0; /* No multi-LUN yet */ 937 bcb->Length = cmd->cdb_len; 938 939 /* copy the command payload */ ··· 1048 * The control pipe clears itself - nothing to do. 1049 * XXX Might try to reset the device here and retry. 1050 */ 1051 - printk(KERN_NOTICE "%s: " 1052 - "stall on control pipe for device %u\n", 1053 - sc->name, sc->dev->devnum); 1054 goto Bad_End; 1055 } 1056 ··· 1070 * The control pipe clears itself - nothing to do. 1071 * XXX Might try to reset the device here and retry. 1072 */ 1073 - printk(KERN_NOTICE "%s: " 1074 - "stall on control pipe for device %u\n", 1075 - sc->name, sc->dev->devnum); 1076 goto Bad_End; 1077 } 1078 ··· 1090 rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe); 1091 if (rc != 0) { 1092 printk(KERN_NOTICE "%s: " 1093 - "unable to submit clear for device %u" 1094 - " (code %d)\n", 1095 - sc->name, sc->dev->devnum, rc); 1096 /* 1097 * This is typically ENOMEM or some other such shit. 1098 * Retrying is pointless. Just do Bad End on it... ··· 1150 rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe); 1151 if (rc != 0) { 1152 printk(KERN_NOTICE "%s: " 1153 - "unable to submit clear for device %u" 1154 - " (code %d)\n", 1155 - sc->name, sc->dev->devnum, rc); 1156 /* 1157 * This is typically ENOMEM or some other such shit. 1158 * Retrying is pointless. Just do Bad End on it... ··· 1182 rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe); 1183 if (rc != 0) { 1184 printk(KERN_NOTICE "%s: " 1185 - "unable to submit clear for device %u" 1186 - " (code %d)\n", 1187 - sc->name, sc->dev->devnum, rc); 1188 /* 1189 * This is typically ENOMEM or some other such shit. 1190 * Retrying is pointless. Just do Bad End on it... ··· 1205 * encounter such a thing, try to read the CSW again. 
1206 */ 1207 if (++cmd->stat_count >= 4) { 1208 - printk(KERN_NOTICE "%s: " 1209 - "unable to get CSW on device %u\n", 1210 - sc->name, sc->dev->devnum); 1211 goto Bad_End; 1212 } 1213 __ub_state_stat(sc, cmd); ··· 1247 */ 1248 if (++cmd->stat_count >= 4) { 1249 printk(KERN_NOTICE "%s: " 1250 - "tag mismatch orig 0x%x reply 0x%x " 1251 - "on device %u\n", 1252 - sc->name, cmd->tag, bcs->Tag, 1253 - sc->dev->devnum); 1254 goto Bad_End; 1255 } 1256 __ub_state_stat(sc, cmd); ··· 1282 1283 } else { 1284 printk(KERN_WARNING "%s: " 1285 - "wrong command state %d on device %u\n", 1286 - sc->name, cmd->state, sc->dev->devnum); 1287 goto Bad_End; 1288 } 1289 return; ··· 1326 1327 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { 1328 /* XXX Clear stalls */ 1329 - printk("%s: CSW #%d submit failed (%d)\n", sc->name, cmd->tag, rc); /* P3 */ 1330 ub_complete(&sc->work_done); 1331 ub_state_done(sc, cmd, rc); 1332 return; ··· 1370 scmd->state = UB_CMDST_INIT; 1371 scmd->data = sc->top_sense; 1372 scmd->len = UB_SENSE_SIZE; 1373 scmd->done = ub_top_sense_done; 1374 scmd->back = cmd; 1375 ··· 1449 } 1450 if (cmd != scmd->back) { 1451 printk(KERN_WARNING "%s: " 1452 - "sense done for wrong command 0x%x on device %u\n", 1453 - sc->name, cmd->tag, sc->dev->devnum); 1454 return; 1455 } 1456 if (cmd->state != UB_CMDST_SENSE) { 1457 printk(KERN_WARNING "%s: " 1458 - "sense done with bad cmd state %d on device %u\n", 1459 - sc->name, cmd->state, sc->dev->devnum); 1460 return; 1461 } 1462 ··· 1467 ub_scsi_urb_compl(sc, cmd); 1468 } 1469 1470 - #if 0 1471 - /* Determine what the maximum LUN supported is */ 1472 - int usb_stor_Bulk_max_lun(struct us_data *us) 1473 - { 1474 - int result; 1475 - 1476 - /* issue the command */ 1477 - result = usb_stor_control_msg(us, us->recv_ctrl_pipe, 1478 - US_BULK_GET_MAX_LUN, 1479 - USB_DIR_IN | USB_TYPE_CLASS | 1480 - USB_RECIP_INTERFACE, 1481 - 0, us->ifnum, us->iobuf, 1, HZ); 1482 - 1483 - /* 1484 - * Some devices (i.e. 
Iomega Zip100) need this -- apparently 1485 - * the bulk pipes get STALLed when the GetMaxLUN request is 1486 - * processed. This is, in theory, harmless to all other devices 1487 - * (regardless of if they stall or not). 1488 - */ 1489 - if (result < 0) { 1490 - usb_stor_clear_halt(us, us->recv_bulk_pipe); 1491 - usb_stor_clear_halt(us, us->send_bulk_pipe); 1492 - } 1493 - 1494 - US_DEBUGP("GetMaxLUN command result is %d, data is %d\n", 1495 - result, us->iobuf[0]); 1496 - 1497 - /* if we have a successful request, return the result */ 1498 - if (result == 1) 1499 - return us->iobuf[0]; 1500 - 1501 - /* return the default -- no LUNs */ 1502 - return 0; 1503 - } 1504 - #endif 1505 - 1506 /* 1507 * This is called from a process context. 1508 */ 1509 - static void ub_revalidate(struct ub_dev *sc) 1510 { 1511 1512 - sc->readonly = 0; /* XXX Query this from the device */ 1513 1514 - sc->capacity.nsec = 0; 1515 - sc->capacity.bsize = 512; 1516 - sc->capacity.bshift = 0; 1517 1518 - if (ub_sync_tur(sc) != 0) 1519 return; /* Not ready */ 1520 - sc->changed = 0; 1521 1522 - if (ub_sync_read_cap(sc, &sc->capacity) != 0) { 1523 /* 1524 * The retry here means something is wrong, either with the 1525 * device, with the transport, or with our code. 1526 * We keep this because sd.c has retries for capacity. 
1527 */ 1528 - if (ub_sync_read_cap(sc, &sc->capacity) != 0) { 1529 - sc->capacity.nsec = 0; 1530 - sc->capacity.bsize = 512; 1531 - sc->capacity.bshift = 0; 1532 } 1533 } 1534 } ··· 1505 static int ub_bd_open(struct inode *inode, struct file *filp) 1506 { 1507 struct gendisk *disk = inode->i_bdev->bd_disk; 1508 struct ub_dev *sc; 1509 unsigned long flags; 1510 int rc; 1511 1512 - if ((sc = disk->private_data) == NULL) 1513 return -ENXIO; 1514 spin_lock_irqsave(&ub_lock, flags); 1515 if (atomic_read(&sc->poison)) { 1516 spin_unlock_irqrestore(&ub_lock, flags); ··· 1534 * The bottom line is, Al Viro says that we should not allow 1535 * bdev->bd_invalidated to be set when doing add_disk no matter what. 1536 */ 1537 - if (sc->first_open) { 1538 - if (sc->changed) { 1539 - sc->first_open = 0; 1540 rc = -ENOMEDIUM; 1541 goto err_open; 1542 } 1543 } 1544 1545 - if (sc->removable || sc->readonly) 1546 check_disk_change(inode->i_bdev); 1547 1548 /* ··· 1550 * under some pretty murky conditions (a failure of READ CAPACITY). 1551 * We may need it one day. 
1552 */ 1553 - if (sc->removable && sc->changed && !(filp->f_flags & O_NDELAY)) { 1554 rc = -ENOMEDIUM; 1555 goto err_open; 1556 } 1557 1558 - if (sc->readonly && (filp->f_mode & FMODE_WRITE)) { 1559 rc = -EROFS; 1560 goto err_open; 1561 } ··· 1572 static int ub_bd_release(struct inode *inode, struct file *filp) 1573 { 1574 struct gendisk *disk = inode->i_bdev->bd_disk; 1575 - struct ub_dev *sc = disk->private_data; 1576 1577 ub_put(sc); 1578 return 0; ··· 1603 */ 1604 static int ub_bd_revalidate(struct gendisk *disk) 1605 { 1606 - struct ub_dev *sc = disk->private_data; 1607 1608 - ub_revalidate(sc); 1609 - /* This is pretty much a long term P3 */ 1610 - if (!atomic_read(&sc->poison)) { /* Cover sc->dev */ 1611 - printk(KERN_INFO "%s: device %u capacity nsec %ld bsize %u\n", 1612 - sc->name, sc->dev->devnum, 1613 - sc->capacity.nsec, sc->capacity.bsize); 1614 - } 1615 1616 /* XXX Support sector size switching like in sr.c */ 1617 - blk_queue_hardsect_size(disk->queue, sc->capacity.bsize); 1618 - set_capacity(disk, sc->capacity.nsec); 1619 - // set_disk_ro(sdkp->disk, sc->readonly); 1620 1621 return 0; 1622 } ··· 1626 */ 1627 static int ub_bd_media_changed(struct gendisk *disk) 1628 { 1629 - struct ub_dev *sc = disk->private_data; 1630 1631 - if (!sc->removable) 1632 return 0; 1633 1634 /* ··· 1640 * will fail, then block layer discards the data. Since we never 1641 * spin drives up, such devices simply cannot be used with ub anyway. 1642 */ 1643 - if (ub_sync_tur(sc) != 0) { 1644 - sc->changed = 1; 1645 return 1; 1646 } 1647 1648 - return sc->changed; 1649 } 1650 1651 static struct block_device_operations ub_bd_fops = { ··· 1669 /* 1670 * Test if the device has a check condition on it, synchronously. 
1671 */ 1672 - static int ub_sync_tur(struct ub_dev *sc) 1673 { 1674 struct ub_scsi_cmd *cmd; 1675 enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) }; ··· 1688 cmd->cdb_len = 6; 1689 cmd->dir = UB_DIR_NONE; 1690 cmd->state = UB_CMDST_INIT; 1691 cmd->done = ub_probe_done; 1692 cmd->back = &compl; 1693 ··· 1719 /* 1720 * Read the SCSI capacity synchronously (for probing). 1721 */ 1722 - static int ub_sync_read_cap(struct ub_dev *sc, struct ub_capacity *ret) 1723 { 1724 struct ub_scsi_cmd *cmd; 1725 char *p; ··· 1745 cmd->state = UB_CMDST_INIT; 1746 cmd->data = p; 1747 cmd->len = 8; 1748 cmd->done = ub_probe_done; 1749 cmd->back = &compl; 1750 ··· 1812 { 1813 struct completion *cop = (struct completion *) arg; 1814 complete(cop); 1815 } 1816 1817 /* ··· 1984 } 1985 1986 if (ep_in == NULL || ep_out == NULL) { 1987 - printk(KERN_NOTICE "%s: device %u failed endpoint check\n", 1988 - sc->name, sc->dev->devnum); 1989 return -EIO; 1990 } 1991 ··· 2008 const struct usb_device_id *dev_id) 2009 { 2010 struct ub_dev *sc; 2011 - request_queue_t *q; 2012 - struct gendisk *disk; 2013 int rc; 2014 int i; 2015 ··· 2017 goto err_core; 2018 memset(sc, 0, sizeof(struct ub_dev)); 2019 spin_lock_init(&sc->lock); 2020 usb_init_urb(&sc->work_urb); 2021 tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc); 2022 atomic_set(&sc->poison, 0); ··· 2029 ub_init_completion(&sc->work_done); 2030 sc->work_done.done = 1; /* A little yuk, but oh well... */ 2031 2032 - rc = -ENOSR; 2033 - if ((sc->id = ub_id_get()) == -1) 2034 - goto err_id; 2035 - snprintf(sc->name, 8, DRV_NAME "%c", sc->id + 'a'); 2036 - 2037 sc->dev = interface_to_usbdev(intf); 2038 sc->intf = intf; 2039 // sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber; 2040 - 2041 usb_set_intfdata(intf, sc); 2042 usb_get_dev(sc->dev); 2043 // usb_get_intf(sc->intf); /* Do we need this? 
*/ 2044 2045 /* XXX Verify that we can handle the device (from descriptors) */ 2046 ··· 2076 * In any case it's not our business how revaliadation is implemented. 2077 */ 2078 for (i = 0; i < 3; i++) { /* Retries for benh's key */ 2079 - if ((rc = ub_sync_tur(sc)) <= 0) break; 2080 if (rc != 0x6) break; 2081 msleep(10); 2082 } 2083 2084 - sc->removable = 1; /* XXX Query this from the device */ 2085 - sc->changed = 1; /* ub_revalidate clears only */ 2086 - sc->first_open = 1; 2087 2088 - ub_revalidate(sc); 2089 - /* This is pretty much a long term P3 */ 2090 - printk(KERN_INFO "%s: device %u capacity nsec %ld bsize %u\n", 2091 - sc->name, sc->dev->devnum, sc->capacity.nsec, sc->capacity.bsize); 2092 2093 - /* 2094 - * Just one disk per sc currently, but maybe more. 2095 - */ 2096 rc = -ENOMEM; 2097 if ((disk = alloc_disk(UB_MINORS_PER_MAJOR)) == NULL) 2098 goto err_diskalloc; 2099 2100 - sc->disk = disk; 2101 - sprintf(disk->disk_name, DRV_NAME "%c", sc->id + 'a'); 2102 - sprintf(disk->devfs_name, DEVFS_NAME "/%c", sc->id + 'a'); 2103 disk->major = UB_MAJOR; 2104 - disk->first_minor = sc->id * UB_MINORS_PER_MAJOR; 2105 disk->fops = &ub_bd_fops; 2106 - disk->private_data = sc; 2107 - disk->driverfs_dev = &intf->dev; 2108 2109 rc = -ENOMEM; 2110 if ((q = blk_init_queue(ub_bd_rq_fn, &sc->lock)) == NULL) ··· 2165 2166 disk->queue = q; 2167 2168 - // blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask); 2169 blk_queue_max_hw_segments(q, UB_MAX_REQ_SG); 2170 blk_queue_max_phys_segments(q, UB_MAX_REQ_SG); 2171 - // blk_queue_segment_boundary(q, CARM_SG_BOUNDARY); 2172 blk_queue_max_sectors(q, UB_MAX_SECTORS); 2173 - blk_queue_hardsect_size(q, sc->capacity.bsize); 2174 2175 - /* 2176 - * This is a serious infraction, caused by a deficiency in the 2177 - * USB sg interface (usb_sg_wait()). We plan to remove this once 2178 - * we get mileage on the driver and can justify a change to USB API. 2179 - * See blk_queue_bounce_limit() to understand this part. 
2180 - * 2181 - * XXX And I still need to be aware of the DMA mask in the HC. 2182 - */ 2183 - q->bounce_pfn = blk_max_low_pfn; 2184 - q->bounce_gfp = GFP_NOIO; 2185 2186 - q->queuedata = sc; 2187 - 2188 - set_capacity(disk, sc->capacity.nsec); 2189 - if (sc->removable) 2190 disk->flags |= GENHD_FL_REMOVABLE; 2191 2192 add_disk(disk); ··· 2185 err_blkqinit: 2186 put_disk(disk); 2187 err_diskalloc: 2188 - device_remove_file(&sc->intf->dev, &dev_attr_diag); 2189 - err_diag: 2190 - usb_set_intfdata(intf, NULL); 2191 - // usb_put_intf(sc->intf); 2192 - usb_put_dev(sc->dev); 2193 - ub_id_put(sc->id); 2194 err_id: 2195 - kfree(sc); 2196 - err_core: 2197 return rc; 2198 } 2199 2200 static void ub_disconnect(struct usb_interface *intf) 2201 { 2202 struct ub_dev *sc = usb_get_intfdata(intf); 2203 - struct gendisk *disk = sc->disk; 2204 unsigned long flags; 2205 2206 /* ··· 2248 /* 2249 * Unregister the upper layer. 2250 */ 2251 - if (disk->flags & GENHD_FL_UP) 2252 - del_gendisk(disk); 2253 - /* 2254 - * I wish I could do: 2255 - * set_bit(QUEUE_FLAG_DEAD, &q->queue_flags); 2256 - * As it is, we rely on our internal poisoning and let 2257 - * the upper levels to spin furiously failing all the I/O. 2258 - */ 2259 2260 /* 2261 * Taking a lock on a structure which is about to be freed ··· 2310 { 2311 int rc; 2312 2313 - /* P3 */ printk("ub: sizeof ub_scsi_cmd %zu ub_dev %zu\n", 2314 - sizeof(struct ub_scsi_cmd), sizeof(struct ub_dev)); 2315 2316 if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0) 2317 goto err_regblkdev;
··· 8 * and is not licensed separately. See file COPYING for details. 9 * 10 * TODO (sorted by decreasing priority) 11 + * -- Kill first_open (Al Viro fixed the block layer now) 12 * -- Do resets with usb_device_reset (needs a thread context, use khubd) 13 * -- set readonly flag for CDs, set removable flag for CF readers 14 * -- do inquiry and verify we got a disk and not a tape (for LUN mismatch) 15 * -- special case some senses, e.g. 3a/0 -> no media present, reduce retries 16 * -- verify the 13 conditions and do bulk resets 17 * -- kill last_pipe and simply do two-state clearing on both pipes 18 * -- verify protocol (bulk) from USB descriptors (maybe...) 19 * -- highmem and sg ··· 49 #define US_SC_SCSI 0x06 /* Transparent */ 50 51 /* 52 + * This many LUNs per USB device. 53 + * Every one of them takes a host, see UB_MAX_HOSTS. 54 */ 55 + #define UB_MAX_LUNS 4 56 + 57 + /* 58 + */ 59 + 60 #define UB_MINORS_PER_MAJOR 8 61 62 #define UB_MAX_CDB_SIZE 16 /* Corresponds to Bulk */ ··· 65 u32 Tag; /* unique per command id */ 66 __le32 DataTransferLength; /* size of data */ 67 u8 Flags; /* direction in bit 0 */ 68 + u8 Lun; /* LUN */ 69 u8 Length; /* of of the CDB */ 70 u8 CDB[UB_MAX_CDB_SIZE]; /* max command */ 71 }; ··· 168 unsigned int len; /* Requested length */ 169 // struct scatterlist sgv[UB_MAX_REQ_SG]; 170 171 + struct ub_lun *lun; 172 void (*done)(struct ub_dev *, struct ub_scsi_cmd *); 173 void *back; 174 }; ··· 252 }; 253 254 /* 255 + * The block device instance (one per LUN). 256 */ 257 + struct ub_lun { 258 + struct ub_dev *udev; 259 + struct list_head link; 260 + struct gendisk *disk; 261 + int id; /* Host index */ 262 + int num; /* LUN number */ 263 + char name[16]; 264 + 265 int changed; /* Media was changed */ 266 int removable; 267 int readonly; 268 int first_open; /* Kludge. See ub_bd_open. */ 269 + 270 + /* Use Ingo's mempool if or when we have more than one command. 
*/ 271 + /* 272 + * Currently we never need more than one command for the whole device. 273 + * However, giving every LUN a command is a cheap and automatic way 274 + * to enforce fairness between them. 275 + */ 276 + int cmda[1]; 277 + struct ub_scsi_cmd cmdv[1]; 278 + 279 + struct ub_capacity capacity; 280 + }; 281 + 282 + /* 283 + * The USB device instance. 284 + */ 285 + struct ub_dev { 286 + spinlock_t lock; 287 + atomic_t poison; /* The USB device is disconnected */ 288 + int openc; /* protected by ub_lock! */ 289 + /* kref is too implicit for our taste */ 290 + unsigned int tagcnt; 291 + char name[12]; 292 struct usb_device *dev; 293 struct usb_interface *intf; 294 295 + struct list_head luns; 296 297 unsigned int send_bulk_pipe; /* cached pipe values */ 298 unsigned int recv_bulk_pipe; ··· 278 unsigned int recv_ctrl_pipe; 279 280 struct tasklet_struct tasklet; 281 282 struct ub_scsi_cmd_queue cmd_queue; 283 struct ub_scsi_cmd top_rqs_cmd; /* REQUEST SENSE */ ··· 301 /* 302 */ 303 static void ub_cleanup(struct ub_dev *sc); 304 + static int ub_bd_rq_fn_1(struct ub_lun *lun, struct request *rq); 305 + static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, 306 + struct ub_scsi_cmd *cmd, struct request *rq); 307 static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_scsi_cmd *cmd, 308 struct request *rq); 309 static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd); ··· 320 static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd, 321 int stalled_pipe); 322 static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd); 323 + static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun); 324 + static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun, 325 + struct ub_capacity *ret); 326 + static int ub_probe_lun(struct ub_dev *sc, int lnum); 327 328 /* 329 */ ··· 342 */ 343 #define UB_MAX_HOSTS 26 344 static char ub_hostv[UB_MAX_HOSTS]; 345 + 346 static DEFINE_SPINLOCK(ub_lock); /* Locks 
globals and ->openc */ 347 348 /* ··· 406 { 407 struct usb_interface *intf; 408 struct ub_dev *sc; 409 + struct list_head *p; 410 + struct ub_lun *lun; 411 int cnt; 412 unsigned long flags; 413 int nc, nh; ··· 421 spin_lock_irqsave(&sc->lock, flags); 422 423 cnt += sprintf(page + cnt, 424 + "qlen %d qmax %d\n", 425 + sc->cmd_queue.qlen, sc->cmd_queue.qmax); 426 + 427 + list_for_each (p, &sc->luns) { 428 + lun = list_entry(p, struct ub_lun, link); 429 + cnt += sprintf(page + cnt, 430 + "lun %u changed %d removable %d readonly %d\n", 431 + lun->num, lun->changed, lun->removable, lun->readonly); 432 + } 433 434 if ((nc = sc->tr.cur + 1) == SCMD_TRACE_SZ) nc = 0; 435 for (j = 0; j < SCMD_TRACE_SZ; j++) { ··· 523 */ 524 static void ub_cleanup(struct ub_dev *sc) 525 { 526 + struct list_head *p; 527 + struct ub_lun *lun; 528 request_queue_t *q; 529 530 + while (!list_empty(&sc->luns)) { 531 + p = sc->luns.next; 532 + lun = list_entry(p, struct ub_lun, link); 533 + list_del(p); 534 535 + /* I don't think queue can be NULL. But... Stolen from sx8.c */ 536 + if ((q = lun->disk->queue) != NULL) 537 + blk_cleanup_queue(q); 538 + /* 539 + * If we zero disk->private_data BEFORE put_disk, we have 540 + * to check for NULL all over the place in open, release, 541 + * check_media and revalidate, because the block level 542 + * semaphore is well inside the put_disk. 543 + * But we cannot zero after the call, because *disk is gone. 544 + * The sd.c is blatantly racy in this area. 545 + */ 546 + /* disk->private_data = NULL; */ 547 + put_disk(lun->disk); 548 + lun->disk = NULL; 549 550 + ub_id_put(lun->id); 551 + kfree(lun); 552 + } 553 + 554 kfree(sc); 555 } 556 557 /* 558 * The "command allocator". 
559 */ 560 + static struct ub_scsi_cmd *ub_get_cmd(struct ub_lun *lun) 561 { 562 struct ub_scsi_cmd *ret; 563 564 + if (lun->cmda[0]) 565 return NULL; 566 + ret = &lun->cmdv[0]; 567 + lun->cmda[0] = 1; 568 return ret; 569 } 570 571 + static void ub_put_cmd(struct ub_lun *lun, struct ub_scsi_cmd *cmd) 572 { 573 + if (cmd != &lun->cmdv[0]) { 574 printk(KERN_WARNING "%s: releasing a foreign cmd %p\n", 575 + lun->name, cmd); 576 return; 577 } 578 + if (!lun->cmda[0]) { 579 + printk(KERN_WARNING "%s: releasing a free cmd\n", lun->name); 580 return; 581 } 582 + lun->cmda[0] = 0; 583 } 584 585 /* ··· 630 631 static void ub_bd_rq_fn(request_queue_t *q) 632 { 633 + struct ub_lun *lun = q->queuedata; 634 struct request *rq; 635 636 while ((rq = elv_next_request(q)) != NULL) { 637 + if (ub_bd_rq_fn_1(lun, rq) != 0) { 638 blk_stop_queue(q); 639 break; 640 } 641 } 642 } 643 644 + static int ub_bd_rq_fn_1(struct ub_lun *lun, struct request *rq) 645 { 646 + struct ub_dev *sc = lun->udev; 647 struct ub_scsi_cmd *cmd; 648 int rc; 649 650 + if (atomic_read(&sc->poison) || lun->changed) { 651 blkdev_dequeue_request(rq); 652 ub_end_rq(rq, 0); 653 return 0; 654 } 655 656 + if ((cmd = ub_get_cmd(lun)) == NULL) 657 return -1; 658 memset(cmd, 0, sizeof(struct ub_scsi_cmd)); 659 ··· 661 if (blk_pc_request(rq)) { 662 rc = ub_cmd_build_packet(sc, cmd, rq); 663 } else { 664 + rc = ub_cmd_build_block(sc, lun, cmd, rq); 665 } 666 if (rc != 0) { 667 + ub_put_cmd(lun, cmd); 668 ub_end_rq(rq, 0); 669 return 0; 670 } 671 cmd->state = UB_CMDST_INIT; 672 + cmd->lun = lun; 673 cmd->done = ub_rw_cmd_done; 674 cmd->back = rq; 675 676 cmd->tag = sc->tagcnt++; 677 if ((rc = ub_submit_scsi(sc, cmd)) != 0) { 678 + ub_put_cmd(lun, cmd); 679 ub_end_rq(rq, 0); 680 return 0; 681 } 682 683 return 0; 684 } 685 686 + static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, 687 + struct ub_scsi_cmd *cmd, struct request *rq) 688 { 689 int ub_dir; 690 #if 0 /* We use rq->buffer for now */ ··· 707 sg = 
&cmd->sgv[0]; 708 n_elem = blk_rq_map_sg(q, rq, sg); 709 if (n_elem <= 0) { 710 + ub_put_cmd(lun, cmd); 711 ub_end_rq(rq, 0); 712 blk_start_queue(q); 713 return 0; /* request with no s/g entries? */ ··· 716 if (n_elem != 1) { /* Paranoia */ 717 printk(KERN_WARNING "%s: request with %d segments\n", 718 sc->name, n_elem); 719 + ub_put_cmd(lun, cmd); 720 ub_end_rq(rq, 0); 721 blk_start_queue(q); 722 return 0; ··· 748 * The call to blk_queue_hardsect_size() guarantees that request 749 * is aligned, but it is given in terms of 512 byte units, always. 750 */ 751 + block = rq->sector >> lun->capacity.bshift; 752 + nblks = rq->nr_sectors >> lun->capacity.bshift; 753 754 cmd->cdb[0] = (ub_dir == UB_DIR_READ)? READ_10: WRITE_10; 755 /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */ ··· 803 static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd) 804 { 805 struct request *rq = cmd->back; 806 + struct ub_lun *lun = cmd->lun; 807 + struct gendisk *disk = lun->disk; 808 request_queue_t *q = disk->queue; 809 int uptodate; 810 ··· 818 else 819 uptodate = 0; 820 821 + ub_put_cmd(lun, cmd); 822 ub_end_rq(rq, uptodate); 823 blk_start_queue(q); 824 } ··· 887 bcb->Tag = cmd->tag; /* Endianness is not important */ 888 bcb->DataTransferLength = cpu_to_le32(cmd->len); 889 bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0; 890 + bcb->Lun = (cmd->lun != NULL) ? cmd->lun->num : 0; 891 bcb->Length = cmd->cdb_len; 892 893 /* copy the command payload */ ··· 1002 * The control pipe clears itself - nothing to do. 1003 * XXX Might try to reset the device here and retry. 1004 */ 1005 + printk(KERN_NOTICE "%s: stall on control pipe\n", 1006 + sc->name); 1007 goto Bad_End; 1008 } 1009 ··· 1025 * The control pipe clears itself - nothing to do. 1026 * XXX Might try to reset the device here and retry. 
1027 */ 1028 + printk(KERN_NOTICE "%s: stall on control pipe\n", 1029 + sc->name); 1030 goto Bad_End; 1031 } 1032 ··· 1046 rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe); 1047 if (rc != 0) { 1048 printk(KERN_NOTICE "%s: " 1049 + "unable to submit clear (%d)\n", 1050 + sc->name, rc); 1051 /* 1052 * This is typically ENOMEM or some other such shit. 1053 * Retrying is pointless. Just do Bad End on it... ··· 1107 rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe); 1108 if (rc != 0) { 1109 printk(KERN_NOTICE "%s: " 1110 + "unable to submit clear (%d)\n", 1111 + sc->name, rc); 1112 /* 1113 * This is typically ENOMEM or some other such shit. 1114 * Retrying is pointless. Just do Bad End on it... ··· 1140 rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe); 1141 if (rc != 0) { 1142 printk(KERN_NOTICE "%s: " 1143 + "unable to submit clear (%d)\n", 1144 + sc->name, rc); 1145 /* 1146 * This is typically ENOMEM or some other such shit. 1147 * Retrying is pointless. Just do Bad End on it... ··· 1164 * encounter such a thing, try to read the CSW again. 
1165 */ 1166 if (++cmd->stat_count >= 4) { 1167 + printk(KERN_NOTICE "%s: unable to get CSW\n", 1168 + sc->name); 1169 goto Bad_End; 1170 } 1171 __ub_state_stat(sc, cmd); ··· 1207 */ 1208 if (++cmd->stat_count >= 4) { 1209 printk(KERN_NOTICE "%s: " 1210 + "tag mismatch orig 0x%x reply 0x%x\n", 1211 + sc->name, cmd->tag, bcs->Tag); 1212 goto Bad_End; 1213 } 1214 __ub_state_stat(sc, cmd); ··· 1244 1245 } else { 1246 printk(KERN_WARNING "%s: " 1247 + "wrong command state %d\n", 1248 + sc->name, cmd->state); 1249 goto Bad_End; 1250 } 1251 return; ··· 1288 1289 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { 1290 /* XXX Clear stalls */ 1291 ub_complete(&sc->work_done); 1292 ub_state_done(sc, cmd, rc); 1293 return; ··· 1333 scmd->state = UB_CMDST_INIT; 1334 scmd->data = sc->top_sense; 1335 scmd->len = UB_SENSE_SIZE; 1336 + scmd->lun = cmd->lun; 1337 scmd->done = ub_top_sense_done; 1338 scmd->back = cmd; 1339 ··· 1411 } 1412 if (cmd != scmd->back) { 1413 printk(KERN_WARNING "%s: " 1414 + "sense done for wrong command 0x%x\n", 1415 + sc->name, cmd->tag); 1416 return; 1417 } 1418 if (cmd->state != UB_CMDST_SENSE) { 1419 printk(KERN_WARNING "%s: " 1420 + "sense done with bad cmd state %d\n", 1421 + sc->name, cmd->state); 1422 return; 1423 } 1424 ··· 1429 ub_scsi_urb_compl(sc, cmd); 1430 } 1431 1432 /* 1433 * This is called from a process context. 1434 */ 1435 + static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun) 1436 { 1437 1438 + lun->readonly = 0; /* XXX Query this from the device */ 1439 1440 + lun->capacity.nsec = 0; 1441 + lun->capacity.bsize = 512; 1442 + lun->capacity.bshift = 0; 1443 1444 + if (ub_sync_tur(sc, lun) != 0) 1445 return; /* Not ready */ 1446 + lun->changed = 0; 1447 1448 + if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) { 1449 /* 1450 * The retry here means something is wrong, either with the 1451 * device, with the transport, or with our code. 1452 * We keep this because sd.c has retries for capacity. 
1453 */ 1454 + if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) { 1455 + lun->capacity.nsec = 0; 1456 + lun->capacity.bsize = 512; 1457 + lun->capacity.bshift = 0; 1458 } 1459 } 1460 } ··· 1503 static int ub_bd_open(struct inode *inode, struct file *filp) 1504 { 1505 struct gendisk *disk = inode->i_bdev->bd_disk; 1506 + struct ub_lun *lun; 1507 struct ub_dev *sc; 1508 unsigned long flags; 1509 int rc; 1510 1511 + if ((lun = disk->private_data) == NULL) 1512 return -ENXIO; 1513 + sc = lun->udev; 1514 + 1515 spin_lock_irqsave(&ub_lock, flags); 1516 if (atomic_read(&sc->poison)) { 1517 spin_unlock_irqrestore(&ub_lock, flags); ··· 1529 * The bottom line is, Al Viro says that we should not allow 1530 * bdev->bd_invalidated to be set when doing add_disk no matter what. 1531 */ 1532 + if (lun->first_open) { 1533 + lun->first_open = 0; 1534 + if (lun->changed) { 1535 rc = -ENOMEDIUM; 1536 goto err_open; 1537 } 1538 } 1539 1540 + if (lun->removable || lun->readonly) 1541 check_disk_change(inode->i_bdev); 1542 1543 /* ··· 1545 * under some pretty murky conditions (a failure of READ CAPACITY). 1546 * We may need it one day. 
1547 */ 1548 + if (lun->removable && lun->changed && !(filp->f_flags & O_NDELAY)) { 1549 rc = -ENOMEDIUM; 1550 goto err_open; 1551 } 1552 1553 + if (lun->readonly && (filp->f_mode & FMODE_WRITE)) { 1554 rc = -EROFS; 1555 goto err_open; 1556 } ··· 1567 static int ub_bd_release(struct inode *inode, struct file *filp) 1568 { 1569 struct gendisk *disk = inode->i_bdev->bd_disk; 1570 + struct ub_lun *lun = disk->private_data; 1571 + struct ub_dev *sc = lun->udev; 1572 1573 ub_put(sc); 1574 return 0; ··· 1597 */ 1598 static int ub_bd_revalidate(struct gendisk *disk) 1599 { 1600 + struct ub_lun *lun = disk->private_data; 1601 1602 + ub_revalidate(lun->udev, lun); 1603 1604 /* XXX Support sector size switching like in sr.c */ 1605 + blk_queue_hardsect_size(disk->queue, lun->capacity.bsize); 1606 + set_capacity(disk, lun->capacity.nsec); 1607 + // set_disk_ro(sdkp->disk, lun->readonly); 1608 1609 return 0; 1610 } ··· 1626 */ 1627 static int ub_bd_media_changed(struct gendisk *disk) 1628 { 1629 + struct ub_lun *lun = disk->private_data; 1630 1631 + if (!lun->removable) 1632 return 0; 1633 1634 /* ··· 1640 * will fail, then block layer discards the data. Since we never 1641 * spin drives up, such devices simply cannot be used with ub anyway. 1642 */ 1643 + if (ub_sync_tur(lun->udev, lun) != 0) { 1644 + lun->changed = 1; 1645 return 1; 1646 } 1647 1648 + return lun->changed; 1649 } 1650 1651 static struct block_device_operations ub_bd_fops = { ··· 1669 /* 1670 * Test if the device has a check condition on it, synchronously. 1671 */ 1672 + static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun) 1673 { 1674 struct ub_scsi_cmd *cmd; 1675 enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) }; ··· 1688 cmd->cdb_len = 6; 1689 cmd->dir = UB_DIR_NONE; 1690 cmd->state = UB_CMDST_INIT; 1691 + cmd->lun = lun; /* This may be NULL, but that's ok */ 1692 cmd->done = ub_probe_done; 1693 cmd->back = &compl; 1694 ··· 1718 /* 1719 * Read the SCSI capacity synchronously (for probing). 
1720 */ 1721 + static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun, 1722 + struct ub_capacity *ret) 1723 { 1724 struct ub_scsi_cmd *cmd; 1725 char *p; ··· 1743 cmd->state = UB_CMDST_INIT; 1744 cmd->data = p; 1745 cmd->len = 8; 1746 + cmd->lun = lun; 1747 cmd->done = ub_probe_done; 1748 cmd->back = &compl; 1749 ··· 1809 { 1810 struct completion *cop = (struct completion *) arg; 1811 complete(cop); 1812 + } 1813 + 1814 + /* 1815 + * Get number of LUNs by the way of Bulk GetMaxLUN command. 1816 + */ 1817 + static int ub_sync_getmaxlun(struct ub_dev *sc) 1818 + { 1819 + int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber; 1820 + unsigned char *p; 1821 + enum { ALLOC_SIZE = 1 }; 1822 + struct usb_ctrlrequest *cr; 1823 + struct completion compl; 1824 + struct timer_list timer; 1825 + int nluns; 1826 + int rc; 1827 + 1828 + init_completion(&compl); 1829 + 1830 + rc = -ENOMEM; 1831 + if ((p = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL) 1832 + goto err_alloc; 1833 + *p = 55; 1834 + 1835 + cr = &sc->work_cr; 1836 + cr->bRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE; 1837 + cr->bRequest = US_BULK_GET_MAX_LUN; 1838 + cr->wValue = cpu_to_le16(0); 1839 + cr->wIndex = cpu_to_le16(ifnum); 1840 + cr->wLength = cpu_to_le16(1); 1841 + 1842 + usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe, 1843 + (unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl); 1844 + sc->work_urb.transfer_flags = 0; 1845 + sc->work_urb.actual_length = 0; 1846 + sc->work_urb.error_count = 0; 1847 + sc->work_urb.status = 0; 1848 + 1849 + if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) { 1850 + if (rc == -EPIPE) { 1851 + printk("%s: Stall at GetMaxLUN, using 1 LUN\n", 1852 + sc->name); /* P3 */ 1853 + } else { 1854 + printk(KERN_WARNING 1855 + "%s: Unable to submit GetMaxLUN (%d)\n", 1856 + sc->name, rc); 1857 + } 1858 + goto err_submit; 1859 + } 1860 + 1861 + init_timer(&timer); 1862 + timer.function = ub_probe_timeout; 1863 + timer.data = 
(unsigned long) &compl; 1864 + timer.expires = jiffies + UB_CTRL_TIMEOUT; 1865 + add_timer(&timer); 1866 + 1867 + wait_for_completion(&compl); 1868 + 1869 + del_timer_sync(&timer); 1870 + usb_kill_urb(&sc->work_urb); 1871 + 1872 + if (sc->work_urb.actual_length != 1) { 1873 + printk("%s: GetMaxLUN returned %d bytes\n", sc->name, 1874 + sc->work_urb.actual_length); /* P3 */ 1875 + nluns = 0; 1876 + } else { 1877 + if ((nluns = *p) == 55) { 1878 + nluns = 0; 1879 + } else { 1880 + /* GetMaxLUN returns the maximum LUN number */ 1881 + nluns += 1; 1882 + if (nluns > UB_MAX_LUNS) 1883 + nluns = UB_MAX_LUNS; 1884 + } 1885 + printk("%s: GetMaxLUN returned %d, using %d LUNs\n", sc->name, 1886 + *p, nluns); /* P3 */ 1887 + } 1888 + 1889 + kfree(p); 1890 + return nluns; 1891 + 1892 + err_submit: 1893 + kfree(p); 1894 + err_alloc: 1895 + return rc; 1896 } 1897 1898 /* ··· 1897 } 1898 1899 if (ep_in == NULL || ep_out == NULL) { 1900 + printk(KERN_NOTICE "%s: failed endpoint check\n", 1901 + sc->name); 1902 return -EIO; 1903 } 1904 ··· 1921 const struct usb_device_id *dev_id) 1922 { 1923 struct ub_dev *sc; 1924 + int nluns; 1925 int rc; 1926 int i; 1927 ··· 1931 goto err_core; 1932 memset(sc, 0, sizeof(struct ub_dev)); 1933 spin_lock_init(&sc->lock); 1934 + INIT_LIST_HEAD(&sc->luns); 1935 usb_init_urb(&sc->work_urb); 1936 tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc); 1937 atomic_set(&sc->poison, 0); ··· 1942 ub_init_completion(&sc->work_done); 1943 sc->work_done.done = 1; /* A little yuk, but oh well... */ 1944 1945 sc->dev = interface_to_usbdev(intf); 1946 sc->intf = intf; 1947 // sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber; 1948 usb_set_intfdata(intf, sc); 1949 usb_get_dev(sc->dev); 1950 // usb_get_intf(sc->intf); /* Do we need this? 
*/ 1951 + 1952 + snprintf(sc->name, 12, DRV_NAME "(%d.%d)", 1953 + sc->dev->bus->busnum, sc->dev->devnum); 1954 1955 /* XXX Verify that we can handle the device (from descriptors) */ 1956 ··· 1992 * In any case it's not our business how revaliadation is implemented. 1993 */ 1994 for (i = 0; i < 3; i++) { /* Retries for benh's key */ 1995 + if ((rc = ub_sync_tur(sc, NULL)) <= 0) break; 1996 if (rc != 0x6) break; 1997 msleep(10); 1998 } 1999 2000 + nluns = 1; 2001 + for (i = 0; i < 3; i++) { 2002 + if ((rc = ub_sync_getmaxlun(sc)) < 0) { 2003 + /* 2004 + * Some devices (i.e. Iomega Zip100) need this -- 2005 + * apparently the bulk pipes get STALLed when the 2006 + * GetMaxLUN request is processed. 2007 + * XXX I have a ZIP-100, verify it does this. 2008 + */ 2009 + if (rc == -EPIPE) { 2010 + ub_probe_clear_stall(sc, sc->recv_bulk_pipe); 2011 + ub_probe_clear_stall(sc, sc->send_bulk_pipe); 2012 + } 2013 + break; 2014 + } 2015 + if (rc != 0) { 2016 + nluns = rc; 2017 + break; 2018 + } 2019 + mdelay(100); 2020 + } 2021 2022 + for (i = 0; i < nluns; i++) { 2023 + ub_probe_lun(sc, i); 2024 + } 2025 + return 0; 2026 2027 + /* device_remove_file(&sc->intf->dev, &dev_attr_diag); */ 2028 + err_diag: 2029 + usb_set_intfdata(intf, NULL); 2030 + // usb_put_intf(sc->intf); 2031 + usb_put_dev(sc->dev); 2032 + kfree(sc); 2033 + err_core: 2034 + return rc; 2035 + } 2036 + 2037 + static int ub_probe_lun(struct ub_dev *sc, int lnum) 2038 + { 2039 + struct ub_lun *lun; 2040 + request_queue_t *q; 2041 + struct gendisk *disk; 2042 + int rc; 2043 + 2044 + rc = -ENOMEM; 2045 + if ((lun = kmalloc(sizeof(struct ub_lun), GFP_KERNEL)) == NULL) 2046 + goto err_alloc; 2047 + memset(lun, 0, sizeof(struct ub_lun)); 2048 + lun->num = lnum; 2049 + 2050 + rc = -ENOSR; 2051 + if ((lun->id = ub_id_get()) == -1) 2052 + goto err_id; 2053 + 2054 + lun->udev = sc; 2055 + list_add(&lun->link, &sc->luns); 2056 + 2057 + snprintf(lun->name, 16, DRV_NAME "%c(%d.%d.%d)", 2058 + lun->id + 'a', 
sc->dev->bus->busnum, sc->dev->devnum, lun->num); 2059 + 2060 + lun->removable = 1; /* XXX Query this from the device */ 2061 + lun->changed = 1; /* ub_revalidate clears only */ 2062 + lun->first_open = 1; 2063 + ub_revalidate(sc, lun); 2064 + 2065 rc = -ENOMEM; 2066 if ((disk = alloc_disk(UB_MINORS_PER_MAJOR)) == NULL) 2067 goto err_diskalloc; 2068 2069 + lun->disk = disk; 2070 + sprintf(disk->disk_name, DRV_NAME "%c", lun->id + 'a'); 2071 + sprintf(disk->devfs_name, DEVFS_NAME "/%c", lun->id + 'a'); 2072 disk->major = UB_MAJOR; 2073 + disk->first_minor = lun->id * UB_MINORS_PER_MAJOR; 2074 disk->fops = &ub_bd_fops; 2075 + disk->private_data = lun; 2076 + disk->driverfs_dev = &sc->intf->dev; /* XXX Many to one ok? */ 2077 2078 rc = -ENOMEM; 2079 if ((q = blk_init_queue(ub_bd_rq_fn, &sc->lock)) == NULL) ··· 2028 2029 disk->queue = q; 2030 2031 + blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); 2032 blk_queue_max_hw_segments(q, UB_MAX_REQ_SG); 2033 blk_queue_max_phys_segments(q, UB_MAX_REQ_SG); 2034 + blk_queue_segment_boundary(q, 0xffffffff); /* Dubious. */ 2035 blk_queue_max_sectors(q, UB_MAX_SECTORS); 2036 + blk_queue_hardsect_size(q, lun->capacity.bsize); 2037 2038 + q->queuedata = lun; 2039 2040 + set_capacity(disk, lun->capacity.nsec); 2041 + if (lun->removable) 2042 disk->flags |= GENHD_FL_REMOVABLE; 2043 2044 add_disk(disk); ··· 2059 err_blkqinit: 2060 put_disk(disk); 2061 err_diskalloc: 2062 + list_del(&lun->link); 2063 + ub_id_put(lun->id); 2064 err_id: 2065 + kfree(lun); 2066 + err_alloc: 2067 return rc; 2068 } 2069 2070 static void ub_disconnect(struct usb_interface *intf) 2071 { 2072 struct ub_dev *sc = usb_get_intfdata(intf); 2073 + struct list_head *p; 2074 + struct ub_lun *lun; 2075 + struct gendisk *disk; 2076 unsigned long flags; 2077 2078 /* ··· 2124 /* 2125 * Unregister the upper layer. 
2126 */ 2127 + list_for_each (p, &sc->luns) { 2128 + lun = list_entry(p, struct ub_lun, link); 2129 + disk = lun->disk; 2130 + if (disk->flags & GENHD_FL_UP) 2131 + del_gendisk(disk); 2132 + /* 2133 + * I wish I could do: 2134 + * set_bit(QUEUE_FLAG_DEAD, &q->queue_flags); 2135 + * As it is, we rely on our internal poisoning and let 2136 + * the upper levels to spin furiously failing all the I/O. 2137 + */ 2138 + } 2139 2140 /* 2141 * Taking a lock on a structure which is about to be freed ··· 2182 { 2183 int rc; 2184 2185 + /* P3 */ printk("ub: sizeof ub_scsi_cmd %zu ub_dev %zu ub_lun %zu\n", 2186 + sizeof(struct ub_scsi_cmd), sizeof(struct ub_dev), sizeof(struct ub_lun)); 2187 2188 if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0) 2189 goto err_regblkdev;