[PATCH] USB: Support multiple-LUN devices in ub

Signed-off-by: Pete Zaitcev <zaitcev@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>

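For readers following the diff: the core of this change is to ask the device how many logical units it exposes, using the Bulk-Only Mass Storage GetMaxLUN class request, and then to register one gendisk per LUN. The sketch below shows the shape of that request. It is an illustration only: the helper name is made up, the synchronous usb_control_msg() style and the timeout value are assumptions for clarity, while the patch itself builds the control URB by hand in ub_sync_getmaxlun() with its own timer and stall recovery.

/*
 * Illustrative sketch, not part of the patch: the GetMaxLUN (0xfe)
 * class-specific request. A stall is traditionally taken to mean
 * "one LUN"; otherwise the reply byte holds the highest LUN number,
 * so the LUN count is reply + 1.
 */
static int get_max_lun_sketch(struct usb_device *dev, int ifnum)
{
        unsigned char *buf;
        int nluns = 1;
        int rc;

        if ((buf = kmalloc(1, GFP_KERNEL)) == NULL)  /* DMA-safe buffer */
                return -ENOMEM;

        rc = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
            US_BULK_GET_MAX_LUN,
            USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
            0, ifnum, buf, 1, 1000 /* timeout; illustrative value */);
        if (rc == 1)
                nluns = *buf + 1;

        kfree(buf);
        return nluns;
}
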
diff -urp -X dontdiff linux-2.6.12-rc3/drivers/block/ub.c linux-2.6.12-rc3-lem/drivers/block/ub.c

Authored by Pete Zaitcev, committed by Greg Kroah-Hartman (f4800078, aa447acb)

+364 -236
drivers/block/ub.c
···
  * and is not licensed separately. See file COPYING for details.
  *
  * TODO (sorted by decreasing priority)
+ * -- Kill first_open (Al Viro fixed the block layer now)
  * -- Do resets with usb_device_reset (needs a thread context, use khubd)
  * -- set readonly flag for CDs, set removable flag for CF readers
  * -- do inquiry and verify we got a disk and not a tape (for LUN mismatch)
- * -- support pphaneuf's SDDR-75 with two LUNs (also broken capacity...)
  * -- special case some senses, e.g. 3a/0 -> no media present, reduce retries
  * -- verify the 13 conditions and do bulk resets
- * -- normal pool of commands instead of cmdv[]?
  * -- kill last_pipe and simply do two-state clearing on both pipes
  * -- verify protocol (bulk) from USB descriptors (maybe...)
  * -- highmem and sg
···
 #define US_SC_SCSI      0x06            /* Transparent */
 
 /*
+ * This many LUNs per USB device.
+ * Every one of them takes a host, see UB_MAX_HOSTS.
  */
+#define UB_MAX_LUNS   4
+
+/*
+ */
+
 #define UB_MINORS_PER_MAJOR     8
 
 #define UB_MAX_CDB_SIZE  16             /* Corresponds to Bulk */
···
         u32     Tag;                    /* unique per command id */
         __le32  DataTransferLength;     /* size of data */
         u8      Flags;                  /* direction in bit 0 */
-        u8      Lun;                    /* LUN normally 0 */
+        u8      Lun;                    /* LUN */
         u8      Length;                 /* of of the CDB */
         u8      CDB[UB_MAX_CDB_SIZE];   /* max command */
 };
···
         unsigned int len;               /* Requested length */
         // struct scatterlist sgv[UB_MAX_REQ_SG];
 
+        struct ub_lun *lun;
         void (*done)(struct ub_dev *, struct ub_scsi_cmd *);
         void *back;
 };
···
 };
 
 /*
- * The UB device instance.
+ * The block device instance (one per LUN).
  */
-struct ub_dev {
-        spinlock_t lock;
-        int id;                         /* Number among ub's */
-        atomic_t poison;                /* The USB device is disconnected */
-        int openc;                      /* protected by ub_lock! */
-                                        /* kref is too implicit for our taste */
-        unsigned int tagcnt;
+struct ub_lun {
+        struct ub_dev *udev;
+        struct list_head link;
+        struct gendisk *disk;
+        int id;                         /* Host index */
+        int num;                        /* LUN number */
+        char name[16];
+
         int changed;                    /* Media was changed */
         int removable;
         int readonly;
         int first_open;                 /* Kludge. See ub_bd_open. */
-        char name[8];
+
+        /* Use Ingo's mempool if or when we have more than one command. */
+        /*
+         * Currently we never need more than one command for the whole device.
+         * However, giving every LUN a command is a cheap and automatic way
+         * to enforce fairness between them.
+         */
+        int cmda[1];
+        struct ub_scsi_cmd cmdv[1];
+
+        struct ub_capacity capacity;
+};
+
+/*
+ * The USB device instance.
+ */
+struct ub_dev {
+        spinlock_t lock;
+        atomic_t poison;                /* The USB device is disconnected */
+        int openc;                      /* protected by ub_lock! */
+                                        /* kref is too implicit for our taste */
+        unsigned int tagcnt;
+        char name[12];
         struct usb_device *dev;
         struct usb_interface *intf;
 
-        struct ub_capacity capacity;
-        struct gendisk *disk;
+        struct list_head luns;
 
         unsigned int send_bulk_pipe;    /* cached pipe values */
         unsigned int recv_bulk_pipe;
···
         unsigned int recv_ctrl_pipe;
 
         struct tasklet_struct tasklet;
-
-        /* XXX Use Ingo's mempool (once we have more than one) */
-        int cmda[1];
-        struct ub_scsi_cmd cmdv[1];
 
         struct ub_scsi_cmd_queue cmd_queue;
         struct ub_scsi_cmd top_rqs_cmd; /* REQUEST SENSE */
···
 /*
 */
 static void ub_cleanup(struct ub_dev *sc);
-static int ub_bd_rq_fn_1(struct ub_dev *sc, struct request *rq);
-static int ub_cmd_build_block(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
-    struct request *rq);
+static int ub_bd_rq_fn_1(struct ub_lun *lun, struct request *rq);
+static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
+    struct ub_scsi_cmd *cmd, struct request *rq);
 static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
     struct request *rq);
 static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
···
 static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
     int stalled_pipe);
 static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
-static int ub_sync_tur(struct ub_dev *sc);
-static int ub_sync_read_cap(struct ub_dev *sc, struct ub_capacity *ret);
+static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun);
+static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
+    struct ub_capacity *ret);
+static int ub_probe_lun(struct ub_dev *sc, int lnum);
···
  */
 #define UB_MAX_HOSTS  26
 static char ub_hostv[UB_MAX_HOSTS];
+
 static DEFINE_SPINLOCK(ub_lock);        /* Locks globals and ->openc */
···
 {
         struct usb_interface *intf;
         struct ub_dev *sc;
+        struct list_head *p;
+        struct ub_lun *lun;
         int cnt;
         unsigned long flags;
         int nc, nh;
···
         spin_lock_irqsave(&sc->lock, flags);
 
         cnt += sprintf(page + cnt,
-            "qlen %d qmax %d changed %d removable %d readonly %d\n",
-            sc->cmd_queue.qlen, sc->cmd_queue.qmax,
-            sc->changed, sc->removable, sc->readonly);
+            "qlen %d qmax %d\n",
+            sc->cmd_queue.qlen, sc->cmd_queue.qmax);
+
+        list_for_each (p, &sc->luns) {
+                lun = list_entry(p, struct ub_lun, link);
+                cnt += sprintf(page + cnt,
+                    "lun %u changed %d removable %d readonly %d\n",
+                    lun->num, lun->changed, lun->removable, lun->readonly);
+        }
 
         if ((nc = sc->tr.cur + 1) == SCMD_TRACE_SZ) nc = 0;
         for (j = 0; j < SCMD_TRACE_SZ; j++) {
···
  */
 static void ub_cleanup(struct ub_dev *sc)
 {
+        struct list_head *p;
+        struct ub_lun *lun;
         request_queue_t *q;
 
-        /* I don't think queue can be NULL. But... Stolen from sx8.c */
-        if ((q = sc->disk->queue) != NULL)
-                blk_cleanup_queue(q);
+        while (!list_empty(&sc->luns)) {
+                p = sc->luns.next;
+                lun = list_entry(p, struct ub_lun, link);
+                list_del(p);
 
-        /*
-         * If we zero disk->private_data BEFORE put_disk, we have to check
-         * for NULL all over the place in open, release, check_media and
-         * revalidate, because the block level semaphore is well inside the
-         * put_disk. But we cannot zero after the call, because *disk is gone.
-         * The sd.c is blatantly racy in this area.
-         */
-        /* disk->private_data = NULL; */
-        put_disk(sc->disk);
-        sc->disk = NULL;
+                /* I don't think queue can be NULL. But... Stolen from sx8.c */
+                if ((q = lun->disk->queue) != NULL)
+                        blk_cleanup_queue(q);
+                /*
+                 * If we zero disk->private_data BEFORE put_disk, we have
+                 * to check for NULL all over the place in open, release,
+                 * check_media and revalidate, because the block level
+                 * semaphore is well inside the put_disk.
+                 * But we cannot zero after the call, because *disk is gone.
+                 * The sd.c is blatantly racy in this area.
+                 */
+                /* disk->private_data = NULL; */
+                put_disk(lun->disk);
+                lun->disk = NULL;
 
-        ub_id_put(sc->id);
+                ub_id_put(lun->id);
+                kfree(lun);
+        }
+
         kfree(sc);
 }
 
 /*
  * The "command allocator".
  */
-static struct ub_scsi_cmd *ub_get_cmd(struct ub_dev *sc)
+static struct ub_scsi_cmd *ub_get_cmd(struct ub_lun *lun)
 {
         struct ub_scsi_cmd *ret;
 
-        if (sc->cmda[0])
+        if (lun->cmda[0])
                 return NULL;
-        ret = &sc->cmdv[0];
-        sc->cmda[0] = 1;
+        ret = &lun->cmdv[0];
+        lun->cmda[0] = 1;
         return ret;
 }
 
-static void ub_put_cmd(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
+static void ub_put_cmd(struct ub_lun *lun, struct ub_scsi_cmd *cmd)
 {
-        if (cmd != &sc->cmdv[0]) {
+        if (cmd != &lun->cmdv[0]) {
                 printk(KERN_WARNING "%s: releasing a foreign cmd %p\n",
-                    sc->name, cmd);
+                    lun->name, cmd);
                 return;
         }
-        if (!sc->cmda[0]) {
-                printk(KERN_WARNING "%s: releasing a free cmd\n", sc->name);
+        if (!lun->cmda[0]) {
+                printk(KERN_WARNING "%s: releasing a free cmd\n", lun->name);
                 return;
         }
-        sc->cmda[0] = 0;
+        lun->cmda[0] = 0;
 }
 
 /*
···
 
 static void ub_bd_rq_fn(request_queue_t *q)
 {
-        struct ub_dev *sc = q->queuedata;
+        struct ub_lun *lun = q->queuedata;
         struct request *rq;
 
         while ((rq = elv_next_request(q)) != NULL) {
-                if (ub_bd_rq_fn_1(sc, rq) != 0) {
+                if (ub_bd_rq_fn_1(lun, rq) != 0) {
                         blk_stop_queue(q);
                         break;
                 }
         }
 }
 
-static int ub_bd_rq_fn_1(struct ub_dev *sc, struct request *rq)
+static int ub_bd_rq_fn_1(struct ub_lun *lun, struct request *rq)
 {
+        struct ub_dev *sc = lun->udev;
         struct ub_scsi_cmd *cmd;
         int rc;
 
-        if (atomic_read(&sc->poison) || sc->changed) {
+        if (atomic_read(&sc->poison) || lun->changed) {
                 blkdev_dequeue_request(rq);
                 ub_end_rq(rq, 0);
                 return 0;
         }
 
-        if ((cmd = ub_get_cmd(sc)) == NULL)
+        if ((cmd = ub_get_cmd(lun)) == NULL)
                 return -1;
         memset(cmd, 0, sizeof(struct ub_scsi_cmd));
 
···
         if (blk_pc_request(rq)) {
                 rc = ub_cmd_build_packet(sc, cmd, rq);
         } else {
-                rc = ub_cmd_build_block(sc, cmd, rq);
+                rc = ub_cmd_build_block(sc, lun, cmd, rq);
         }
         if (rc != 0) {
-                ub_put_cmd(sc, cmd);
+                ub_put_cmd(lun, cmd);
                 ub_end_rq(rq, 0);
-                blk_start_queue(sc->disk->queue);
                 return 0;
         }
-
         cmd->state = UB_CMDST_INIT;
+        cmd->lun = lun;
         cmd->done = ub_rw_cmd_done;
         cmd->back = rq;
 
         cmd->tag = sc->tagcnt++;
         if ((rc = ub_submit_scsi(sc, cmd)) != 0) {
-                ub_put_cmd(sc, cmd);
+                ub_put_cmd(lun, cmd);
                 ub_end_rq(rq, 0);
-                blk_start_queue(sc->disk->queue);
                 return 0;
         }
 
         return 0;
 }
 
-static int ub_cmd_build_block(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
-    struct request *rq)
+static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
+    struct ub_scsi_cmd *cmd, struct request *rq)
 {
         int ub_dir;
 #if 0 /* We use rq->buffer for now */
···
         sg = &cmd->sgv[0];
         n_elem = blk_rq_map_sg(q, rq, sg);
         if (n_elem <= 0) {
-                ub_put_cmd(sc, cmd);
+                ub_put_cmd(lun, cmd);
                 ub_end_rq(rq, 0);
                 blk_start_queue(q);
                 return 0;               /* request with no s/g entries? */
···
         if (n_elem != 1) {              /* Paranoia */
                 printk(KERN_WARNING "%s: request with %d segments\n",
                     sc->name, n_elem);
-                ub_put_cmd(sc, cmd);
+                ub_put_cmd(lun, cmd);
                 ub_end_rq(rq, 0);
                 blk_start_queue(q);
                 return 0;
···
          * The call to blk_queue_hardsect_size() guarantees that request
          * is aligned, but it is given in terms of 512 byte units, always.
          */
-        block = rq->sector >> sc->capacity.bshift;
-        nblks = rq->nr_sectors >> sc->capacity.bshift;
+        block = rq->sector >> lun->capacity.bshift;
+        nblks = rq->nr_sectors >> lun->capacity.bshift;
 
         cmd->cdb[0] = (ub_dir == UB_DIR_READ)? READ_10: WRITE_10;
         /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
···
 static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 {
         struct request *rq = cmd->back;
-        struct gendisk *disk = sc->disk;
+        struct ub_lun *lun = cmd->lun;
+        struct gendisk *disk = lun->disk;
         request_queue_t *q = disk->queue;
         int uptodate;
···
         else
                 uptodate = 0;
 
-        ub_put_cmd(sc, cmd);
+        ub_put_cmd(lun, cmd);
         ub_end_rq(rq, uptodate);
         blk_start_queue(q);
 }
···
         bcb->Tag = cmd->tag;            /* Endianness is not important */
         bcb->DataTransferLength = cpu_to_le32(cmd->len);
         bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0;
-        bcb->Lun = 0;                   /* No multi-LUN yet */
+        bcb->Lun = (cmd->lun != NULL) ? cmd->lun->num : 0;
         bcb->Length = cmd->cdb_len;
···
                  * The control pipe clears itself - nothing to do.
                  * XXX Might try to reset the device here and retry.
                  */
-                printk(KERN_NOTICE "%s: "
-                    "stall on control pipe for device %u\n",
-                    sc->name, sc->dev->devnum);
+                printk(KERN_NOTICE "%s: stall on control pipe\n",
+                    sc->name);
                 goto Bad_End;
         }
 
···
                  * The control pipe clears itself - nothing to do.
                  * XXX Might try to reset the device here and retry.
                  */
-                printk(KERN_NOTICE "%s: "
-                    "stall on control pipe for device %u\n",
-                    sc->name, sc->dev->devnum);
+                printk(KERN_NOTICE "%s: stall on control pipe\n",
+                    sc->name);
                 goto Bad_End;
         }
 
···
                 rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
                 if (rc != 0) {
                         printk(KERN_NOTICE "%s: "
-                            "unable to submit clear for device %u"
-                            " (code %d)\n",
-                            sc->name, sc->dev->devnum, rc);
+                            "unable to submit clear (%d)\n",
+                            sc->name, rc);
                         /*
                          * This is typically ENOMEM or some other such shit.
                          * Retrying is pointless. Just do Bad End on it...
···
                 rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
                 if (rc != 0) {
                         printk(KERN_NOTICE "%s: "
-                            "unable to submit clear for device %u"
-                            " (code %d)\n",
-                            sc->name, sc->dev->devnum, rc);
+                            "unable to submit clear (%d)\n",
+                            sc->name, rc);
                         /*
                          * This is typically ENOMEM or some other such shit.
                          * Retrying is pointless. Just do Bad End on it...
···
                 rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
                 if (rc != 0) {
                         printk(KERN_NOTICE "%s: "
-                            "unable to submit clear for device %u"
-                            " (code %d)\n",
-                            sc->name, sc->dev->devnum, rc);
+                            "unable to submit clear (%d)\n",
+                            sc->name, rc);
                         /*
                          * This is typically ENOMEM or some other such shit.
                          * Retrying is pointless. Just do Bad End on it...
···
                  * encounter such a thing, try to read the CSW again.
                  */
                 if (++cmd->stat_count >= 4) {
-                        printk(KERN_NOTICE "%s: "
-                            "unable to get CSW on device %u\n",
-                            sc->name, sc->dev->devnum);
+                        printk(KERN_NOTICE "%s: unable to get CSW\n",
+                            sc->name);
                         goto Bad_End;
                 }
                 __ub_state_stat(sc, cmd);
···
                  */
                 if (++cmd->stat_count >= 4) {
                         printk(KERN_NOTICE "%s: "
-                            "tag mismatch orig 0x%x reply 0x%x "
-                            "on device %u\n",
-                            sc->name, cmd->tag, bcs->Tag,
-                            sc->dev->devnum);
+                            "tag mismatch orig 0x%x reply 0x%x\n",
+                            sc->name, cmd->tag, bcs->Tag);
                         goto Bad_End;
                 }
                 __ub_state_stat(sc, cmd);
···
 
         } else {
                 printk(KERN_WARNING "%s: "
-                    "wrong command state %d on device %u\n",
-                    sc->name, cmd->state, sc->dev->devnum);
+                    "wrong command state %d\n",
+                    sc->name, cmd->state);
                 goto Bad_End;
         }
         return;
···
 
         if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
                 /* XXX Clear stalls */
-                printk("%s: CSW #%d submit failed (%d)\n", sc->name, cmd->tag, rc); /* P3 */
                 ub_complete(&sc->work_done);
                 ub_state_done(sc, cmd, rc);
                 return;
···
         scmd->state = UB_CMDST_INIT;
         scmd->data = sc->top_sense;
         scmd->len = UB_SENSE_SIZE;
+        scmd->lun = cmd->lun;
         scmd->done = ub_top_sense_done;
         scmd->back = cmd;
···
         }
         if (cmd != scmd->back) {
                 printk(KERN_WARNING "%s: "
-                    "sense done for wrong command 0x%x on device %u\n",
-                    sc->name, cmd->tag, sc->dev->devnum);
+                    "sense done for wrong command 0x%x\n",
+                    sc->name, cmd->tag);
                 return;
         }
         if (cmd->state != UB_CMDST_SENSE) {
                 printk(KERN_WARNING "%s: "
-                    "sense done with bad cmd state %d on device %u\n",
-                    sc->name, cmd->state, sc->dev->devnum);
+                    "sense done with bad cmd state %d\n",
+                    sc->name, cmd->state);
                 return;
         }
 
···
         ub_scsi_urb_compl(sc, cmd);
 }
 
-#if 0
-/* Determine what the maximum LUN supported is */
-int usb_stor_Bulk_max_lun(struct us_data *us)
-{
-        int result;
-
-        /* issue the command */
-        result = usb_stor_control_msg(us, us->recv_ctrl_pipe,
-            US_BULK_GET_MAX_LUN,
-            USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
-            0, us->ifnum, us->iobuf, 1, HZ);
-
-        /*
-         * Some devices (i.e. Iomega Zip100) need this -- apparently
-         * the bulk pipes get STALLed when the GetMaxLUN request is
-         * processed.  This is, in theory, harmless to all other devices
-         * (regardless of if they stall or not).
-         */
-        if (result < 0) {
-                usb_stor_clear_halt(us, us->recv_bulk_pipe);
-                usb_stor_clear_halt(us, us->send_bulk_pipe);
-        }
-
-        US_DEBUGP("GetMaxLUN command result is %d, data is %d\n",
-            result, us->iobuf[0]);
-
-        /* if we have a successful request, return the result */
-        if (result == 1)
-                return us->iobuf[0];
-
-        /* return the default -- no LUNs */
-        return 0;
-}
-#endif
-
 /*
  * This is called from a process context.
  */
-static void ub_revalidate(struct ub_dev *sc)
+static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun)
 {
 
-        sc->readonly = 0;       /* XXX Query this from the device */
+        lun->readonly = 0;      /* XXX Query this from the device */
 
-        sc->capacity.nsec = 0;
-        sc->capacity.bsize = 512;
-        sc->capacity.bshift = 0;
+        lun->capacity.nsec = 0;
+        lun->capacity.bsize = 512;
+        lun->capacity.bshift = 0;
 
-        if (ub_sync_tur(sc) != 0)
+        if (ub_sync_tur(sc, lun) != 0)
                 return;                 /* Not ready */
-        sc->changed = 0;
+        lun->changed = 0;
 
-        if (ub_sync_read_cap(sc, &sc->capacity) != 0) {
+        if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
                 /*
                  * The retry here means something is wrong, either with the
                  * device, with the transport, or with our code.
                  * We keep this because sd.c has retries for capacity.
                  */
-                if (ub_sync_read_cap(sc, &sc->capacity) != 0) {
-                        sc->capacity.nsec = 0;
-                        sc->capacity.bsize = 512;
-                        sc->capacity.bshift = 0;
+                if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
+                        lun->capacity.nsec = 0;
+                        lun->capacity.bsize = 512;
+                        lun->capacity.bshift = 0;
                 }
         }
 }
···
 static int ub_bd_open(struct inode *inode, struct file *filp)
 {
         struct gendisk *disk = inode->i_bdev->bd_disk;
+        struct ub_lun *lun;
         struct ub_dev *sc;
         unsigned long flags;
         int rc;
 
-        if ((sc = disk->private_data) == NULL)
+        if ((lun = disk->private_data) == NULL)
                 return -ENXIO;
+        sc = lun->udev;
+
         spin_lock_irqsave(&ub_lock, flags);
         if (atomic_read(&sc->poison)) {
                 spin_unlock_irqrestore(&ub_lock, flags);
···
          * The bottom line is, Al Viro says that we should not allow
          * bdev->bd_invalidated to be set when doing add_disk no matter what.
          */
-        if (sc->first_open) {
-                if (sc->changed) {
-                        sc->first_open = 0;
+        if (lun->first_open) {
+                lun->first_open = 0;
+                if (lun->changed) {
                         rc = -ENOMEDIUM;
                         goto err_open;
                 }
         }
 
-        if (sc->removable || sc->readonly)
+        if (lun->removable || lun->readonly)
                 check_disk_change(inode->i_bdev);
 
         /*
···
          * under some pretty murky conditions (a failure of READ CAPACITY).
          * We may need it one day.
          */
-        if (sc->removable && sc->changed && !(filp->f_flags & O_NDELAY)) {
+        if (lun->removable && lun->changed && !(filp->f_flags & O_NDELAY)) {
                 rc = -ENOMEDIUM;
                 goto err_open;
         }
 
-        if (sc->readonly && (filp->f_mode & FMODE_WRITE)) {
+        if (lun->readonly && (filp->f_mode & FMODE_WRITE)) {
                 rc = -EROFS;
                 goto err_open;
         }
···
 static int ub_bd_release(struct inode *inode, struct file *filp)
 {
         struct gendisk *disk = inode->i_bdev->bd_disk;
-        struct ub_dev *sc = disk->private_data;
+        struct ub_lun *lun = disk->private_data;
+        struct ub_dev *sc = lun->udev;
 
         ub_put(sc);
         return 0;
···
  */
 static int ub_bd_revalidate(struct gendisk *disk)
 {
-        struct ub_dev *sc = disk->private_data;
+        struct ub_lun *lun = disk->private_data;
 
-        ub_revalidate(sc);
-        /* This is pretty much a long term P3 */
-        if (!atomic_read(&sc->poison)) {                /* Cover sc->dev */
-                printk(KERN_INFO "%s: device %u capacity nsec %ld bsize %u\n",
-                    sc->name, sc->dev->devnum,
-                    sc->capacity.nsec, sc->capacity.bsize);
-        }
+        ub_revalidate(lun->udev, lun);
 
         /* XXX Support sector size switching like in sr.c */
-        blk_queue_hardsect_size(disk->queue, sc->capacity.bsize);
-        set_capacity(disk, sc->capacity.nsec);
-        // set_disk_ro(sdkp->disk, sc->readonly);
+        blk_queue_hardsect_size(disk->queue, lun->capacity.bsize);
+        set_capacity(disk, lun->capacity.nsec);
+        // set_disk_ro(sdkp->disk, lun->readonly);
 
         return 0;
 }
···
  */
 static int ub_bd_media_changed(struct gendisk *disk)
 {
-        struct ub_dev *sc = disk->private_data;
+        struct ub_lun *lun = disk->private_data;
 
-        if (!sc->removable)
+        if (!lun->removable)
                 return 0;
 
         /*
···
          * will fail, then block layer discards the data. Since we never
          * spin drives up, such devices simply cannot be used with ub anyway.
          */
-        if (ub_sync_tur(sc) != 0) {
-                sc->changed = 1;
+        if (ub_sync_tur(lun->udev, lun) != 0) {
+                lun->changed = 1;
                 return 1;
         }
 
-        return sc->changed;
+        return lun->changed;
 }
 
 static struct block_device_operations ub_bd_fops = {
···
 /*
  * Test if the device has a check condition on it, synchronously.
  */
-static int ub_sync_tur(struct ub_dev *sc)
+static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun)
 {
         struct ub_scsi_cmd *cmd;
         enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) };
···
         cmd->cdb_len = 6;
         cmd->dir = UB_DIR_NONE;
         cmd->state = UB_CMDST_INIT;
+        cmd->lun = lun;                 /* This may be NULL, but that's ok */
         cmd->done = ub_probe_done;
         cmd->back = &compl;
···
 /*
  * Read the SCSI capacity synchronously (for probing).
  */
-static int ub_sync_read_cap(struct ub_dev *sc, struct ub_capacity *ret)
+static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
+    struct ub_capacity *ret)
 {
         struct ub_scsi_cmd *cmd;
         char *p;
···
         cmd->state = UB_CMDST_INIT;
         cmd->data = p;
         cmd->len = 8;
+        cmd->lun = lun;
         cmd->done = ub_probe_done;
         cmd->back = &compl;
···
 {
         struct completion *cop = (struct completion *) arg;
         complete(cop);
+}
+
+/*
+ * Get number of LUNs by the way of Bulk GetMaxLUN command.
+ */
+static int ub_sync_getmaxlun(struct ub_dev *sc)
+{
+        int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
+        unsigned char *p;
+        enum { ALLOC_SIZE = 1 };
+        struct usb_ctrlrequest *cr;
+        struct completion compl;
+        struct timer_list timer;
+        int nluns;
+        int rc;
+
+        init_completion(&compl);
+
+        rc = -ENOMEM;
+        if ((p = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
+                goto err_alloc;
+        *p = 55;
+
+        cr = &sc->work_cr;
+        cr->bRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+        cr->bRequest = US_BULK_GET_MAX_LUN;
+        cr->wValue = cpu_to_le16(0);
+        cr->wIndex = cpu_to_le16(ifnum);
+        cr->wLength = cpu_to_le16(1);
+
+        usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe,
+            (unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl);
+        sc->work_urb.transfer_flags = 0;
+        sc->work_urb.actual_length = 0;
+        sc->work_urb.error_count = 0;
+        sc->work_urb.status = 0;
+
+        if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
+                if (rc == -EPIPE) {
+                        printk("%s: Stall at GetMaxLUN, using 1 LUN\n",
+                             sc->name); /* P3 */
+                } else {
+                        printk(KERN_WARNING
+                             "%s: Unable to submit GetMaxLUN (%d)\n",
+                             sc->name, rc);
+                }
+                goto err_submit;
+        }
+
+        init_timer(&timer);
+        timer.function = ub_probe_timeout;
+        timer.data = (unsigned long) &compl;
+        timer.expires = jiffies + UB_CTRL_TIMEOUT;
+        add_timer(&timer);
+
+        wait_for_completion(&compl);
+
+        del_timer_sync(&timer);
+        usb_kill_urb(&sc->work_urb);
+
+        if (sc->work_urb.actual_length != 1) {
+                printk("%s: GetMaxLUN returned %d bytes\n", sc->name,
+                    sc->work_urb.actual_length); /* P3 */
+                nluns = 0;
+        } else {
+                if ((nluns = *p) == 55) {
+                        nluns = 0;
+                } else {
+                        /* GetMaxLUN returns the maximum LUN number */
+                        nluns += 1;
+                        if (nluns > UB_MAX_LUNS)
+                                nluns = UB_MAX_LUNS;
+                }
+                printk("%s: GetMaxLUN returned %d, using %d LUNs\n", sc->name,
+                    *p, nluns); /* P3 */
+        }
+
+        kfree(p);
+        return nluns;
+
+err_submit:
+        kfree(p);
+err_alloc:
+        return rc;
 }
 
 /*
···
         }
 
         if (ep_in == NULL || ep_out == NULL) {
-                printk(KERN_NOTICE "%s: device %u failed endpoint check\n",
-                    sc->name, sc->dev->devnum);
+                printk(KERN_NOTICE "%s: failed endpoint check\n",
+                    sc->name);
                 return -EIO;
         }
···
                 const struct usb_device_id *dev_id)
 {
         struct ub_dev *sc;
-        request_queue_t *q;
-        struct gendisk *disk;
+        int nluns;
         int rc;
         int i;
···
                 goto err_core;
         memset(sc, 0, sizeof(struct ub_dev));
         spin_lock_init(&sc->lock);
+        INIT_LIST_HEAD(&sc->luns);
         usb_init_urb(&sc->work_urb);
         tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
         atomic_set(&sc->poison, 0);
···
         ub_init_completion(&sc->work_done);
         sc->work_done.done = 1;         /* A little yuk, but oh well... */
 
-        rc = -ENOSR;
-        if ((sc->id = ub_id_get()) == -1)
-                goto err_id;
-        snprintf(sc->name, 8, DRV_NAME "%c", sc->id + 'a');
-
         sc->dev = interface_to_usbdev(intf);
         sc->intf = intf;
         // sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
-
         usb_set_intfdata(intf, sc);
         usb_get_dev(sc->dev);
         // usb_get_intf(sc->intf);      /* Do we need this? */
+
+        snprintf(sc->name, 12, DRV_NAME "(%d.%d)",
+            sc->dev->bus->busnum, sc->dev->devnum);
 
         /* XXX Verify that we can handle the device (from descriptors) */
···
          * In any case it's not our business how revaliadation is implemented.
          */
         for (i = 0; i < 3; i++) {       /* Retries for benh's key */
-                if ((rc = ub_sync_tur(sc)) <= 0) break;
+                if ((rc = ub_sync_tur(sc, NULL)) <= 0) break;
                 if (rc != 0x6) break;
                 msleep(10);
         }
 
-        sc->removable = 1;              /* XXX Query this from the device */
-        sc->changed = 1;                /* ub_revalidate clears only */
-        sc->first_open = 1;
+        nluns = 1;
+        for (i = 0; i < 3; i++) {
+                if ((rc = ub_sync_getmaxlun(sc)) < 0) {
+                        /*
+                         * Some devices (i.e. Iomega Zip100) need this --
+                         * apparently the bulk pipes get STALLed when the
+                         * GetMaxLUN request is processed.
+                         * XXX I have a ZIP-100, verify it does this.
+                         */
+                        if (rc == -EPIPE) {
+                                ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
+                                ub_probe_clear_stall(sc, sc->send_bulk_pipe);
+                        }
+                        break;
+                }
+                if (rc != 0) {
+                        nluns = rc;
+                        break;
+                }
+                mdelay(100);
+        }
 
-        ub_revalidate(sc);
-        /* This is pretty much a long term P3 */
-        printk(KERN_INFO "%s: device %u capacity nsec %ld bsize %u\n",
-            sc->name, sc->dev->devnum, sc->capacity.nsec, sc->capacity.bsize);
+        for (i = 0; i < nluns; i++) {
+                ub_probe_lun(sc, i);
+        }
+        return 0;
 
-        /*
-         * Just one disk per sc currently, but maybe more.
-         */
+        /* device_remove_file(&sc->intf->dev, &dev_attr_diag); */
+err_diag:
+        usb_set_intfdata(intf, NULL);
+        // usb_put_intf(sc->intf);
+        usb_put_dev(sc->dev);
+        kfree(sc);
+err_core:
+        return rc;
+}
+
+static int ub_probe_lun(struct ub_dev *sc, int lnum)
+{
+        struct ub_lun *lun;
+        request_queue_t *q;
+        struct gendisk *disk;
+        int rc;
+
+        rc = -ENOMEM;
+        if ((lun = kmalloc(sizeof(struct ub_lun), GFP_KERNEL)) == NULL)
+                goto err_alloc;
+        memset(lun, 0, sizeof(struct ub_lun));
+        lun->num = lnum;
+
+        rc = -ENOSR;
+        if ((lun->id = ub_id_get()) == -1)
+                goto err_id;
+
+        lun->udev = sc;
+        list_add(&lun->link, &sc->luns);
+
+        snprintf(lun->name, 16, DRV_NAME "%c(%d.%d.%d)",
+            lun->id + 'a', sc->dev->bus->busnum, sc->dev->devnum, lun->num);
+
+        lun->removable = 1;             /* XXX Query this from the device */
+        lun->changed = 1;               /* ub_revalidate clears only */
+        lun->first_open = 1;
+        ub_revalidate(sc, lun);
+
         rc = -ENOMEM;
         if ((disk = alloc_disk(UB_MINORS_PER_MAJOR)) == NULL)
                 goto err_diskalloc;
 
-        sc->disk = disk;
-        sprintf(disk->disk_name, DRV_NAME "%c", sc->id + 'a');
-        sprintf(disk->devfs_name, DEVFS_NAME "/%c", sc->id + 'a');
+        lun->disk = disk;
+        sprintf(disk->disk_name, DRV_NAME "%c", lun->id + 'a');
+        sprintf(disk->devfs_name, DEVFS_NAME "/%c", lun->id + 'a');
         disk->major = UB_MAJOR;
-        disk->first_minor = sc->id * UB_MINORS_PER_MAJOR;
+        disk->first_minor = lun->id * UB_MINORS_PER_MAJOR;
         disk->fops = &ub_bd_fops;
-        disk->private_data = sc;
-        disk->driverfs_dev = &intf->dev;
+        disk->private_data = lun;
+        disk->driverfs_dev = &sc->intf->dev;    /* XXX Many to one ok? */
 
         rc = -ENOMEM;
         if ((q = blk_init_queue(ub_bd_rq_fn, &sc->lock)) == NULL)
···
 
         disk->queue = q;
 
-        // blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
+        blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
         blk_queue_max_hw_segments(q, UB_MAX_REQ_SG);
         blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
-        // blk_queue_segment_boundary(q, CARM_SG_BOUNDARY);
+        blk_queue_segment_boundary(q, 0xffffffff);      /* Dubious. */
         blk_queue_max_sectors(q, UB_MAX_SECTORS);
-        blk_queue_hardsect_size(q, sc->capacity.bsize);
+        blk_queue_hardsect_size(q, lun->capacity.bsize);
 
-        /*
-         * This is a serious infraction, caused by a deficiency in the
-         * USB sg interface (usb_sg_wait()). We plan to remove this once
-         * we get mileage on the driver and can justify a change to USB API.
-         * See blk_queue_bounce_limit() to understand this part.
-         *
-         * XXX And I still need to be aware of the DMA mask in the HC.
-         */
-        q->bounce_pfn = blk_max_low_pfn;
-        q->bounce_gfp = GFP_NOIO;
+        q->queuedata = lun;
-
-        q->queuedata = sc;
-
-        set_capacity(disk, sc->capacity.nsec);
+        set_capacity(disk, lun->capacity.nsec);
-        if (sc->removable)
+        if (lun->removable)
                 disk->flags |= GENHD_FL_REMOVABLE;
 
         add_disk(disk);
 
···
 err_blkqinit:
         put_disk(disk);
 err_diskalloc:
-        device_remove_file(&sc->intf->dev, &dev_attr_diag);
-err_diag:
-        usb_set_intfdata(intf, NULL);
-        // usb_put_intf(sc->intf);
-        usb_put_dev(sc->dev);
-        ub_id_put(sc->id);
+        list_del(&lun->link);
+        ub_id_put(lun->id);
 err_id:
-        kfree(sc);
-err_core:
+        kfree(lun);
+err_alloc:
         return rc;
 }
 
 static void ub_disconnect(struct usb_interface *intf)
 {
         struct ub_dev *sc = usb_get_intfdata(intf);
-        struct gendisk *disk = sc->disk;
+        struct list_head *p;
+        struct ub_lun *lun;
+        struct gendisk *disk;
         unsigned long flags;
 
         /*
···
         /*
          * Unregister the upper layer.
          */
-        if (disk->flags & GENHD_FL_UP)
-                del_gendisk(disk);
-        /*
-         * I wish I could do:
-         *    set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
-         * As it is, we rely on our internal poisoning and let
-         * the upper levels to spin furiously failing all the I/O.
-         */
+        list_for_each (p, &sc->luns) {
+                lun = list_entry(p, struct ub_lun, link);
+                disk = lun->disk;
+                if (disk->flags & GENHD_FL_UP)
+                        del_gendisk(disk);
+                /*
+                 * I wish I could do:
+                 *    set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
+                 * As it is, we rely on our internal poisoning and let
+                 * the upper levels to spin furiously failing all the I/O.
+                 */
+        }
 
         /*
          * Taking a lock on a structure which is about to be freed
···
 {
         int rc;
 
-        /* P3 */ printk("ub: sizeof ub_scsi_cmd %zu ub_dev %zu\n",
-            sizeof(struct ub_scsi_cmd), sizeof(struct ub_dev));
+        /* P3 */ printk("ub: sizeof ub_scsi_cmd %zu ub_dev %zu ub_lun %zu\n",
+            sizeof(struct ub_scsi_cmd), sizeof(struct ub_dev), sizeof(struct ub_lun));
 
         if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0)
                 goto err_regblkdev;
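
After this patch the ownership model is: one struct ub_dev per USB interface, holding a list of struct ub_lun, and everything the block layer touches (the gendisk, the request queue, the capacity, the media-change and read-only state, and the one-entry command allocator) lives in the per-LUN object. As the new comment in struct ub_lun notes, giving each LUN its own command slot is also a cheap way to keep the LUNs fair with respect to each other. A minimal sketch of walking that list, mirroring the loops in ub_cleanup() and ub_disconnect() above (the function itself is a hypothetical example, not in the patch; the field names are the patch's own):

/*
 * Hypothetical example: report every LUN of one ub device.
 * Fields (luns, link, name, num, capacity) come from this patch.
 */
static void ub_show_luns(struct ub_dev *sc)
{
        struct list_head *p;
        struct ub_lun *lun;

        list_for_each (p, &sc->luns) {
                lun = list_entry(p, struct ub_lun, link);
                printk(KERN_DEBUG "%s: LUN %d: %ld sectors of %u bytes\n",
                    lun->name, lun->num, lun->capacity.nsec,
                    lun->capacity.bsize);
        }
}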