1/* 2 * The low performance USB storage driver (ub). 3 * 4 * Copyright (c) 1999, 2000 Matthew Dharm (mdharm-usb@one-eyed-alien.net) 5 * Copyright (C) 2004 Pete Zaitcev (zaitcev@yahoo.com) 6 * 7 * This work is a part of Linux kernel, is derived from it, 8 * and is not licensed separately. See file COPYING for details. 9 * 10 * TODO (sorted by decreasing priority) 11 * -- set readonly flag for CDs, set removable flag for CF readers 12 * -- do inquiry and verify we got a disk and not a tape (for LUN mismatch) 13 * -- special case some senses, e.g. 3a/0 -> no media present, reduce retries 14 * -- verify the 13 conditions and do bulk resets 15 * -- kill last_pipe and simply do two-state clearing on both pipes 16 * -- highmem 17 * -- move top_sense and work_bcs into separate allocations (if they survive) 18 * for cache purists and esoteric architectures. 19 * -- Allocate structure for LUN 0 before the first ub_sync_tur, avoid NULL. ? 20 * -- prune comments, they are too volumnous 21 * -- Exterminate P3 printks 22 * -- Resove XXX's 23 * -- Redo "benh's retries", perhaps have spin-up code to handle them. V:D=? 24 * -- CLEAR, CLR2STS, CLRRS seem to be ripe for refactoring. 25 */ 26#include <linux/kernel.h> 27#include <linux/module.h> 28#include <linux/usb.h> 29#include <linux/usb_usual.h> 30#include <linux/blkdev.h> 31#include <linux/devfs_fs_kernel.h> 32#include <linux/timer.h> 33#include <scsi/scsi.h> 34 35#define DRV_NAME "ub" 36#define DEVFS_NAME DRV_NAME 37 38#define UB_MAJOR 180 39 40/* 41 * The command state machine is the key model for understanding of this driver. 42 * 43 * The general rule is that all transitions are done towards the bottom 44 * of the diagram, thus preventing any loops. 45 * 46 * An exception to that is how the STAT state is handled. A counter allows it 47 * to be re-entered along the path marked with [C]. 48 * 49 * +--------+ 50 * ! INIT ! 51 * +--------+ 52 * ! 53 * ub_scsi_cmd_start fails ->--------------------------------------\ 54 * ! ! 55 * V ! 56 * +--------+ ! 57 * ! CMD ! ! 58 * +--------+ ! 59 * ! +--------+ ! 60 * was -EPIPE -->-------------------------------->! CLEAR ! ! 61 * ! +--------+ ! 62 * ! ! ! 63 * was error -->------------------------------------- ! --------->\ 64 * ! ! ! 65 * /--<-- cmd->dir == NONE ? ! ! 66 * ! ! ! ! 67 * ! V ! ! 68 * ! +--------+ ! ! 69 * ! ! DATA ! ! ! 70 * ! +--------+ ! ! 71 * ! ! +---------+ ! ! 72 * ! was -EPIPE -->--------------->! CLR2STS ! ! ! 73 * ! ! +---------+ ! ! 74 * ! ! ! ! ! 75 * ! ! was error -->---- ! --------->\ 76 * ! was error -->--------------------- ! ------------- ! --------->\ 77 * ! ! ! ! ! 78 * ! V ! ! ! 79 * \--->+--------+ ! ! ! 80 * ! STAT !<--------------------------/ ! ! 81 * /--->+--------+ ! ! 82 * ! ! ! ! 83 * [C] was -EPIPE -->-----------\ ! ! 84 * ! ! ! ! ! 85 * +<---- len == 0 ! ! ! 86 * ! ! ! ! ! 87 * ! was error -->--------------------------------------!---------->\ 88 * ! ! ! ! ! 89 * +<---- bad CSW ! ! ! 90 * +<---- bad tag ! ! ! 91 * ! ! V ! ! 92 * ! ! +--------+ ! ! 93 * ! ! ! CLRRS ! ! ! 94 * ! ! +--------+ ! ! 95 * ! ! ! ! ! 96 * \------- ! --------------------[C]--------\ ! ! 97 * ! ! ! ! 98 * cmd->error---\ +--------+ ! ! 99 * ! +--------------->! SENSE !<----------/ ! 100 * STAT_FAIL----/ +--------+ ! 101 * ! ! V 102 * ! V +--------+ 103 * \--------------------------------\--------------------->! DONE ! 104 * +--------+ 105 */ 106 107/* 108 * This many LUNs per USB device. 109 * Every one of them takes a host, see UB_MAX_HOSTS. 
 */
#define UB_MAX_LUNS 9

/*
 */

#define UB_PARTS_PER_LUN 8

#define UB_MAX_CDB_SIZE 16		/* Corresponds to Bulk */

#define UB_SENSE_SIZE 18

/*
 */

/* command block wrapper */
struct bulk_cb_wrap {
	__le32	Signature;		/* contains 'USBC' */
	u32	Tag;			/* unique per command id */
	__le32	DataTransferLength;	/* size of data */
	u8	Flags;			/* direction in bit 0 */
	u8	Lun;			/* LUN */
	u8	Length;			/* of the CDB */
	u8	CDB[UB_MAX_CDB_SIZE];	/* max command */
};

#define US_BULK_CB_WRAP_LEN	31
#define US_BULK_CB_SIGN		0x43425355	/* spells out 'USBC' */
#define US_BULK_FLAG_IN		1
#define US_BULK_FLAG_OUT	0

/* command status wrapper */
struct bulk_cs_wrap {
	__le32	Signature;		/* should = 'USBS' */
	u32	Tag;			/* same as original command */
	__le32	Residue;		/* amount not transferred */
	u8	Status;			/* see below */
};

#define US_BULK_CS_WRAP_LEN	13
#define US_BULK_CS_SIGN		0x53425355	/* spells out 'USBS' */
#define US_BULK_STAT_OK		0
#define US_BULK_STAT_FAIL	1
#define US_BULK_STAT_PHASE	2

/* bulk-only class specific requests */
#define US_BULK_RESET_REQUEST	0xff
#define US_BULK_GET_MAX_LUN	0xfe

/*
 */
struct ub_dev;

#define UB_MAX_REQ_SG	9	/* cdrecord requires 32KB and maybe a header */
#define UB_MAX_SECTORS 64

/*
 * A second is more than enough for a 32K transfer (UB_MAX_SECTORS)
 * even if a webcam hogs the bus, but some devices need time to spin up.
 */
#define UB_URB_TIMEOUT	(HZ*2)
#define UB_DATA_TIMEOUT	(HZ*5)	/* ZIP does spin-ups in the data phase */
#define UB_STAT_TIMEOUT	(HZ*5)	/* Same spinups and eject for a dataless cmd. */
#define UB_CTRL_TIMEOUT	(HZ/2)	/* 500ms ought to be enough to clear a stall */

/*
 * An instance of a SCSI command in transit.
 */
#define UB_DIR_NONE	0
#define UB_DIR_READ	1
#define UB_DIR_ILLEGAL2	2
#define UB_DIR_WRITE	3

/* P3 */
#define UB_DIR_CHAR(c)  (((c)==UB_DIR_WRITE)? 'w': \
			 (((c)==UB_DIR_READ)? 'r': 'n'))

enum ub_scsi_cmd_state {
	UB_CMDST_INIT,			/* Initial state */
	UB_CMDST_CMD,			/* Command submitted */
	UB_CMDST_DATA,			/* Data phase */
	UB_CMDST_CLR2STS,		/* Clearing before requesting status */
	UB_CMDST_STAT,			/* Status phase */
	UB_CMDST_CLEAR,			/* Clearing a stall (halt, actually) */
	UB_CMDST_CLRRS,			/* Clearing before retrying status */
	UB_CMDST_SENSE,			/* Sending Request Sense */
	UB_CMDST_DONE			/* Final state */
};

struct ub_scsi_cmd {
	unsigned char cdb[UB_MAX_CDB_SIZE];
	unsigned char cdb_len;

	unsigned char dir;		/* 0 - none, 1 - read, 3 - write. */
	enum ub_scsi_cmd_state state;
	unsigned int tag;
	struct ub_scsi_cmd *next;

	int error;			/* Return code - valid upon done */
	unsigned int act_len;		/* Return size */
	unsigned char key, asc, ascq;	/* May be valid if error==-EIO */

	int stat_count;			/* Retries getting status.
*/ 213 214 unsigned int len; /* Requested length */ 215 unsigned int current_sg; 216 unsigned int nsg; /* sgv[nsg] */ 217 struct scatterlist sgv[UB_MAX_REQ_SG]; 218 219 struct ub_lun *lun; 220 void (*done)(struct ub_dev *, struct ub_scsi_cmd *); 221 void *back; 222}; 223 224struct ub_request { 225 struct request *rq; 226 unsigned int current_try; 227 unsigned int nsg; /* sgv[nsg] */ 228 struct scatterlist sgv[UB_MAX_REQ_SG]; 229}; 230 231/* 232 */ 233struct ub_capacity { 234 unsigned long nsec; /* Linux size - 512 byte sectors */ 235 unsigned int bsize; /* Linux hardsect_size */ 236 unsigned int bshift; /* Shift between 512 and hard sects */ 237}; 238 239/* 240 * This is a direct take-off from linux/include/completion.h 241 * The difference is that I do not wait on this thing, just poll. 242 * When I want to wait (ub_probe), I just use the stock completion. 243 * 244 * Note that INIT_COMPLETION takes no lock. It is correct. But why 245 * in the bloody hell that thing takes struct instead of pointer to struct 246 * is quite beyond me. I just copied it from the stock completion. 247 */ 248struct ub_completion { 249 unsigned int done; 250 spinlock_t lock; 251}; 252 253static inline void ub_init_completion(struct ub_completion *x) 254{ 255 x->done = 0; 256 spin_lock_init(&x->lock); 257} 258 259#define UB_INIT_COMPLETION(x) ((x).done = 0) 260 261static void ub_complete(struct ub_completion *x) 262{ 263 unsigned long flags; 264 265 spin_lock_irqsave(&x->lock, flags); 266 x->done++; 267 spin_unlock_irqrestore(&x->lock, flags); 268} 269 270static int ub_is_completed(struct ub_completion *x) 271{ 272 unsigned long flags; 273 int ret; 274 275 spin_lock_irqsave(&x->lock, flags); 276 ret = x->done; 277 spin_unlock_irqrestore(&x->lock, flags); 278 return ret; 279} 280 281/* 282 */ 283struct ub_scsi_cmd_queue { 284 int qlen, qmax; 285 struct ub_scsi_cmd *head, *tail; 286}; 287 288/* 289 * The block device instance (one per LUN). 290 */ 291struct ub_lun { 292 struct ub_dev *udev; 293 struct list_head link; 294 struct gendisk *disk; 295 int id; /* Host index */ 296 int num; /* LUN number */ 297 char name[16]; 298 299 int changed; /* Media was changed */ 300 int removable; 301 int readonly; 302 303 struct ub_request urq; 304 305 /* Use Ingo's mempool if or when we have more than one command. */ 306 /* 307 * Currently we never need more than one command for the whole device. 308 * However, giving every LUN a command is a cheap and automatic way 309 * to enforce fairness between them. 310 */ 311 int cmda[1]; 312 struct ub_scsi_cmd cmdv[1]; 313 314 struct ub_capacity capacity; 315}; 316 317/* 318 * The USB device instance. 319 */ 320struct ub_dev { 321 spinlock_t *lock; 322 atomic_t poison; /* The USB device is disconnected */ 323 int openc; /* protected by ub_lock! 
*/ 324 /* kref is too implicit for our taste */ 325 int reset; /* Reset is running */ 326 unsigned int tagcnt; 327 char name[12]; 328 struct usb_device *dev; 329 struct usb_interface *intf; 330 331 struct list_head luns; 332 333 unsigned int send_bulk_pipe; /* cached pipe values */ 334 unsigned int recv_bulk_pipe; 335 unsigned int send_ctrl_pipe; 336 unsigned int recv_ctrl_pipe; 337 338 struct tasklet_struct tasklet; 339 340 struct ub_scsi_cmd_queue cmd_queue; 341 struct ub_scsi_cmd top_rqs_cmd; /* REQUEST SENSE */ 342 unsigned char top_sense[UB_SENSE_SIZE]; 343 344 struct ub_completion work_done; 345 struct urb work_urb; 346 struct timer_list work_timer; 347 int last_pipe; /* What might need clearing */ 348 __le32 signature; /* Learned signature */ 349 struct bulk_cb_wrap work_bcb; 350 struct bulk_cs_wrap work_bcs; 351 struct usb_ctrlrequest work_cr; 352 353 struct work_struct reset_work; 354 wait_queue_head_t reset_wait; 355 356 int sg_stat[6]; 357}; 358 359/* 360 */ 361static void ub_cleanup(struct ub_dev *sc); 362static int ub_request_fn_1(struct ub_lun *lun, struct request *rq); 363static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, 364 struct ub_scsi_cmd *cmd, struct ub_request *urq); 365static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, 366 struct ub_scsi_cmd *cmd, struct ub_request *urq); 367static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd); 368static void ub_end_rq(struct request *rq, int uptodate); 369static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun, 370 struct ub_request *urq, struct ub_scsi_cmd *cmd); 371static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd); 372static void ub_urb_complete(struct urb *urb, struct pt_regs *pt); 373static void ub_scsi_action(unsigned long _dev); 374static void ub_scsi_dispatch(struct ub_dev *sc); 375static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd); 376static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd); 377static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc); 378static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd); 379static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd); 380static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd); 381static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd); 382static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd, 383 int stalled_pipe); 384static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd); 385static void ub_reset_enter(struct ub_dev *sc, int try); 386static void ub_reset_task(void *arg); 387static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun); 388static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun, 389 struct ub_capacity *ret); 390static int ub_sync_reset(struct ub_dev *sc); 391static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe); 392static int ub_probe_lun(struct ub_dev *sc, int lnum); 393 394/* 395 */ 396#ifdef CONFIG_USB_LIBUSUAL 397 398#define ub_usb_ids storage_usb_ids 399#else 400 401static struct usb_device_id ub_usb_ids[] = { 402 { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_BULK) }, 403 { } 404}; 405 406MODULE_DEVICE_TABLE(usb, ub_usb_ids); 407#endif /* CONFIG_USB_LIBUSUAL */ 408 409/* 410 * Find me a way to identify "next free minor" for add_disk(), 411 * and the array disappears the next day. 
However, the number of 412 * hosts has something to do with the naming and /proc/partitions. 413 * This has to be thought out in detail before changing. 414 * If UB_MAX_HOST was 1000, we'd use a bitmap. Or a better data structure. 415 */ 416#define UB_MAX_HOSTS 26 417static char ub_hostv[UB_MAX_HOSTS]; 418 419#define UB_QLOCK_NUM 5 420static spinlock_t ub_qlockv[UB_QLOCK_NUM]; 421static int ub_qlock_next = 0; 422 423static DEFINE_SPINLOCK(ub_lock); /* Locks globals and ->openc */ 424 425/* 426 * The id allocator. 427 * 428 * This also stores the host for indexing by minor, which is somewhat dirty. 429 */ 430static int ub_id_get(void) 431{ 432 unsigned long flags; 433 int i; 434 435 spin_lock_irqsave(&ub_lock, flags); 436 for (i = 0; i < UB_MAX_HOSTS; i++) { 437 if (ub_hostv[i] == 0) { 438 ub_hostv[i] = 1; 439 spin_unlock_irqrestore(&ub_lock, flags); 440 return i; 441 } 442 } 443 spin_unlock_irqrestore(&ub_lock, flags); 444 return -1; 445} 446 447static void ub_id_put(int id) 448{ 449 unsigned long flags; 450 451 if (id < 0 || id >= UB_MAX_HOSTS) { 452 printk(KERN_ERR DRV_NAME ": bad host ID %d\n", id); 453 return; 454 } 455 456 spin_lock_irqsave(&ub_lock, flags); 457 if (ub_hostv[id] == 0) { 458 spin_unlock_irqrestore(&ub_lock, flags); 459 printk(KERN_ERR DRV_NAME ": freeing free host ID %d\n", id); 460 return; 461 } 462 ub_hostv[id] = 0; 463 spin_unlock_irqrestore(&ub_lock, flags); 464} 465 466/* 467 * This is necessitated by the fact that blk_cleanup_queue does not 468 * necesserily destroy the queue. Instead, it may merely decrease q->refcnt. 469 * Since our blk_init_queue() passes a spinlock common with ub_dev, 470 * we have life time issues when ub_cleanup frees ub_dev. 471 */ 472static spinlock_t *ub_next_lock(void) 473{ 474 unsigned long flags; 475 spinlock_t *ret; 476 477 spin_lock_irqsave(&ub_lock, flags); 478 ret = &ub_qlockv[ub_qlock_next]; 479 ub_qlock_next = (ub_qlock_next + 1) % UB_QLOCK_NUM; 480 spin_unlock_irqrestore(&ub_lock, flags); 481 return ret; 482} 483 484/* 485 * Downcount for deallocation. This rides on two assumptions: 486 * - once something is poisoned, its refcount cannot grow 487 * - opens cannot happen at this time (del_gendisk was done) 488 * If the above is true, we can drop the lock, which we need for 489 * blk_cleanup_queue(): the silly thing may attempt to sleep. 490 * [Actually, it never needs to sleep for us, but it calls might_sleep()] 491 */ 492static void ub_put(struct ub_dev *sc) 493{ 494 unsigned long flags; 495 496 spin_lock_irqsave(&ub_lock, flags); 497 --sc->openc; 498 if (sc->openc == 0 && atomic_read(&sc->poison)) { 499 spin_unlock_irqrestore(&ub_lock, flags); 500 ub_cleanup(sc); 501 } else { 502 spin_unlock_irqrestore(&ub_lock, flags); 503 } 504} 505 506/* 507 * Final cleanup and deallocation. 508 */ 509static void ub_cleanup(struct ub_dev *sc) 510{ 511 struct list_head *p; 512 struct ub_lun *lun; 513 request_queue_t *q; 514 515 while (!list_empty(&sc->luns)) { 516 p = sc->luns.next; 517 lun = list_entry(p, struct ub_lun, link); 518 list_del(p); 519 520 /* I don't think queue can be NULL. But... Stolen from sx8.c */ 521 if ((q = lun->disk->queue) != NULL) 522 blk_cleanup_queue(q); 523 /* 524 * If we zero disk->private_data BEFORE put_disk, we have 525 * to check for NULL all over the place in open, release, 526 * check_media and revalidate, because the block level 527 * semaphore is well inside the put_disk. 528 * But we cannot zero after the call, because *disk is gone. 529 * The sd.c is blatantly racy in this area. 
530 */ 531 /* disk->private_data = NULL; */ 532 put_disk(lun->disk); 533 lun->disk = NULL; 534 535 ub_id_put(lun->id); 536 kfree(lun); 537 } 538 539 kfree(sc); 540} 541 542/* 543 * The "command allocator". 544 */ 545static struct ub_scsi_cmd *ub_get_cmd(struct ub_lun *lun) 546{ 547 struct ub_scsi_cmd *ret; 548 549 if (lun->cmda[0]) 550 return NULL; 551 ret = &lun->cmdv[0]; 552 lun->cmda[0] = 1; 553 return ret; 554} 555 556static void ub_put_cmd(struct ub_lun *lun, struct ub_scsi_cmd *cmd) 557{ 558 if (cmd != &lun->cmdv[0]) { 559 printk(KERN_WARNING "%s: releasing a foreign cmd %p\n", 560 lun->name, cmd); 561 return; 562 } 563 if (!lun->cmda[0]) { 564 printk(KERN_WARNING "%s: releasing a free cmd\n", lun->name); 565 return; 566 } 567 lun->cmda[0] = 0; 568} 569 570/* 571 * The command queue. 572 */ 573static void ub_cmdq_add(struct ub_dev *sc, struct ub_scsi_cmd *cmd) 574{ 575 struct ub_scsi_cmd_queue *t = &sc->cmd_queue; 576 577 if (t->qlen++ == 0) { 578 t->head = cmd; 579 t->tail = cmd; 580 } else { 581 t->tail->next = cmd; 582 t->tail = cmd; 583 } 584 585 if (t->qlen > t->qmax) 586 t->qmax = t->qlen; 587} 588 589static void ub_cmdq_insert(struct ub_dev *sc, struct ub_scsi_cmd *cmd) 590{ 591 struct ub_scsi_cmd_queue *t = &sc->cmd_queue; 592 593 if (t->qlen++ == 0) { 594 t->head = cmd; 595 t->tail = cmd; 596 } else { 597 cmd->next = t->head; 598 t->head = cmd; 599 } 600 601 if (t->qlen > t->qmax) 602 t->qmax = t->qlen; 603} 604 605static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc) 606{ 607 struct ub_scsi_cmd_queue *t = &sc->cmd_queue; 608 struct ub_scsi_cmd *cmd; 609 610 if (t->qlen == 0) 611 return NULL; 612 if (--t->qlen == 0) 613 t->tail = NULL; 614 cmd = t->head; 615 t->head = cmd->next; 616 cmd->next = NULL; 617 return cmd; 618} 619 620#define ub_cmdq_peek(sc) ((sc)->cmd_queue.head) 621 622/* 623 * The request function is our main entry point 624 */ 625 626static void ub_request_fn(request_queue_t *q) 627{ 628 struct ub_lun *lun = q->queuedata; 629 struct request *rq; 630 631 while ((rq = elv_next_request(q)) != NULL) { 632 if (ub_request_fn_1(lun, rq) != 0) { 633 blk_stop_queue(q); 634 break; 635 } 636 } 637} 638 639static int ub_request_fn_1(struct ub_lun *lun, struct request *rq) 640{ 641 struct ub_dev *sc = lun->udev; 642 struct ub_scsi_cmd *cmd; 643 struct ub_request *urq; 644 int n_elem; 645 646 if (atomic_read(&sc->poison) || lun->changed) { 647 blkdev_dequeue_request(rq); 648 ub_end_rq(rq, 0); 649 return 0; 650 } 651 652 if (lun->urq.rq != NULL) 653 return -1; 654 if ((cmd = ub_get_cmd(lun)) == NULL) 655 return -1; 656 memset(cmd, 0, sizeof(struct ub_scsi_cmd)); 657 658 blkdev_dequeue_request(rq); 659 660 urq = &lun->urq; 661 memset(urq, 0, sizeof(struct ub_request)); 662 urq->rq = rq; 663 664 /* 665 * get scatterlist from block layer 666 */ 667 n_elem = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]); 668 if (n_elem < 0) { 669 printk(KERN_INFO "%s: failed request map (%d)\n", 670 lun->name, n_elem); /* P3 */ 671 goto drop; 672 } 673 if (n_elem > UB_MAX_REQ_SG) { /* Paranoia */ 674 printk(KERN_WARNING "%s: request with %d segments\n", 675 lun->name, n_elem); 676 goto drop; 677 } 678 urq->nsg = n_elem; 679 sc->sg_stat[n_elem < 5 ? 
n_elem : 5]++; 680 681 if (blk_pc_request(rq)) { 682 ub_cmd_build_packet(sc, lun, cmd, urq); 683 } else { 684 ub_cmd_build_block(sc, lun, cmd, urq); 685 } 686 cmd->state = UB_CMDST_INIT; 687 cmd->lun = lun; 688 cmd->done = ub_rw_cmd_done; 689 cmd->back = urq; 690 691 cmd->tag = sc->tagcnt++; 692 if (ub_submit_scsi(sc, cmd) != 0) 693 goto drop; 694 695 return 0; 696 697drop: 698 ub_put_cmd(lun, cmd); 699 ub_end_rq(rq, 0); 700 return 0; 701} 702 703static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, 704 struct ub_scsi_cmd *cmd, struct ub_request *urq) 705{ 706 struct request *rq = urq->rq; 707 unsigned int block, nblks; 708 709 if (rq_data_dir(rq) == WRITE) 710 cmd->dir = UB_DIR_WRITE; 711 else 712 cmd->dir = UB_DIR_READ; 713 714 cmd->nsg = urq->nsg; 715 memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg); 716 717 /* 718 * build the command 719 * 720 * The call to blk_queue_hardsect_size() guarantees that request 721 * is aligned, but it is given in terms of 512 byte units, always. 722 */ 723 block = rq->sector >> lun->capacity.bshift; 724 nblks = rq->nr_sectors >> lun->capacity.bshift; 725 726 cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10; 727 /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */ 728 cmd->cdb[2] = block >> 24; 729 cmd->cdb[3] = block >> 16; 730 cmd->cdb[4] = block >> 8; 731 cmd->cdb[5] = block; 732 cmd->cdb[7] = nblks >> 8; 733 cmd->cdb[8] = nblks; 734 cmd->cdb_len = 10; 735 736 cmd->len = rq->nr_sectors * 512; 737} 738 739static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, 740 struct ub_scsi_cmd *cmd, struct ub_request *urq) 741{ 742 struct request *rq = urq->rq; 743 744 if (rq->data_len == 0) { 745 cmd->dir = UB_DIR_NONE; 746 } else { 747 if (rq_data_dir(rq) == WRITE) 748 cmd->dir = UB_DIR_WRITE; 749 else 750 cmd->dir = UB_DIR_READ; 751 } 752 753 cmd->nsg = urq->nsg; 754 memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg); 755 756 memcpy(&cmd->cdb, rq->cmd, rq->cmd_len); 757 cmd->cdb_len = rq->cmd_len; 758 759 cmd->len = rq->data_len; 760} 761 762static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd) 763{ 764 struct ub_lun *lun = cmd->lun; 765 struct ub_request *urq = cmd->back; 766 struct request *rq; 767 int uptodate; 768 769 rq = urq->rq; 770 771 if (cmd->error == 0) { 772 uptodate = 1; 773 774 if (blk_pc_request(rq)) { 775 if (cmd->act_len >= rq->data_len) 776 rq->data_len = 0; 777 else 778 rq->data_len -= cmd->act_len; 779 } 780 } else { 781 uptodate = 0; 782 783 if (blk_pc_request(rq)) { 784 /* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */ 785 memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE); 786 rq->sense_len = UB_SENSE_SIZE; 787 if (sc->top_sense[0] != 0) 788 rq->errors = SAM_STAT_CHECK_CONDITION; 789 else 790 rq->errors = DID_ERROR << 16; 791 } else { 792 if (cmd->error == -EIO) { 793 if (ub_rw_cmd_retry(sc, lun, urq, cmd) == 0) 794 return; 795 } 796 } 797 } 798 799 urq->rq = NULL; 800 801 ub_put_cmd(lun, cmd); 802 ub_end_rq(rq, uptodate); 803 blk_start_queue(lun->disk->queue); 804} 805 806static void ub_end_rq(struct request *rq, int uptodate) 807{ 808 end_that_request_first(rq, uptodate, rq->hard_nr_sectors); 809 end_that_request_last(rq, uptodate); 810} 811 812static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun, 813 struct ub_request *urq, struct ub_scsi_cmd *cmd) 814{ 815 816 if (atomic_read(&sc->poison)) 817 return -ENXIO; 818 819 ub_reset_enter(sc, urq->current_try); 820 821 if (urq->current_try >= 3) 822 return -EIO; 823 
urq->current_try++; 824 /* P3 */ printk("%s: dir %c len/act %d/%d " 825 "[sense %x %02x %02x] retry %d\n", 826 sc->name, UB_DIR_CHAR(cmd->dir), cmd->len, cmd->act_len, 827 cmd->key, cmd->asc, cmd->ascq, urq->current_try); 828 829 memset(cmd, 0, sizeof(struct ub_scsi_cmd)); 830 ub_cmd_build_block(sc, lun, cmd, urq); 831 832 cmd->state = UB_CMDST_INIT; 833 cmd->lun = lun; 834 cmd->done = ub_rw_cmd_done; 835 cmd->back = urq; 836 837 cmd->tag = sc->tagcnt++; 838 839#if 0 /* Wasteful */ 840 return ub_submit_scsi(sc, cmd); 841#else 842 ub_cmdq_add(sc, cmd); 843 return 0; 844#endif 845} 846 847/* 848 * Submit a regular SCSI operation (not an auto-sense). 849 * 850 * The Iron Law of Good Submit Routine is: 851 * Zero return - callback is done, Nonzero return - callback is not done. 852 * No exceptions. 853 * 854 * Host is assumed locked. 855 */ 856static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd) 857{ 858 859 if (cmd->state != UB_CMDST_INIT || 860 (cmd->dir != UB_DIR_NONE && cmd->len == 0)) { 861 return -EINVAL; 862 } 863 864 ub_cmdq_add(sc, cmd); 865 /* 866 * We can call ub_scsi_dispatch(sc) right away here, but it's a little 867 * safer to jump to a tasklet, in case upper layers do something silly. 868 */ 869 tasklet_schedule(&sc->tasklet); 870 return 0; 871} 872 873/* 874 * Submit the first URB for the queued command. 875 * This function does not deal with queueing in any way. 876 */ 877static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd) 878{ 879 struct bulk_cb_wrap *bcb; 880 int rc; 881 882 bcb = &sc->work_bcb; 883 884 /* 885 * ``If the allocation length is eighteen or greater, and a device 886 * server returns less than eithteen bytes of data, the application 887 * client should assume that the bytes not transferred would have been 888 * zeroes had the device server returned those bytes.'' 889 * 890 * We zero sense for all commands so that when a packet request 891 * fails it does not return a stale sense. 892 */ 893 memset(&sc->top_sense, 0, UB_SENSE_SIZE); 894 895 /* set up the command wrapper */ 896 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 897 bcb->Tag = cmd->tag; /* Endianness is not important */ 898 bcb->DataTransferLength = cpu_to_le32(cmd->len); 899 bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0; 900 bcb->Lun = (cmd->lun != NULL) ? cmd->lun->num : 0; 901 bcb->Length = cmd->cdb_len; 902 903 /* copy the command payload */ 904 memcpy(bcb->CDB, cmd->cdb, UB_MAX_CDB_SIZE); 905 906 UB_INIT_COMPLETION(sc->work_done); 907 908 sc->last_pipe = sc->send_bulk_pipe; 909 usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe, 910 bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc); 911 912 /* Fill what we shouldn't be filling, because usb-storage did so. */ 913 sc->work_urb.actual_length = 0; 914 sc->work_urb.error_count = 0; 915 sc->work_urb.status = 0; 916 917 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { 918 /* XXX Clear stalls */ 919 ub_complete(&sc->work_done); 920 return rc; 921 } 922 923 sc->work_timer.expires = jiffies + UB_URB_TIMEOUT; 924 add_timer(&sc->work_timer); 925 926 cmd->state = UB_CMDST_CMD; 927 return 0; 928} 929 930/* 931 * Timeout handler. 932 */ 933static void ub_urb_timeout(unsigned long arg) 934{ 935 struct ub_dev *sc = (struct ub_dev *) arg; 936 unsigned long flags; 937 938 spin_lock_irqsave(sc->lock, flags); 939 if (!ub_is_completed(&sc->work_done)) 940 usb_unlink_urb(&sc->work_urb); 941 spin_unlock_irqrestore(sc->lock, flags); 942} 943 944/* 945 * Completion routine for the work URB. 
946 * 947 * This can be called directly from usb_submit_urb (while we have 948 * the sc->lock taken) and from an interrupt (while we do NOT have 949 * the sc->lock taken). Therefore, bounce this off to a tasklet. 950 */ 951static void ub_urb_complete(struct urb *urb, struct pt_regs *pt) 952{ 953 struct ub_dev *sc = urb->context; 954 955 ub_complete(&sc->work_done); 956 tasklet_schedule(&sc->tasklet); 957} 958 959static void ub_scsi_action(unsigned long _dev) 960{ 961 struct ub_dev *sc = (struct ub_dev *) _dev; 962 unsigned long flags; 963 964 spin_lock_irqsave(sc->lock, flags); 965 ub_scsi_dispatch(sc); 966 spin_unlock_irqrestore(sc->lock, flags); 967} 968 969static void ub_scsi_dispatch(struct ub_dev *sc) 970{ 971 struct ub_scsi_cmd *cmd; 972 int rc; 973 974 while (!sc->reset && (cmd = ub_cmdq_peek(sc)) != NULL) { 975 if (cmd->state == UB_CMDST_DONE) { 976 ub_cmdq_pop(sc); 977 (*cmd->done)(sc, cmd); 978 } else if (cmd->state == UB_CMDST_INIT) { 979 if ((rc = ub_scsi_cmd_start(sc, cmd)) == 0) 980 break; 981 cmd->error = rc; 982 cmd->state = UB_CMDST_DONE; 983 } else { 984 if (!ub_is_completed(&sc->work_done)) 985 break; 986 del_timer(&sc->work_timer); 987 ub_scsi_urb_compl(sc, cmd); 988 } 989 } 990} 991 992static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) 993{ 994 struct urb *urb = &sc->work_urb; 995 struct bulk_cs_wrap *bcs; 996 int len; 997 int rc; 998 999 if (atomic_read(&sc->poison)) { 1000 ub_state_done(sc, cmd, -ENODEV); 1001 return; 1002 } 1003 1004 if (cmd->state == UB_CMDST_CLEAR) { 1005 if (urb->status == -EPIPE) { 1006 /* 1007 * STALL while clearning STALL. 1008 * The control pipe clears itself - nothing to do. 1009 */ 1010 printk(KERN_NOTICE "%s: stall on control pipe\n", 1011 sc->name); 1012 goto Bad_End; 1013 } 1014 1015 /* 1016 * We ignore the result for the halt clear. 1017 */ 1018 1019 /* reset the endpoint toggle */ 1020 usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe), 1021 usb_pipeout(sc->last_pipe), 0); 1022 1023 ub_state_sense(sc, cmd); 1024 1025 } else if (cmd->state == UB_CMDST_CLR2STS) { 1026 if (urb->status == -EPIPE) { 1027 printk(KERN_NOTICE "%s: stall on control pipe\n", 1028 sc->name); 1029 goto Bad_End; 1030 } 1031 1032 /* 1033 * We ignore the result for the halt clear. 1034 */ 1035 1036 /* reset the endpoint toggle */ 1037 usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe), 1038 usb_pipeout(sc->last_pipe), 0); 1039 1040 ub_state_stat(sc, cmd); 1041 1042 } else if (cmd->state == UB_CMDST_CLRRS) { 1043 if (urb->status == -EPIPE) { 1044 printk(KERN_NOTICE "%s: stall on control pipe\n", 1045 sc->name); 1046 goto Bad_End; 1047 } 1048 1049 /* 1050 * We ignore the result for the halt clear. 1051 */ 1052 1053 /* reset the endpoint toggle */ 1054 usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe), 1055 usb_pipeout(sc->last_pipe), 0); 1056 1057 ub_state_stat_counted(sc, cmd); 1058 1059 } else if (cmd->state == UB_CMDST_CMD) { 1060 switch (urb->status) { 1061 case 0: 1062 break; 1063 case -EOVERFLOW: 1064 goto Bad_End; 1065 case -EPIPE: 1066 rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe); 1067 if (rc != 0) { 1068 printk(KERN_NOTICE "%s: " 1069 "unable to submit clear (%d)\n", 1070 sc->name, rc); 1071 /* 1072 * This is typically ENOMEM or some other such shit. 1073 * Retrying is pointless. Just do Bad End on it... 
1074 */ 1075 ub_state_done(sc, cmd, rc); 1076 return; 1077 } 1078 cmd->state = UB_CMDST_CLEAR; 1079 return; 1080 case -ESHUTDOWN: /* unplug */ 1081 case -EILSEQ: /* unplug timeout on uhci */ 1082 ub_state_done(sc, cmd, -ENODEV); 1083 return; 1084 default: 1085 goto Bad_End; 1086 } 1087 if (urb->actual_length != US_BULK_CB_WRAP_LEN) { 1088 goto Bad_End; 1089 } 1090 1091 if (cmd->dir == UB_DIR_NONE || cmd->nsg < 1) { 1092 ub_state_stat(sc, cmd); 1093 return; 1094 } 1095 1096 // udelay(125); // usb-storage has this 1097 ub_data_start(sc, cmd); 1098 1099 } else if (cmd->state == UB_CMDST_DATA) { 1100 if (urb->status == -EPIPE) { 1101 rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe); 1102 if (rc != 0) { 1103 printk(KERN_NOTICE "%s: " 1104 "unable to submit clear (%d)\n", 1105 sc->name, rc); 1106 ub_state_done(sc, cmd, rc); 1107 return; 1108 } 1109 cmd->state = UB_CMDST_CLR2STS; 1110 return; 1111 } 1112 if (urb->status == -EOVERFLOW) { 1113 /* 1114 * A babble? Failure, but we must transfer CSW now. 1115 */ 1116 cmd->error = -EOVERFLOW; /* A cheap trick... */ 1117 ub_state_stat(sc, cmd); 1118 return; 1119 } 1120 1121 if (cmd->dir == UB_DIR_WRITE) { 1122 /* 1123 * Do not continue writes in case of a failure. 1124 * Doing so would cause sectors to be mixed up, 1125 * which is worse than sectors lost. 1126 * 1127 * We must try to read the CSW, or many devices 1128 * get confused. 1129 */ 1130 len = urb->actual_length; 1131 if (urb->status != 0 || 1132 len != cmd->sgv[cmd->current_sg].length) { 1133 cmd->act_len += len; 1134 1135 cmd->error = -EIO; 1136 ub_state_stat(sc, cmd); 1137 return; 1138 } 1139 1140 } else { 1141 /* 1142 * If an error occurs on read, we record it, and 1143 * continue to fetch data in order to avoid bubble. 1144 * 1145 * As a small shortcut, we stop if we detect that 1146 * a CSW mixed into data. 1147 */ 1148 if (urb->status != 0) 1149 cmd->error = -EIO; 1150 1151 len = urb->actual_length; 1152 if (urb->status != 0 || 1153 len != cmd->sgv[cmd->current_sg].length) { 1154 if ((len & 0x1FF) == US_BULK_CS_WRAP_LEN) 1155 goto Bad_End; 1156 } 1157 } 1158 1159 cmd->act_len += urb->actual_length; 1160 1161 if (++cmd->current_sg < cmd->nsg) { 1162 ub_data_start(sc, cmd); 1163 return; 1164 } 1165 ub_state_stat(sc, cmd); 1166 1167 } else if (cmd->state == UB_CMDST_STAT) { 1168 if (urb->status == -EPIPE) { 1169 rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe); 1170 if (rc != 0) { 1171 printk(KERN_NOTICE "%s: " 1172 "unable to submit clear (%d)\n", 1173 sc->name, rc); 1174 ub_state_done(sc, cmd, rc); 1175 return; 1176 } 1177 1178 /* 1179 * Having a stall when getting CSW is an error, so 1180 * make sure uppper levels are not oblivious to it. 1181 */ 1182 cmd->error = -EIO; /* A cheap trick... */ 1183 1184 cmd->state = UB_CMDST_CLRRS; 1185 return; 1186 } 1187 1188 /* Catch everything, including -EOVERFLOW and other nasties. */ 1189 if (urb->status != 0) 1190 goto Bad_End; 1191 1192 if (urb->actual_length == 0) { 1193 ub_state_stat_counted(sc, cmd); 1194 return; 1195 } 1196 1197 /* 1198 * Check the returned Bulk protocol status. 1199 * The status block has to be validated first. 1200 */ 1201 1202 bcs = &sc->work_bcs; 1203 1204 if (sc->signature == cpu_to_le32(0)) { 1205 /* 1206 * This is the first reply, so do not perform the check. 1207 * Instead, remember the signature the device uses 1208 * for future checks. But do not allow a nul. 
1209 */ 1210 sc->signature = bcs->Signature; 1211 if (sc->signature == cpu_to_le32(0)) { 1212 ub_state_stat_counted(sc, cmd); 1213 return; 1214 } 1215 } else { 1216 if (bcs->Signature != sc->signature) { 1217 ub_state_stat_counted(sc, cmd); 1218 return; 1219 } 1220 } 1221 1222 if (bcs->Tag != cmd->tag) { 1223 /* 1224 * This usually happens when we disagree with the 1225 * device's microcode about something. For instance, 1226 * a few of them throw this after timeouts. They buffer 1227 * commands and reply at commands we timed out before. 1228 * Without flushing these replies we loop forever. 1229 */ 1230 ub_state_stat_counted(sc, cmd); 1231 return; 1232 } 1233 1234 len = le32_to_cpu(bcs->Residue); 1235 if (len != cmd->len - cmd->act_len) { 1236 /* 1237 * It is all right to transfer less, the caller has 1238 * to check. But it's not all right if the device 1239 * counts disagree with our counts. 1240 */ 1241 /* P3 */ printk("%s: resid %d len %d act %d\n", 1242 sc->name, len, cmd->len, cmd->act_len); 1243 goto Bad_End; 1244 } 1245 1246 switch (bcs->Status) { 1247 case US_BULK_STAT_OK: 1248 break; 1249 case US_BULK_STAT_FAIL: 1250 ub_state_sense(sc, cmd); 1251 return; 1252 case US_BULK_STAT_PHASE: 1253 /* P3 */ printk("%s: status PHASE\n", sc->name); 1254 goto Bad_End; 1255 default: 1256 printk(KERN_INFO "%s: unknown CSW status 0x%x\n", 1257 sc->name, bcs->Status); 1258 ub_state_done(sc, cmd, -EINVAL); 1259 return; 1260 } 1261 1262 /* Not zeroing error to preserve a babble indicator */ 1263 if (cmd->error != 0) { 1264 ub_state_sense(sc, cmd); 1265 return; 1266 } 1267 cmd->state = UB_CMDST_DONE; 1268 ub_cmdq_pop(sc); 1269 (*cmd->done)(sc, cmd); 1270 1271 } else if (cmd->state == UB_CMDST_SENSE) { 1272 ub_state_done(sc, cmd, -EIO); 1273 1274 } else { 1275 printk(KERN_WARNING "%s: " 1276 "wrong command state %d\n", 1277 sc->name, cmd->state); 1278 ub_state_done(sc, cmd, -EINVAL); 1279 return; 1280 } 1281 return; 1282 1283Bad_End: /* Little Excel is dead */ 1284 ub_state_done(sc, cmd, -EIO); 1285} 1286 1287/* 1288 * Factorization helper for the command state machine: 1289 * Initiate a data segment transfer. 1290 */ 1291static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd) 1292{ 1293 struct scatterlist *sg = &cmd->sgv[cmd->current_sg]; 1294 int pipe; 1295 int rc; 1296 1297 UB_INIT_COMPLETION(sc->work_done); 1298 1299 if (cmd->dir == UB_DIR_READ) 1300 pipe = sc->recv_bulk_pipe; 1301 else 1302 pipe = sc->send_bulk_pipe; 1303 sc->last_pipe = pipe; 1304 usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, 1305 page_address(sg->page) + sg->offset, sg->length, 1306 ub_urb_complete, sc); 1307 sc->work_urb.actual_length = 0; 1308 sc->work_urb.error_count = 0; 1309 sc->work_urb.status = 0; 1310 1311 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { 1312 /* XXX Clear stalls */ 1313 ub_complete(&sc->work_done); 1314 ub_state_done(sc, cmd, rc); 1315 return; 1316 } 1317 1318 sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT; 1319 add_timer(&sc->work_timer); 1320 1321 cmd->state = UB_CMDST_DATA; 1322} 1323 1324/* 1325 * Factorization helper for the command state machine: 1326 * Finish the command. 1327 */ 1328static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc) 1329{ 1330 1331 cmd->error = rc; 1332 cmd->state = UB_CMDST_DONE; 1333 ub_cmdq_pop(sc); 1334 (*cmd->done)(sc, cmd); 1335} 1336 1337/* 1338 * Factorization helper for the command state machine: 1339 * Submit a CSW read. 
1340 */ 1341static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd) 1342{ 1343 int rc; 1344 1345 UB_INIT_COMPLETION(sc->work_done); 1346 1347 sc->last_pipe = sc->recv_bulk_pipe; 1348 usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe, 1349 &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc); 1350 sc->work_urb.actual_length = 0; 1351 sc->work_urb.error_count = 0; 1352 sc->work_urb.status = 0; 1353 1354 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { 1355 /* XXX Clear stalls */ 1356 ub_complete(&sc->work_done); 1357 ub_state_done(sc, cmd, rc); 1358 return -1; 1359 } 1360 1361 sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT; 1362 add_timer(&sc->work_timer); 1363 return 0; 1364} 1365 1366/* 1367 * Factorization helper for the command state machine: 1368 * Submit a CSW read and go to STAT state. 1369 */ 1370static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd) 1371{ 1372 1373 if (__ub_state_stat(sc, cmd) != 0) 1374 return; 1375 1376 cmd->stat_count = 0; 1377 cmd->state = UB_CMDST_STAT; 1378} 1379 1380/* 1381 * Factorization helper for the command state machine: 1382 * Submit a CSW read and go to STAT state with counter (along [C] path). 1383 */ 1384static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd) 1385{ 1386 1387 if (++cmd->stat_count >= 4) { 1388 ub_state_sense(sc, cmd); 1389 return; 1390 } 1391 1392 if (__ub_state_stat(sc, cmd) != 0) 1393 return; 1394 1395 cmd->state = UB_CMDST_STAT; 1396} 1397 1398/* 1399 * Factorization helper for the command state machine: 1400 * Submit a REQUEST SENSE and go to SENSE state. 1401 */ 1402static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd) 1403{ 1404 struct ub_scsi_cmd *scmd; 1405 struct scatterlist *sg; 1406 int rc; 1407 1408 if (cmd->cdb[0] == REQUEST_SENSE) { 1409 rc = -EPIPE; 1410 goto error; 1411 } 1412 1413 scmd = &sc->top_rqs_cmd; 1414 memset(scmd, 0, sizeof(struct ub_scsi_cmd)); 1415 scmd->cdb[0] = REQUEST_SENSE; 1416 scmd->cdb[4] = UB_SENSE_SIZE; 1417 scmd->cdb_len = 6; 1418 scmd->dir = UB_DIR_READ; 1419 scmd->state = UB_CMDST_INIT; 1420 scmd->nsg = 1; 1421 sg = &scmd->sgv[0]; 1422 sg->page = virt_to_page(sc->top_sense); 1423 sg->offset = (unsigned long)sc->top_sense & (PAGE_SIZE-1); 1424 sg->length = UB_SENSE_SIZE; 1425 scmd->len = UB_SENSE_SIZE; 1426 scmd->lun = cmd->lun; 1427 scmd->done = ub_top_sense_done; 1428 scmd->back = cmd; 1429 1430 scmd->tag = sc->tagcnt++; 1431 1432 cmd->state = UB_CMDST_SENSE; 1433 1434 ub_cmdq_insert(sc, scmd); 1435 return; 1436 1437error: 1438 ub_state_done(sc, cmd, rc); 1439} 1440 1441/* 1442 * A helper for the command's state machine: 1443 * Submit a stall clear. 
1444 */ 1445static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd, 1446 int stalled_pipe) 1447{ 1448 int endp; 1449 struct usb_ctrlrequest *cr; 1450 int rc; 1451 1452 endp = usb_pipeendpoint(stalled_pipe); 1453 if (usb_pipein (stalled_pipe)) 1454 endp |= USB_DIR_IN; 1455 1456 cr = &sc->work_cr; 1457 cr->bRequestType = USB_RECIP_ENDPOINT; 1458 cr->bRequest = USB_REQ_CLEAR_FEATURE; 1459 cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT); 1460 cr->wIndex = cpu_to_le16(endp); 1461 cr->wLength = cpu_to_le16(0); 1462 1463 UB_INIT_COMPLETION(sc->work_done); 1464 1465 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe, 1466 (unsigned char*) cr, NULL, 0, ub_urb_complete, sc); 1467 sc->work_urb.actual_length = 0; 1468 sc->work_urb.error_count = 0; 1469 sc->work_urb.status = 0; 1470 1471 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { 1472 ub_complete(&sc->work_done); 1473 return rc; 1474 } 1475 1476 sc->work_timer.expires = jiffies + UB_CTRL_TIMEOUT; 1477 add_timer(&sc->work_timer); 1478 return 0; 1479} 1480 1481/* 1482 */ 1483static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd) 1484{ 1485 unsigned char *sense = sc->top_sense; 1486 struct ub_scsi_cmd *cmd; 1487 1488 /* 1489 * Find the command which triggered the unit attention or a check, 1490 * save the sense into it, and advance its state machine. 1491 */ 1492 if ((cmd = ub_cmdq_peek(sc)) == NULL) { 1493 printk(KERN_WARNING "%s: sense done while idle\n", sc->name); 1494 return; 1495 } 1496 if (cmd != scmd->back) { 1497 printk(KERN_WARNING "%s: " 1498 "sense done for wrong command 0x%x\n", 1499 sc->name, cmd->tag); 1500 return; 1501 } 1502 if (cmd->state != UB_CMDST_SENSE) { 1503 printk(KERN_WARNING "%s: " 1504 "sense done with bad cmd state %d\n", 1505 sc->name, cmd->state); 1506 return; 1507 } 1508 1509 /* 1510 * Ignoring scmd->act_len, because the buffer was pre-zeroed. 1511 */ 1512 cmd->key = sense[2] & 0x0F; 1513 cmd->asc = sense[12]; 1514 cmd->ascq = sense[13]; 1515 1516 ub_scsi_urb_compl(sc, cmd); 1517} 1518 1519/* 1520 * Reset management 1521 * XXX Move usb_reset_device to khubd. Hogging kevent is not a good thing. 1522 * XXX Make usb_sync_reset asynchronous. 1523 */ 1524 1525static void ub_reset_enter(struct ub_dev *sc, int try) 1526{ 1527 1528 if (sc->reset) { 1529 /* This happens often on multi-LUN devices. */ 1530 return; 1531 } 1532 sc->reset = try + 1; 1533 1534#if 0 /* Not needed because the disconnect waits for us. */ 1535 unsigned long flags; 1536 spin_lock_irqsave(&ub_lock, flags); 1537 sc->openc++; 1538 spin_unlock_irqrestore(&ub_lock, flags); 1539#endif 1540 1541#if 0 /* We let them stop themselves. */ 1542 struct list_head *p; 1543 struct ub_lun *lun; 1544 list_for_each(p, &sc->luns) { 1545 lun = list_entry(p, struct ub_lun, link); 1546 blk_stop_queue(lun->disk->queue); 1547 } 1548#endif 1549 1550 schedule_work(&sc->reset_work); 1551} 1552 1553static void ub_reset_task(void *arg) 1554{ 1555 struct ub_dev *sc = arg; 1556 unsigned long flags; 1557 struct list_head *p; 1558 struct ub_lun *lun; 1559 int lkr, rc; 1560 1561 if (!sc->reset) { 1562 printk(KERN_WARNING "%s: Running reset unrequested\n", 1563 sc->name); 1564 return; 1565 } 1566 1567 if (atomic_read(&sc->poison)) { 1568 printk(KERN_NOTICE "%s: Not resetting disconnected device\n", 1569 sc->name); /* P3 This floods. Remove soon. XXX */ 1570 } else if ((sc->reset & 1) == 0) { 1571 ub_sync_reset(sc); 1572 msleep(700); /* usb-storage sleeps 6s (!) 
 */
		ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
		ub_probe_clear_stall(sc, sc->send_bulk_pipe);
	} else if (sc->dev->actconfig->desc.bNumInterfaces != 1) {
		printk(KERN_NOTICE "%s: Not resetting multi-interface device\n",
		    sc->name); /* P3 This floods. Remove soon. XXX */
	} else {
		if ((lkr = usb_lock_device_for_reset(sc->dev, sc->intf)) < 0) {
			printk(KERN_NOTICE
			    "%s: usb_lock_device_for_reset failed (%d)\n",
			    sc->name, lkr);
		} else {
			rc = usb_reset_device(sc->dev);
			if (rc < 0) {
				printk(KERN_NOTICE "%s: "
				    "usb_reset_device failed (%d)\n",
				    sc->name, rc);
			}

			if (lkr)
				usb_unlock_device(sc->dev);
		}
	}

	/*
	 * In theory, no commands can be running while reset is active,
	 * so nobody can ask for another reset, and so we do not need any
	 * queues of resets or anything. We do need a spinlock though,
	 * to interact with the block layer.
	 */
	spin_lock_irqsave(sc->lock, flags);
	sc->reset = 0;
	tasklet_schedule(&sc->tasklet);
	list_for_each(p, &sc->luns) {
		lun = list_entry(p, struct ub_lun, link);
		blk_start_queue(lun->disk->queue);
	}
	wake_up(&sc->reset_wait);
	spin_unlock_irqrestore(sc->lock, flags);
}

/*
 * This is called from a process context.
 */
static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun)
{

	lun->readonly = 0;	/* XXX Query this from the device */

	lun->capacity.nsec = 0;
	lun->capacity.bsize = 512;
	lun->capacity.bshift = 0;

	if (ub_sync_tur(sc, lun) != 0)
		return;			/* Not ready */
	lun->changed = 0;

	if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
		/*
		 * The retry here means something is wrong, either with the
		 * device, with the transport, or with our code.
		 * We keep this because sd.c has retries for capacity.
		 */
		if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
			lun->capacity.nsec = 0;
			lun->capacity.bsize = 512;
			lun->capacity.bshift = 0;
		}
	}
}

/*
 * The open function.
 * This is mostly needed to keep refcounting, but also to support
 * media checks on removable media drives.
 */
static int ub_bd_open(struct inode *inode, struct file *filp)
{
	struct gendisk *disk = inode->i_bdev->bd_disk;
	struct ub_lun *lun;
	struct ub_dev *sc;
	unsigned long flags;
	int rc;

	if ((lun = disk->private_data) == NULL)
		return -ENXIO;
	sc = lun->udev;

	spin_lock_irqsave(&ub_lock, flags);
	if (atomic_read(&sc->poison)) {
		spin_unlock_irqrestore(&ub_lock, flags);
		return -ENXIO;
	}
	sc->openc++;
	spin_unlock_irqrestore(&ub_lock, flags);

	if (lun->removable || lun->readonly)
		check_disk_change(inode->i_bdev);

	/*
	 * The sd.c considers ->media_present and ->changed not equivalent,
	 * under some pretty murky conditions (a failure of READ CAPACITY).
	 * We may need it one day.
	 */
	if (lun->removable && lun->changed && !(filp->f_flags & O_NDELAY)) {
		rc = -ENOMEDIUM;
		goto err_open;
	}

	if (lun->readonly && (filp->f_mode & FMODE_WRITE)) {
		rc = -EROFS;
		goto err_open;
	}

	return 0;

err_open:
	ub_put(sc);
	return rc;
}

/*
 */
static int ub_bd_release(struct inode *inode, struct file *filp)
{
	struct gendisk *disk = inode->i_bdev->bd_disk;
	struct ub_lun *lun = disk->private_data;
	struct ub_dev *sc = lun->udev;

	ub_put(sc);
	return 0;
}

/*
 * The ioctl interface.
 */
static int ub_bd_ioctl(struct inode *inode, struct file *filp,
    unsigned int cmd, unsigned long arg)
{
	struct gendisk *disk = inode->i_bdev->bd_disk;
	void __user *usermem = (void __user *) arg;

	return scsi_cmd_ioctl(filp, disk, cmd, usermem);
}

/*
 * This is called once a new disk was seen by the block layer or by ub_probe().
 * The main objective here is to discover the features of the media such as
 * the capacity, read-only status, etc. USB storage generally does not
 * need to be spun up, but if we needed it, this would be the place.
 *
 * This call can sleep.
 *
 * The return code is not used.
 */
static int ub_bd_revalidate(struct gendisk *disk)
{
	struct ub_lun *lun = disk->private_data;

	ub_revalidate(lun->udev, lun);

	/* XXX Support sector size switching like in sr.c */
	blk_queue_hardsect_size(disk->queue, lun->capacity.bsize);
	set_capacity(disk, lun->capacity.nsec);
	// set_disk_ro(sdkp->disk, lun->readonly);

	return 0;
}

/*
 * The check is called by the block layer to verify if the media
 * is still available. It is supposed to be harmless, lightweight and
 * non-intrusive in case the media was not changed.
 *
 * This call can sleep.
 *
 * The return code is bool!
 */
static int ub_bd_media_changed(struct gendisk *disk)
{
	struct ub_lun *lun = disk->private_data;

	if (!lun->removable)
		return 0;

	/*
	 * We clean checks always after every command, so this is not
	 * as dangerous as it looks. If the TEST_UNIT_READY fails here,
	 * the device is actually not ready with operator or software
	 * intervention required. One dangerous item might be a drive which
	 * spins itself down, and come the time to write dirty pages, this
	 * will fail, then block layer discards the data. Since we never
	 * spin drives up, such devices simply cannot be used with ub anyway.
	 */
	if (ub_sync_tur(lun->udev, lun) != 0) {
		lun->changed = 1;
		return 1;
	}

	return lun->changed;
}

static struct block_device_operations ub_bd_fops = {
	.owner		= THIS_MODULE,
	.open		= ub_bd_open,
	.release	= ub_bd_release,
	.ioctl		= ub_bd_ioctl,
	.media_changed	= ub_bd_media_changed,
	.revalidate_disk = ub_bd_revalidate,
};

/*
 * Common ->done routine for commands executed synchronously.
 */
static void ub_probe_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct completion *cop = cmd->back;
	complete(cop);
}

/*
 * Test if the device has a check condition on it, synchronously.
1794 */ 1795static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun) 1796{ 1797 struct ub_scsi_cmd *cmd; 1798 enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) }; 1799 unsigned long flags; 1800 struct completion compl; 1801 int rc; 1802 1803 init_completion(&compl); 1804 1805 rc = -ENOMEM; 1806 if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL) 1807 goto err_alloc; 1808 1809 cmd->cdb[0] = TEST_UNIT_READY; 1810 cmd->cdb_len = 6; 1811 cmd->dir = UB_DIR_NONE; 1812 cmd->state = UB_CMDST_INIT; 1813 cmd->lun = lun; /* This may be NULL, but that's ok */ 1814 cmd->done = ub_probe_done; 1815 cmd->back = &compl; 1816 1817 spin_lock_irqsave(sc->lock, flags); 1818 cmd->tag = sc->tagcnt++; 1819 1820 rc = ub_submit_scsi(sc, cmd); 1821 spin_unlock_irqrestore(sc->lock, flags); 1822 1823 if (rc != 0) { 1824 printk("ub: testing ready: submit error (%d)\n", rc); /* P3 */ 1825 goto err_submit; 1826 } 1827 1828 wait_for_completion(&compl); 1829 1830 rc = cmd->error; 1831 1832 if (rc == -EIO && cmd->key != 0) /* Retries for benh's key */ 1833 rc = cmd->key; 1834 1835err_submit: 1836 kfree(cmd); 1837err_alloc: 1838 return rc; 1839} 1840 1841/* 1842 * Read the SCSI capacity synchronously (for probing). 1843 */ 1844static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun, 1845 struct ub_capacity *ret) 1846{ 1847 struct ub_scsi_cmd *cmd; 1848 struct scatterlist *sg; 1849 char *p; 1850 enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) + 8 }; 1851 unsigned long flags; 1852 unsigned int bsize, shift; 1853 unsigned long nsec; 1854 struct completion compl; 1855 int rc; 1856 1857 init_completion(&compl); 1858 1859 rc = -ENOMEM; 1860 if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL) 1861 goto err_alloc; 1862 p = (char *)cmd + sizeof(struct ub_scsi_cmd); 1863 1864 cmd->cdb[0] = 0x25; 1865 cmd->cdb_len = 10; 1866 cmd->dir = UB_DIR_READ; 1867 cmd->state = UB_CMDST_INIT; 1868 cmd->nsg = 1; 1869 sg = &cmd->sgv[0]; 1870 sg->page = virt_to_page(p); 1871 sg->offset = (unsigned long)p & (PAGE_SIZE-1); 1872 sg->length = 8; 1873 cmd->len = 8; 1874 cmd->lun = lun; 1875 cmd->done = ub_probe_done; 1876 cmd->back = &compl; 1877 1878 spin_lock_irqsave(sc->lock, flags); 1879 cmd->tag = sc->tagcnt++; 1880 1881 rc = ub_submit_scsi(sc, cmd); 1882 spin_unlock_irqrestore(sc->lock, flags); 1883 1884 if (rc != 0) { 1885 printk("ub: reading capacity: submit error (%d)\n", rc); /* P3 */ 1886 goto err_submit; 1887 } 1888 1889 wait_for_completion(&compl); 1890 1891 if (cmd->error != 0) { 1892 printk("ub: reading capacity: error %d\n", cmd->error); /* P3 */ 1893 rc = -EIO; 1894 goto err_read; 1895 } 1896 if (cmd->act_len != 8) { 1897 printk("ub: reading capacity: size %d\n", cmd->act_len); /* P3 */ 1898 rc = -EIO; 1899 goto err_read; 1900 } 1901 1902 /* sd.c special-cases sector size of 0 to mean 512. Needed? Safe? 
*/ 1903 nsec = be32_to_cpu(*(__be32 *)p) + 1; 1904 bsize = be32_to_cpu(*(__be32 *)(p + 4)); 1905 switch (bsize) { 1906 case 512: shift = 0; break; 1907 case 1024: shift = 1; break; 1908 case 2048: shift = 2; break; 1909 case 4096: shift = 3; break; 1910 default: 1911 printk("ub: Bad sector size %u\n", bsize); /* P3 */ 1912 rc = -EDOM; 1913 goto err_inv_bsize; 1914 } 1915 1916 ret->bsize = bsize; 1917 ret->bshift = shift; 1918 ret->nsec = nsec << shift; 1919 rc = 0; 1920 1921err_inv_bsize: 1922err_read: 1923err_submit: 1924 kfree(cmd); 1925err_alloc: 1926 return rc; 1927} 1928 1929/* 1930 */ 1931static void ub_probe_urb_complete(struct urb *urb, struct pt_regs *pt) 1932{ 1933 struct completion *cop = urb->context; 1934 complete(cop); 1935} 1936 1937static void ub_probe_timeout(unsigned long arg) 1938{ 1939 struct completion *cop = (struct completion *) arg; 1940 complete(cop); 1941} 1942 1943/* 1944 * Reset with a Bulk reset. 1945 */ 1946static int ub_sync_reset(struct ub_dev *sc) 1947{ 1948 int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber; 1949 struct usb_ctrlrequest *cr; 1950 struct completion compl; 1951 struct timer_list timer; 1952 int rc; 1953 1954 init_completion(&compl); 1955 1956 cr = &sc->work_cr; 1957 cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE; 1958 cr->bRequest = US_BULK_RESET_REQUEST; 1959 cr->wValue = cpu_to_le16(0); 1960 cr->wIndex = cpu_to_le16(ifnum); 1961 cr->wLength = cpu_to_le16(0); 1962 1963 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe, 1964 (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl); 1965 sc->work_urb.actual_length = 0; 1966 sc->work_urb.error_count = 0; 1967 sc->work_urb.status = 0; 1968 1969 if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) { 1970 printk(KERN_WARNING 1971 "%s: Unable to submit a bulk reset (%d)\n", sc->name, rc); 1972 return rc; 1973 } 1974 1975 init_timer(&timer); 1976 timer.function = ub_probe_timeout; 1977 timer.data = (unsigned long) &compl; 1978 timer.expires = jiffies + UB_CTRL_TIMEOUT; 1979 add_timer(&timer); 1980 1981 wait_for_completion(&compl); 1982 1983 del_timer_sync(&timer); 1984 usb_kill_urb(&sc->work_urb); 1985 1986 return sc->work_urb.status; 1987} 1988 1989/* 1990 * Get number of LUNs by the way of Bulk GetMaxLUN command. 
1991 */ 1992static int ub_sync_getmaxlun(struct ub_dev *sc) 1993{ 1994 int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber; 1995 unsigned char *p; 1996 enum { ALLOC_SIZE = 1 }; 1997 struct usb_ctrlrequest *cr; 1998 struct completion compl; 1999 struct timer_list timer; 2000 int nluns; 2001 int rc; 2002 2003 init_completion(&compl); 2004 2005 rc = -ENOMEM; 2006 if ((p = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL) 2007 goto err_alloc; 2008 *p = 55; 2009 2010 cr = &sc->work_cr; 2011 cr->bRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE; 2012 cr->bRequest = US_BULK_GET_MAX_LUN; 2013 cr->wValue = cpu_to_le16(0); 2014 cr->wIndex = cpu_to_le16(ifnum); 2015 cr->wLength = cpu_to_le16(1); 2016 2017 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe, 2018 (unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl); 2019 sc->work_urb.actual_length = 0; 2020 sc->work_urb.error_count = 0; 2021 sc->work_urb.status = 0; 2022 2023 if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) { 2024 if (rc == -EPIPE) { 2025 printk("%s: Stall submitting GetMaxLUN, using 1 LUN\n", 2026 sc->name); /* P3 */ 2027 } else { 2028 printk(KERN_NOTICE 2029 "%s: Unable to submit GetMaxLUN (%d)\n", 2030 sc->name, rc); 2031 } 2032 goto err_submit; 2033 } 2034 2035 init_timer(&timer); 2036 timer.function = ub_probe_timeout; 2037 timer.data = (unsigned long) &compl; 2038 timer.expires = jiffies + UB_CTRL_TIMEOUT; 2039 add_timer(&timer); 2040 2041 wait_for_completion(&compl); 2042 2043 del_timer_sync(&timer); 2044 usb_kill_urb(&sc->work_urb); 2045 2046 if ((rc = sc->work_urb.status) < 0) { 2047 if (rc == -EPIPE) { 2048 printk("%s: Stall at GetMaxLUN, using 1 LUN\n", 2049 sc->name); /* P3 */ 2050 } else { 2051 printk(KERN_NOTICE 2052 "%s: Error at GetMaxLUN (%d)\n", 2053 sc->name, rc); 2054 } 2055 goto err_io; 2056 } 2057 2058 if (sc->work_urb.actual_length != 1) { 2059 printk("%s: GetMaxLUN returned %d bytes\n", sc->name, 2060 sc->work_urb.actual_length); /* P3 */ 2061 nluns = 0; 2062 } else { 2063 if ((nluns = *p) == 55) { 2064 nluns = 0; 2065 } else { 2066 /* GetMaxLUN returns the maximum LUN number */ 2067 nluns += 1; 2068 if (nluns > UB_MAX_LUNS) 2069 nluns = UB_MAX_LUNS; 2070 } 2071 printk("%s: GetMaxLUN returned %d, using %d LUNs\n", sc->name, 2072 *p, nluns); /* P3 */ 2073 } 2074 2075 kfree(p); 2076 return nluns; 2077 2078err_io: 2079err_submit: 2080 kfree(p); 2081err_alloc: 2082 return rc; 2083} 2084 2085/* 2086 * Clear initial stalls. 
2087 */ 2088static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe) 2089{ 2090 int endp; 2091 struct usb_ctrlrequest *cr; 2092 struct completion compl; 2093 struct timer_list timer; 2094 int rc; 2095 2096 init_completion(&compl); 2097 2098 endp = usb_pipeendpoint(stalled_pipe); 2099 if (usb_pipein (stalled_pipe)) 2100 endp |= USB_DIR_IN; 2101 2102 cr = &sc->work_cr; 2103 cr->bRequestType = USB_RECIP_ENDPOINT; 2104 cr->bRequest = USB_REQ_CLEAR_FEATURE; 2105 cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT); 2106 cr->wIndex = cpu_to_le16(endp); 2107 cr->wLength = cpu_to_le16(0); 2108 2109 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe, 2110 (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl); 2111 sc->work_urb.actual_length = 0; 2112 sc->work_urb.error_count = 0; 2113 sc->work_urb.status = 0; 2114 2115 if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) { 2116 printk(KERN_WARNING 2117 "%s: Unable to submit a probe clear (%d)\n", sc->name, rc); 2118 return rc; 2119 } 2120 2121 init_timer(&timer); 2122 timer.function = ub_probe_timeout; 2123 timer.data = (unsigned long) &compl; 2124 timer.expires = jiffies + UB_CTRL_TIMEOUT; 2125 add_timer(&timer); 2126 2127 wait_for_completion(&compl); 2128 2129 del_timer_sync(&timer); 2130 usb_kill_urb(&sc->work_urb); 2131 2132 /* reset the endpoint toggle */ 2133 usb_settoggle(sc->dev, endp, usb_pipeout(sc->last_pipe), 0); 2134 2135 return 0; 2136} 2137 2138/* 2139 * Get the pipe settings. 2140 */ 2141static int ub_get_pipes(struct ub_dev *sc, struct usb_device *dev, 2142 struct usb_interface *intf) 2143{ 2144 struct usb_host_interface *altsetting = intf->cur_altsetting; 2145 struct usb_endpoint_descriptor *ep_in = NULL; 2146 struct usb_endpoint_descriptor *ep_out = NULL; 2147 struct usb_endpoint_descriptor *ep; 2148 int i; 2149 2150 /* 2151 * Find the endpoints we need. 2152 * We are expecting a minimum of 2 endpoints - in and out (bulk). 2153 * We will ignore any others. 2154 */ 2155 for (i = 0; i < altsetting->desc.bNumEndpoints; i++) { 2156 ep = &altsetting->endpoint[i].desc; 2157 2158 /* Is it a BULK endpoint? */ 2159 if ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) 2160 == USB_ENDPOINT_XFER_BULK) { 2161 /* BULK in or out? */ 2162 if (ep->bEndpointAddress & USB_DIR_IN) 2163 ep_in = ep; 2164 else 2165 ep_out = ep; 2166 } 2167 } 2168 2169 if (ep_in == NULL || ep_out == NULL) { 2170 printk(KERN_NOTICE "%s: failed endpoint check\n", 2171 sc->name); 2172 return -ENODEV; 2173 } 2174 2175 /* Calculate and store the pipe values */ 2176 sc->send_ctrl_pipe = usb_sndctrlpipe(dev, 0); 2177 sc->recv_ctrl_pipe = usb_rcvctrlpipe(dev, 0); 2178 sc->send_bulk_pipe = usb_sndbulkpipe(dev, 2179 ep_out->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); 2180 sc->recv_bulk_pipe = usb_rcvbulkpipe(dev, 2181 ep_in->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); 2182 2183 return 0; 2184} 2185 2186/* 2187 * Probing is done in the process context, which allows us to cheat 2188 * and not to build a state machine for the discovery. 
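 *
 * In outline, ub_probe() below:
 *   1. allocates the ub_dev and finds the bulk IN/OUT pipes;
 *   2. sends a few TEST UNIT READY commands to eat the initial
 *      UNIT ATTENTION check that many devices report after power-on;
 *   3. queries the LUN count with GetMaxLUN (defaulting to one LUN);
 *   4. allocates a gendisk and a request queue for every LUN found.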
2189  */
2190 static int ub_probe(struct usb_interface *intf,
2191     const struct usb_device_id *dev_id)
2192 {
2193 	struct ub_dev *sc;
2194 	int nluns;
2195 	int rc;
2196 	int i;
2197 
2198 	if (usb_usual_check_type(dev_id, USB_US_TYPE_UB))
2199 		return -ENXIO;
2200 
2201 	rc = -ENOMEM;
2202 	if ((sc = kzalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL)
2203 		goto err_core;
2204 	sc->lock = ub_next_lock();
2205 	INIT_LIST_HEAD(&sc->luns);
2206 	usb_init_urb(&sc->work_urb);
2207 	tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
2208 	atomic_set(&sc->poison, 0);
2209 	INIT_WORK(&sc->reset_work, ub_reset_task, sc);
2210 	init_waitqueue_head(&sc->reset_wait);
2211 
2212 	init_timer(&sc->work_timer);
2213 	sc->work_timer.data = (unsigned long) sc;
2214 	sc->work_timer.function = ub_urb_timeout;
2215 
2216 	ub_init_completion(&sc->work_done);
2217 	sc->work_done.done = 1;		/* A little yuk, but oh well... */
2218 
2219 	sc->dev = interface_to_usbdev(intf);
2220 	sc->intf = intf;
2221 	// sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
2222 	usb_set_intfdata(intf, sc);
2223 	usb_get_dev(sc->dev);
2224 	// usb_get_intf(sc->intf);	/* Do we need this? */
2225 
2226 	snprintf(sc->name, 12, DRV_NAME "(%d.%d)",
2227 	    sc->dev->bus->busnum, sc->dev->devnum);
2228 
2229 	/* XXX Verify that we can handle the device (from descriptors) */
2230 
2231 	if (ub_get_pipes(sc, sc->dev, intf) != 0)
2232 		goto err_dev_desc;
2233 
2234 	/*
2235 	 * At this point, all USB initialization is done, do upper layer.
2236 	 * We really hate halfway initialized structures, so from the
2237 	 * invariants perspective, this ub_dev is fully constructed at
2238 	 * this point.
2239 	 */
2240 
2241 	/*
2242 	 * This is needed to clear toggles. It is a problem only if we do
2243 	 * `rmmod ub && modprobe ub` without disconnects, but we like that.
2244 	 */
2245 #if 0 /* iPod Mini fails if we do this (big white iPod works) */
2246 	ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
2247 	ub_probe_clear_stall(sc, sc->send_bulk_pipe);
2248 #endif
2249 
2250 	/*
2251 	 * The way this is used by the startup code is a little specific.
2252 	 * A SCSI check causes a USB stall. Our common case code sees it
2253 	 * and clears the check, after which the device is ready for use.
2254 	 * But if a check was not present, any command other than
2255 	 * TEST_UNIT_READY ends with a lockup (including REQUEST_SENSE).
2256 	 *
2257 	 * If we neglect to clear the SCSI check, the first real command fails
2258 	 * (which is the capacity readout). We clear that and retry, but why
2259 	 * cause spurious retries for no reason?
2260 	 *
2261 	 * Revalidation may start with its own TEST_UNIT_READY, but that one
2262 	 * has to succeed, so we clear checks with an additional one here.
2263 	 * In any case it's not our business how revalidation is implemented.
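 *
 * The 0x6 tested in the retry loop below is the SCSI sense key
 * UNIT ATTENTION, which ub_sync_tur() reports as a positive return
 * value; anything else (success or a hard error) terminates the loop.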
2264  */
2265 	for (i = 0; i < 3; i++) {  /* Retries for benh's key */
2266 		if ((rc = ub_sync_tur(sc, NULL)) <= 0) break;
2267 		if (rc != 0x6) break;
2268 		msleep(10);
2269 	}
2270 
2271 	nluns = 1;
2272 	for (i = 0; i < 3; i++) {
2273 		if ((rc = ub_sync_getmaxlun(sc)) < 0)
2274 			break;
2275 		if (rc != 0) {
2276 			nluns = rc;
2277 			break;
2278 		}
2279 		msleep(100);
2280 	}
2281 
2282 	for (i = 0; i < nluns; i++) {
2283 		ub_probe_lun(sc, i);
2284 	}
2285 	return 0;
2286 
2287 err_dev_desc:
2288 	usb_set_intfdata(intf, NULL);
2289 	// usb_put_intf(sc->intf);
2290 	usb_put_dev(sc->dev);
2291 	kfree(sc);
2292 err_core:
2293 	return rc;
2294 }
2295 
2296 static int ub_probe_lun(struct ub_dev *sc, int lnum)
2297 {
2298 	struct ub_lun *lun;
2299 	request_queue_t *q;
2300 	struct gendisk *disk;
2301 	int rc;
2302 
2303 	rc = -ENOMEM;
2304 	if ((lun = kzalloc(sizeof(struct ub_lun), GFP_KERNEL)) == NULL)
2305 		goto err_alloc;
2306 	lun->num = lnum;
2307 
2308 	rc = -ENOSR;
2309 	if ((lun->id = ub_id_get()) == -1)
2310 		goto err_id;
2311 
2312 	lun->udev = sc;
2313 	list_add(&lun->link, &sc->luns);
2314 
2315 	snprintf(lun->name, 16, DRV_NAME "%c(%d.%d.%d)",
2316 	    lun->id + 'a', sc->dev->bus->busnum, sc->dev->devnum, lun->num);
2317 
2318 	lun->removable = 1;		/* XXX Query this from the device */
2319 	lun->changed = 1;		/* Only ub_revalidate clears this */
2320 	ub_revalidate(sc, lun);
2321 
2322 	rc = -ENOMEM;
2323 	if ((disk = alloc_disk(UB_PARTS_PER_LUN)) == NULL)
2324 		goto err_diskalloc;
2325 
2326 	lun->disk = disk;
2327 	sprintf(disk->disk_name, DRV_NAME "%c", lun->id + 'a');
2328 	sprintf(disk->devfs_name, DEVFS_NAME "/%c", lun->id + 'a');
2329 	disk->major = UB_MAJOR;
2330 	disk->first_minor = lun->id * UB_PARTS_PER_LUN;
2331 	disk->fops = &ub_bd_fops;
2332 	disk->private_data = lun;
2333 	disk->driverfs_dev = &sc->intf->dev;
2334 
2335 	rc = -ENOMEM;
2336 	if ((q = blk_init_queue(ub_request_fn, sc->lock)) == NULL)
2337 		goto err_blkqinit;
2338 
2339 	disk->queue = q;
2340 
2341 	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
2342 	blk_queue_max_hw_segments(q, UB_MAX_REQ_SG);
2343 	blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
2344 	blk_queue_segment_boundary(q, 0xffffffff);	/* Dubious. */
2345 	blk_queue_max_sectors(q, UB_MAX_SECTORS);
2346 	blk_queue_hardsect_size(q, lun->capacity.bsize);
2347 
2348 	q->queuedata = lun;
2349 
2350 	set_capacity(disk, lun->capacity.nsec);
2351 	if (lun->removable)
2352 		disk->flags |= GENHD_FL_REMOVABLE;
2353 
2354 	add_disk(disk);
2355 
2356 	return 0;
2357 
2358 err_blkqinit:
2359 	put_disk(disk);
2360 err_diskalloc:
2361 	list_del(&lun->link);
2362 	ub_id_put(lun->id);
2363 err_id:
2364 	kfree(lun);
2365 err_alloc:
2366 	return rc;
2367 }
2368 
2369 static void ub_disconnect(struct usb_interface *intf)
2370 {
2371 	struct ub_dev *sc = usb_get_intfdata(intf);
2372 	struct list_head *p;
2373 	struct ub_lun *lun;
2374 	struct gendisk *disk;
2375 	unsigned long flags;
2376 
2377 	/*
2378 	 * Prevent ub_bd_release from pulling the rug from under us.
2379 	 * XXX This is starting to look like a kref.
2380 	 * XXX Why not take this ref at probe time?
2381 	 */
2382 	spin_lock_irqsave(&ub_lock, flags);
2383 	sc->openc++;
2384 	spin_unlock_irqrestore(&ub_lock, flags);
2385 
2386 	/*
2387 	 * Fence stall clearings, operations triggered by unlinks and so on.
2388 	 * We do not attempt to unlink any URBs, because we do not trust the
2389 	 * unlink paths in HC drivers. Also, we get -84 upon disconnect anyway.
2390 	 */
2391 	atomic_set(&sc->poison, 1);
2392 
2393 	/*
2394 	 * Wait for reset to end, if any.
2395 	 */
2396 	wait_event(sc->reset_wait, !sc->reset);
2397 
2398 	/*
2399 	 * Blow away queued commands.
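 * Every command still sitting on the queue is completed with
 * -ENOTCONN so that its originator can finish the corresponding
 * block request.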
2400 	 *
2401 	 * Actually, this never works, because before we get here
2402 	 * the HCD terminates outstanding URB(s). It causes our
2403 	 * SCSI command queue to advance, commands fail to submit,
2404 	 * and the whole queue drains. So, we just use this code to
2405 	 * print warnings.
2406 	 */
2407 	spin_lock_irqsave(sc->lock, flags);
2408 	{
2409 		struct ub_scsi_cmd *cmd;
2410 		int cnt = 0;
2411 		while ((cmd = ub_cmdq_peek(sc)) != NULL) {
2412 			cmd->error = -ENOTCONN;
2413 			cmd->state = UB_CMDST_DONE;
2414 			ub_cmdq_pop(sc);
2415 			(*cmd->done)(sc, cmd);
2416 			cnt++;
2417 		}
2418 		if (cnt != 0) {
2419 			printk(KERN_WARNING "%s: "
2420 			    "%d commands were queued after shutdown\n", sc->name, cnt);
2421 		}
2422 	}
2423 	spin_unlock_irqrestore(sc->lock, flags);
2424 
2425 	/*
2426 	 * Unregister the upper layer.
2427 	 */
2428 	list_for_each (p, &sc->luns) {
2429 		lun = list_entry(p, struct ub_lun, link);
2430 		disk = lun->disk;
2431 		if (disk->flags & GENHD_FL_UP)
2432 			del_gendisk(disk);
2433 		/*
2434 		 * I wish I could do:
2435 		 *    set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
2436 		 * As it is, we rely on our internal poisoning and let
2437 		 * the upper levels spin furiously, failing all the I/O.
2438 		 */
2439 	}
2440 
2441 	/*
2442 	 * Testing for -EINPROGRESS is always a bug, so we are bending
2443 	 * the rules a little.
2444 	 */
2445 	spin_lock_irqsave(sc->lock, flags);
2446 	if (sc->work_urb.status == -EINPROGRESS) {	/* janitors: ignore */
2447 		printk(KERN_WARNING "%s: "
2448 		    "URB is active after disconnect\n", sc->name);
2449 	}
2450 	spin_unlock_irqrestore(sc->lock, flags);
2451 
2452 	/*
2453 	 * There is virtually no chance that another CPU is still running
2454 	 * this long after ub_urb_complete should have called del_timer, but
2455 	 * that holds only if the HCD didn't forget to deliver a callback on unlink.
2456 	 */
2457 	del_timer_sync(&sc->work_timer);
2458 
2459 	/*
2460 	 * At this point there must be no commands coming from anyone
2461 	 * and no URBs left in transit.
2462 	 */
2463 
2464 	usb_set_intfdata(intf, NULL);
2465 	// usb_put_intf(sc->intf);
2466 	sc->intf = NULL;
2467 	usb_put_dev(sc->dev);
2468 	sc->dev = NULL;
2469 
2470 	ub_put(sc);
2471 }
2472 
2473 static struct usb_driver ub_driver = {
2474 	.name =		"ub",
2475 	.probe =	ub_probe,
2476 	.disconnect =	ub_disconnect,
2477 	.id_table =	ub_usb_ids,
2478 };
2479 
2480 static int __init ub_init(void)
2481 {
2482 	int rc;
2483 	int i;
2484 
2485 	for (i = 0; i < UB_QLOCK_NUM; i++)
2486 		spin_lock_init(&ub_qlockv[i]);
2487 
2488 	if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0)
2489 		goto err_regblkdev;
2490 	devfs_mk_dir(DEVFS_NAME);
2491 
2492 	if ((rc = usb_register(&ub_driver)) != 0)
2493 		goto err_register;
2494 
2495 	usb_usual_set_present(USB_US_TYPE_UB);
2496 	return 0;
2497 
2498 err_register:
2499 	devfs_remove(DEVFS_NAME);
2500 	unregister_blkdev(UB_MAJOR, DRV_NAME);
2501 err_regblkdev:
2502 	return rc;
2503 }
2504 
2505 static void __exit ub_exit(void)
2506 {
2507 	usb_deregister(&ub_driver);
2508 
2509 	devfs_remove(DEVFS_NAME);
2510 	unregister_blkdev(UB_MAJOR, DRV_NAME);
2511 	usb_usual_clear_present(USB_US_TYPE_UB);
2512 }
2513 
2514 module_init(ub_init);
2515 module_exit(ub_exit);
2516 
2517 MODULE_LICENSE("GPL");
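
/*
 * A usage sketch for the reader: built with CONFIG_BLK_DEV_UB, this module
 * registers block major UB_MAJOR (180) and exposes one gendisk per LUN as
 * /dev/uba, /dev/ubb, and so on, each with UB_PARTS_PER_LUN minors for
 * partitions (uba1, uba2, ...).  Whether a newly plugged device is claimed
 * by ub or by usb-storage is decided by the usb_usual/libusual type check
 * seen at the top of ub_probe().
 */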