Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.15-rc2
1/* 2 * The low performance USB storage driver (ub). 3 * 4 * Copyright (c) 1999, 2000 Matthew Dharm (mdharm-usb@one-eyed-alien.net) 5 * Copyright (C) 2004 Pete Zaitcev (zaitcev@yahoo.com) 6 * 7 * This work is a part of Linux kernel, is derived from it, 8 * and is not licensed separately. See file COPYING for details. 9 * 10 * TODO (sorted by decreasing priority) 11 * -- Kill first_open (Al Viro fixed the block layer now) 12 * -- Do resets with usb_device_reset (needs a thread context, use khubd) 13 * -- set readonly flag for CDs, set removable flag for CF readers 14 * -- do inquiry and verify we got a disk and not a tape (for LUN mismatch) 15 * -- special case some senses, e.g. 3a/0 -> no media present, reduce retries 16 * -- verify the 13 conditions and do bulk resets 17 * -- kill last_pipe and simply do two-state clearing on both pipes 18 * -- verify protocol (bulk) from USB descriptors (maybe...) 19 * -- highmem 20 * -- move top_sense and work_bcs into separate allocations (if they survive) 21 * for cache purists and esoteric architectures. 22 * -- Allocate structure for LUN 0 before the first ub_sync_tur, avoid NULL. ? 23 * -- prune comments, they are too volumnous 24 * -- Exterminate P3 printks 25 * -- Resove XXX's 26 * -- Redo "benh's retries", perhaps have spin-up code to handle them. V:D=? 27 * -- CLEAR, CLR2STS, CLRRS seem to be ripe for refactoring. 28 */ 29#include <linux/kernel.h> 30#include <linux/module.h> 31#include <linux/usb.h> 32#include <linux/blkdev.h> 33#include <linux/devfs_fs_kernel.h> 34#include <linux/timer.h> 35#include <scsi/scsi.h> 36 37#define DRV_NAME "ub" 38#define DEVFS_NAME DRV_NAME 39 40#define UB_MAJOR 180 41 42/* 43 * The command state machine is the key model for understanding of this driver. 44 * 45 * The general rule is that all transitions are done towards the bottom 46 * of the diagram, thus preventing any loops. 47 * 48 * An exception to that is how the STAT state is handled. A counter allows it 49 * to be re-entered along the path marked with [C]. 50 * 51 * +--------+ 52 * ! INIT ! 53 * +--------+ 54 * ! 55 * ub_scsi_cmd_start fails ->--------------------------------------\ 56 * ! ! 57 * V ! 58 * +--------+ ! 59 * ! CMD ! ! 60 * +--------+ ! 61 * ! +--------+ ! 62 * was -EPIPE -->-------------------------------->! CLEAR ! ! 63 * ! +--------+ ! 64 * ! ! ! 65 * was error -->------------------------------------- ! --------->\ 66 * ! ! ! 67 * /--<-- cmd->dir == NONE ? ! ! 68 * ! ! ! ! 69 * ! V ! ! 70 * ! +--------+ ! ! 71 * ! ! DATA ! ! ! 72 * ! +--------+ ! ! 73 * ! ! +---------+ ! ! 74 * ! was -EPIPE -->--------------->! CLR2STS ! ! ! 75 * ! ! +---------+ ! ! 76 * ! ! ! ! ! 77 * ! ! was error -->---- ! --------->\ 78 * ! was error -->--------------------- ! ------------- ! --------->\ 79 * ! ! ! ! ! 80 * ! V ! ! ! 81 * \--->+--------+ ! ! ! 82 * ! STAT !<--------------------------/ ! ! 83 * /--->+--------+ ! ! 84 * ! ! ! ! 85 * [C] was -EPIPE -->-----------\ ! ! 86 * ! ! ! ! ! 87 * +<---- len == 0 ! ! ! 88 * ! ! ! ! ! 89 * ! was error -->--------------------------------------!---------->\ 90 * ! ! ! ! ! 91 * +<---- bad CSW ! ! ! 92 * +<---- bad tag ! ! ! 93 * ! ! V ! ! 94 * ! ! +--------+ ! ! 95 * ! ! ! CLRRS ! ! ! 96 * ! ! +--------+ ! ! 97 * ! ! ! ! ! 98 * \------- ! --------------------[C]--------\ ! ! 99 * ! ! ! ! 100 * cmd->error---\ +--------+ ! ! 101 * ! +--------------->! SENSE !<----------/ ! 102 * STAT_FAIL----/ +--------+ ! 103 * ! ! V 104 * ! V +--------+ 105 * \--------------------------------\--------------------->! DONE ! 
106 * +--------+ 107 */ 108 109/* 110 * Definitions which have to be scattered once we understand the layout better. 111 */ 112 113/* Transport (despite PR in the name) */ 114#define US_PR_BULK 0x50 /* bulk only */ 115 116/* Protocol */ 117#define US_SC_SCSI 0x06 /* Transparent */ 118 119/* 120 * This many LUNs per USB device. 121 * Every one of them takes a host, see UB_MAX_HOSTS. 122 */ 123#define UB_MAX_LUNS 9 124 125/* 126 */ 127 128#define UB_MINORS_PER_MAJOR 8 129 130#define UB_MAX_CDB_SIZE 16 /* Corresponds to Bulk */ 131 132#define UB_SENSE_SIZE 18 133 134/* 135 */ 136 137/* command block wrapper */ 138struct bulk_cb_wrap { 139 __le32 Signature; /* contains 'USBC' */ 140 u32 Tag; /* unique per command id */ 141 __le32 DataTransferLength; /* size of data */ 142 u8 Flags; /* direction in bit 0 */ 143 u8 Lun; /* LUN */ 144 u8 Length; /* of of the CDB */ 145 u8 CDB[UB_MAX_CDB_SIZE]; /* max command */ 146}; 147 148#define US_BULK_CB_WRAP_LEN 31 149#define US_BULK_CB_SIGN 0x43425355 /*spells out USBC */ 150#define US_BULK_FLAG_IN 1 151#define US_BULK_FLAG_OUT 0 152 153/* command status wrapper */ 154struct bulk_cs_wrap { 155 __le32 Signature; /* should = 'USBS' */ 156 u32 Tag; /* same as original command */ 157 __le32 Residue; /* amount not transferred */ 158 u8 Status; /* see below */ 159}; 160 161#define US_BULK_CS_WRAP_LEN 13 162#define US_BULK_CS_SIGN 0x53425355 /* spells out 'USBS' */ 163#define US_BULK_STAT_OK 0 164#define US_BULK_STAT_FAIL 1 165#define US_BULK_STAT_PHASE 2 166 167/* bulk-only class specific requests */ 168#define US_BULK_RESET_REQUEST 0xff 169#define US_BULK_GET_MAX_LUN 0xfe 170 171/* 172 */ 173struct ub_dev; 174 175#define UB_MAX_REQ_SG 9 /* cdrecord requires 32KB and maybe a header */ 176#define UB_MAX_SECTORS 64 177 178/* 179 * A second is more than enough for a 32K transfer (UB_MAX_SECTORS) 180 * even if a webcam hogs the bus, but some devices need time to spin up. 181 */ 182#define UB_URB_TIMEOUT (HZ*2) 183#define UB_DATA_TIMEOUT (HZ*5) /* ZIP does spin-ups in the data phase */ 184#define UB_STAT_TIMEOUT (HZ*5) /* Same spinups and eject for a dataless cmd. */ 185#define UB_CTRL_TIMEOUT (HZ/2) /* 500ms ought to be enough to clear a stall */ 186 187/* 188 * An instance of a SCSI command in transit. 189 */ 190#define UB_DIR_NONE 0 191#define UB_DIR_READ 1 192#define UB_DIR_ILLEGAL2 2 193#define UB_DIR_WRITE 3 194 195#define UB_DIR_CHAR(c) (((c)==UB_DIR_WRITE)? 'w': \ 196 (((c)==UB_DIR_READ)? 'r': 'n')) 197 198enum ub_scsi_cmd_state { 199 UB_CMDST_INIT, /* Initial state */ 200 UB_CMDST_CMD, /* Command submitted */ 201 UB_CMDST_DATA, /* Data phase */ 202 UB_CMDST_CLR2STS, /* Clearing before requesting status */ 203 UB_CMDST_STAT, /* Status phase */ 204 UB_CMDST_CLEAR, /* Clearing a stall (halt, actually) */ 205 UB_CMDST_CLRRS, /* Clearing before retrying status */ 206 UB_CMDST_SENSE, /* Sending Request Sense */ 207 UB_CMDST_DONE /* Final state */ 208}; 209 210static char *ub_scsi_cmd_stname[] = { 211 ". ", 212 "Cmd", 213 "dat", 214 "c2s", 215 "sts", 216 "clr", 217 "crs", 218 "Sen", 219 "fin" 220}; 221 222struct ub_scsi_cmd { 223 unsigned char cdb[UB_MAX_CDB_SIZE]; 224 unsigned char cdb_len; 225 226 unsigned char dir; /* 0 - none, 1 - read, 3 - write. 
*/ 227 unsigned char trace_index; 228 enum ub_scsi_cmd_state state; 229 unsigned int tag; 230 struct ub_scsi_cmd *next; 231 232 int error; /* Return code - valid upon done */ 233 unsigned int act_len; /* Return size */ 234 unsigned char key, asc, ascq; /* May be valid if error==-EIO */ 235 236 int stat_count; /* Retries getting status. */ 237 238 unsigned int len; /* Requested length */ 239 unsigned int current_sg; 240 unsigned int nsg; /* sgv[nsg] */ 241 struct scatterlist sgv[UB_MAX_REQ_SG]; 242 243 struct ub_lun *lun; 244 void (*done)(struct ub_dev *, struct ub_scsi_cmd *); 245 void *back; 246}; 247 248/* 249 */ 250struct ub_capacity { 251 unsigned long nsec; /* Linux size - 512 byte sectors */ 252 unsigned int bsize; /* Linux hardsect_size */ 253 unsigned int bshift; /* Shift between 512 and hard sects */ 254}; 255 256/* 257 * The SCSI command tracing structure. 258 */ 259 260#define SCMD_ST_HIST_SZ 8 261#define SCMD_TRACE_SZ 63 /* Less than 4KB of 61-byte lines */ 262 263struct ub_scsi_cmd_trace { 264 int hcur; 265 unsigned int tag; 266 unsigned int req_size, act_size; 267 unsigned char op; 268 unsigned char dir; 269 unsigned char key, asc, ascq; 270 char st_hst[SCMD_ST_HIST_SZ]; 271}; 272 273struct ub_scsi_trace { 274 int cur; 275 struct ub_scsi_cmd_trace vec[SCMD_TRACE_SZ]; 276}; 277 278/* 279 * This is a direct take-off from linux/include/completion.h 280 * The difference is that I do not wait on this thing, just poll. 281 * When I want to wait (ub_probe), I just use the stock completion. 282 * 283 * Note that INIT_COMPLETION takes no lock. It is correct. But why 284 * in the bloody hell that thing takes struct instead of pointer to struct 285 * is quite beyond me. I just copied it from the stock completion. 286 */ 287struct ub_completion { 288 unsigned int done; 289 spinlock_t lock; 290}; 291 292static inline void ub_init_completion(struct ub_completion *x) 293{ 294 x->done = 0; 295 spin_lock_init(&x->lock); 296} 297 298#define UB_INIT_COMPLETION(x) ((x).done = 0) 299 300static void ub_complete(struct ub_completion *x) 301{ 302 unsigned long flags; 303 304 spin_lock_irqsave(&x->lock, flags); 305 x->done++; 306 spin_unlock_irqrestore(&x->lock, flags); 307} 308 309static int ub_is_completed(struct ub_completion *x) 310{ 311 unsigned long flags; 312 int ret; 313 314 spin_lock_irqsave(&x->lock, flags); 315 ret = x->done; 316 spin_unlock_irqrestore(&x->lock, flags); 317 return ret; 318} 319 320/* 321 */ 322struct ub_scsi_cmd_queue { 323 int qlen, qmax; 324 struct ub_scsi_cmd *head, *tail; 325}; 326 327/* 328 * The block device instance (one per LUN). 329 */ 330struct ub_lun { 331 struct ub_dev *udev; 332 struct list_head link; 333 struct gendisk *disk; 334 int id; /* Host index */ 335 int num; /* LUN number */ 336 char name[16]; 337 338 int changed; /* Media was changed */ 339 int removable; 340 int readonly; 341 int first_open; /* Kludge. See ub_bd_open. */ 342 343 /* Use Ingo's mempool if or when we have more than one command. */ 344 /* 345 * Currently we never need more than one command for the whole device. 346 * However, giving every LUN a command is a cheap and automatic way 347 * to enforce fairness between them. 348 */ 349 int cmda[1]; 350 struct ub_scsi_cmd cmdv[1]; 351 352 struct ub_capacity capacity; 353}; 354 355/* 356 * The USB device instance. 357 */ 358struct ub_dev { 359 spinlock_t lock; 360 atomic_t poison; /* The USB device is disconnected */ 361 int openc; /* protected by ub_lock! 
*/ 362 /* kref is too implicit for our taste */ 363 unsigned int tagcnt; 364 char name[12]; 365 struct usb_device *dev; 366 struct usb_interface *intf; 367 368 struct list_head luns; 369 370 unsigned int send_bulk_pipe; /* cached pipe values */ 371 unsigned int recv_bulk_pipe; 372 unsigned int send_ctrl_pipe; 373 unsigned int recv_ctrl_pipe; 374 375 struct tasklet_struct tasklet; 376 377 struct ub_scsi_cmd_queue cmd_queue; 378 struct ub_scsi_cmd top_rqs_cmd; /* REQUEST SENSE */ 379 unsigned char top_sense[UB_SENSE_SIZE]; 380 381 struct ub_completion work_done; 382 struct urb work_urb; 383 struct timer_list work_timer; 384 int last_pipe; /* What might need clearing */ 385 __le32 signature; /* Learned signature */ 386 struct bulk_cb_wrap work_bcb; 387 struct bulk_cs_wrap work_bcs; 388 struct usb_ctrlrequest work_cr; 389 390 int sg_stat[6]; 391 struct ub_scsi_trace tr; 392}; 393 394/* 395 */ 396static void ub_cleanup(struct ub_dev *sc); 397static int ub_request_fn_1(struct ub_lun *lun, struct request *rq); 398static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, 399 struct ub_scsi_cmd *cmd, struct request *rq); 400static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, 401 struct ub_scsi_cmd *cmd, struct request *rq); 402static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd); 403static void ub_end_rq(struct request *rq, int uptodate); 404static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd); 405static void ub_urb_complete(struct urb *urb, struct pt_regs *pt); 406static void ub_scsi_action(unsigned long _dev); 407static void ub_scsi_dispatch(struct ub_dev *sc); 408static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd); 409static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd); 410static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc); 411static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd); 412static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd); 413static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd); 414static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd); 415static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd, 416 int stalled_pipe); 417static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd); 418static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun); 419static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun, 420 struct ub_capacity *ret); 421static int ub_probe_lun(struct ub_dev *sc, int lnum); 422 423/* 424 */ 425static struct usb_device_id ub_usb_ids[] = { 426 // { USB_DEVICE_VER(0x0781, 0x0002, 0x0009, 0x0009) }, /* SDDR-31 */ 427 { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_BULK) }, 428 { } 429}; 430 431MODULE_DEVICE_TABLE(usb, ub_usb_ids); 432 433/* 434 * Find me a way to identify "next free minor" for add_disk(), 435 * and the array disappears the next day. However, the number of 436 * hosts has something to do with the naming and /proc/partitions. 437 * This has to be thought out in detail before changing. 438 * If UB_MAX_HOST was 1000, we'd use a bitmap. Or a better data structure. 439 */ 440#define UB_MAX_HOSTS 26 441static char ub_hostv[UB_MAX_HOSTS]; 442 443static DEFINE_SPINLOCK(ub_lock); /* Locks globals and ->openc */ 444 445/* 446 * The SCSI command tracing procedures. 
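 *
 * (Editorial note, not part of the original driver: the tracing state is
 *  two nested rings. sc->tr.vec[] holds SCMD_TRACE_SZ per-command entries,
 *  advanced by ub_cmdtr_new(); each entry keeps its own ring st_hst[] of
 *  SCMD_ST_HIST_SZ state codes, appended by ub_cmdtr_state() as the command
 *  walks the state machine. The tag check in the helpers guards against an
 *  entry that was already recycled for a newer command. ub_diag_show()
 *  prints both rings starting one slot past the current index, oldest first.)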
447 */ 448 449static void ub_cmdtr_new(struct ub_dev *sc, struct ub_scsi_cmd *cmd) 450{ 451 int n; 452 struct ub_scsi_cmd_trace *t; 453 454 if ((n = sc->tr.cur + 1) == SCMD_TRACE_SZ) n = 0; 455 t = &sc->tr.vec[n]; 456 457 memset(t, 0, sizeof(struct ub_scsi_cmd_trace)); 458 t->tag = cmd->tag; 459 t->op = cmd->cdb[0]; 460 t->dir = cmd->dir; 461 t->req_size = cmd->len; 462 t->st_hst[0] = cmd->state; 463 464 sc->tr.cur = n; 465 cmd->trace_index = n; 466} 467 468static void ub_cmdtr_state(struct ub_dev *sc, struct ub_scsi_cmd *cmd) 469{ 470 int n; 471 struct ub_scsi_cmd_trace *t; 472 473 t = &sc->tr.vec[cmd->trace_index]; 474 if (t->tag == cmd->tag) { 475 if ((n = t->hcur + 1) == SCMD_ST_HIST_SZ) n = 0; 476 t->st_hst[n] = cmd->state; 477 t->hcur = n; 478 } 479} 480 481static void ub_cmdtr_act_len(struct ub_dev *sc, struct ub_scsi_cmd *cmd) 482{ 483 struct ub_scsi_cmd_trace *t; 484 485 t = &sc->tr.vec[cmd->trace_index]; 486 if (t->tag == cmd->tag) 487 t->act_size = cmd->act_len; 488} 489 490static void ub_cmdtr_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd, 491 unsigned char *sense) 492{ 493 struct ub_scsi_cmd_trace *t; 494 495 t = &sc->tr.vec[cmd->trace_index]; 496 if (t->tag == cmd->tag) { 497 t->key = sense[2] & 0x0F; 498 t->asc = sense[12]; 499 t->ascq = sense[13]; 500 } 501} 502 503static ssize_t ub_diag_show(struct device *dev, struct device_attribute *attr, 504 char *page) 505{ 506 struct usb_interface *intf; 507 struct ub_dev *sc; 508 struct list_head *p; 509 struct ub_lun *lun; 510 int cnt; 511 unsigned long flags; 512 int nc, nh; 513 int i, j; 514 struct ub_scsi_cmd_trace *t; 515 516 intf = to_usb_interface(dev); 517 sc = usb_get_intfdata(intf); 518 if (sc == NULL) 519 return 0; 520 521 cnt = 0; 522 spin_lock_irqsave(&sc->lock, flags); 523 524 cnt += sprintf(page + cnt, 525 "qlen %d qmax %d\n", 526 sc->cmd_queue.qlen, sc->cmd_queue.qmax); 527 cnt += sprintf(page + cnt, 528 "sg %d %d %d %d %d .. %d\n", 529 sc->sg_stat[0], 530 sc->sg_stat[1], 531 sc->sg_stat[2], 532 sc->sg_stat[3], 533 sc->sg_stat[4], 534 sc->sg_stat[5]); 535 536 list_for_each (p, &sc->luns) { 537 lun = list_entry(p, struct ub_lun, link); 538 cnt += sprintf(page + cnt, 539 "lun %u changed %d removable %d readonly %d\n", 540 lun->num, lun->changed, lun->removable, lun->readonly); 541 } 542 543 if ((nc = sc->tr.cur + 1) == SCMD_TRACE_SZ) nc = 0; 544 for (j = 0; j < SCMD_TRACE_SZ; j++) { 545 t = &sc->tr.vec[nc]; 546 547 cnt += sprintf(page + cnt, "%08x %02x", t->tag, t->op); 548 if (t->op == REQUEST_SENSE) { 549 cnt += sprintf(page + cnt, " [sense %x %02x %02x]", 550 t->key, t->asc, t->ascq); 551 } else { 552 cnt += sprintf(page + cnt, " %c", UB_DIR_CHAR(t->dir)); 553 cnt += sprintf(page + cnt, " [%5d %5d]", 554 t->req_size, t->act_size); 555 } 556 if ((nh = t->hcur + 1) == SCMD_ST_HIST_SZ) nh = 0; 557 for (i = 0; i < SCMD_ST_HIST_SZ; i++) { 558 cnt += sprintf(page + cnt, " %s", 559 ub_scsi_cmd_stname[(int)t->st_hst[nh]]); 560 if (++nh == SCMD_ST_HIST_SZ) nh = 0; 561 } 562 cnt += sprintf(page + cnt, "\n"); 563 564 if (++nc == SCMD_TRACE_SZ) nc = 0; 565 } 566 567 spin_unlock_irqrestore(&sc->lock, flags); 568 return cnt; 569} 570 571static DEVICE_ATTR(diag, S_IRUGO, ub_diag_show, NULL); /* N.B. World readable */ 572 573/* 574 * The id allocator. 575 * 576 * This also stores the host for indexing by minor, which is somewhat dirty. 
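 *
 * (Editorial sketch of the intended usage, mirroring ub_probe_lun() and
 *  ub_cleanup() below; the id doubles as the disk letter, "ub" + ('a' + id).
 *  ub_id_get() returns -1 when all UB_MAX_HOSTS slots are taken:
 *
 *	if ((lun->id = ub_id_get()) == -1)
 *		goto err_id;
 *	...
 *	ub_id_put(lun->id);
 *
 *  with ub_id_put() called on the error path or at final cleanup.)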
577 */ 578static int ub_id_get(void) 579{ 580 unsigned long flags; 581 int i; 582 583 spin_lock_irqsave(&ub_lock, flags); 584 for (i = 0; i < UB_MAX_HOSTS; i++) { 585 if (ub_hostv[i] == 0) { 586 ub_hostv[i] = 1; 587 spin_unlock_irqrestore(&ub_lock, flags); 588 return i; 589 } 590 } 591 spin_unlock_irqrestore(&ub_lock, flags); 592 return -1; 593} 594 595static void ub_id_put(int id) 596{ 597 unsigned long flags; 598 599 if (id < 0 || id >= UB_MAX_HOSTS) { 600 printk(KERN_ERR DRV_NAME ": bad host ID %d\n", id); 601 return; 602 } 603 604 spin_lock_irqsave(&ub_lock, flags); 605 if (ub_hostv[id] == 0) { 606 spin_unlock_irqrestore(&ub_lock, flags); 607 printk(KERN_ERR DRV_NAME ": freeing free host ID %d\n", id); 608 return; 609 } 610 ub_hostv[id] = 0; 611 spin_unlock_irqrestore(&ub_lock, flags); 612} 613 614/* 615 * Downcount for deallocation. This rides on two assumptions: 616 * - once something is poisoned, its refcount cannot grow 617 * - opens cannot happen at this time (del_gendisk was done) 618 * If the above is true, we can drop the lock, which we need for 619 * blk_cleanup_queue(): the silly thing may attempt to sleep. 620 * [Actually, it never needs to sleep for us, but it calls might_sleep()] 621 */ 622static void ub_put(struct ub_dev *sc) 623{ 624 unsigned long flags; 625 626 spin_lock_irqsave(&ub_lock, flags); 627 --sc->openc; 628 if (sc->openc == 0 && atomic_read(&sc->poison)) { 629 spin_unlock_irqrestore(&ub_lock, flags); 630 ub_cleanup(sc); 631 } else { 632 spin_unlock_irqrestore(&ub_lock, flags); 633 } 634} 635 636/* 637 * Final cleanup and deallocation. 638 */ 639static void ub_cleanup(struct ub_dev *sc) 640{ 641 struct list_head *p; 642 struct ub_lun *lun; 643 request_queue_t *q; 644 645 while (!list_empty(&sc->luns)) { 646 p = sc->luns.next; 647 lun = list_entry(p, struct ub_lun, link); 648 list_del(p); 649 650 /* I don't think queue can be NULL. But... Stolen from sx8.c */ 651 if ((q = lun->disk->queue) != NULL) 652 blk_cleanup_queue(q); 653 /* 654 * If we zero disk->private_data BEFORE put_disk, we have 655 * to check for NULL all over the place in open, release, 656 * check_media and revalidate, because the block level 657 * semaphore is well inside the put_disk. 658 * But we cannot zero after the call, because *disk is gone. 659 * The sd.c is blatantly racy in this area. 660 */ 661 /* disk->private_data = NULL; */ 662 put_disk(lun->disk); 663 lun->disk = NULL; 664 665 ub_id_put(lun->id); 666 kfree(lun); 667 } 668 669 kfree(sc); 670} 671 672/* 673 * The "command allocator". 674 */ 675static struct ub_scsi_cmd *ub_get_cmd(struct ub_lun *lun) 676{ 677 struct ub_scsi_cmd *ret; 678 679 if (lun->cmda[0]) 680 return NULL; 681 ret = &lun->cmdv[0]; 682 lun->cmda[0] = 1; 683 return ret; 684} 685 686static void ub_put_cmd(struct ub_lun *lun, struct ub_scsi_cmd *cmd) 687{ 688 if (cmd != &lun->cmdv[0]) { 689 printk(KERN_WARNING "%s: releasing a foreign cmd %p\n", 690 lun->name, cmd); 691 return; 692 } 693 if (!lun->cmda[0]) { 694 printk(KERN_WARNING "%s: releasing a free cmd\n", lun->name); 695 return; 696 } 697 lun->cmda[0] = 0; 698} 699 700/* 701 * The command queue. 
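 *
 * (Editorial note, not part of the original driver: this is a simple
 *  singly-linked FIFO. ub_cmdq_add() appends at the tail for normal
 *  submissions, while ub_cmdq_insert() pushes at the head so that the
 *  auto-issued REQUEST SENSE from ub_state_sense() runs ahead of anything
 *  already queued. ub_cmdq_pop() and ub_cmdq_peek() operate on the head.
 *  qmax only tracks the high-water mark reported by the "diag" attribute.)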
702 */ 703static void ub_cmdq_add(struct ub_dev *sc, struct ub_scsi_cmd *cmd) 704{ 705 struct ub_scsi_cmd_queue *t = &sc->cmd_queue; 706 707 if (t->qlen++ == 0) { 708 t->head = cmd; 709 t->tail = cmd; 710 } else { 711 t->tail->next = cmd; 712 t->tail = cmd; 713 } 714 715 if (t->qlen > t->qmax) 716 t->qmax = t->qlen; 717} 718 719static void ub_cmdq_insert(struct ub_dev *sc, struct ub_scsi_cmd *cmd) 720{ 721 struct ub_scsi_cmd_queue *t = &sc->cmd_queue; 722 723 if (t->qlen++ == 0) { 724 t->head = cmd; 725 t->tail = cmd; 726 } else { 727 cmd->next = t->head; 728 t->head = cmd; 729 } 730 731 if (t->qlen > t->qmax) 732 t->qmax = t->qlen; 733} 734 735static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc) 736{ 737 struct ub_scsi_cmd_queue *t = &sc->cmd_queue; 738 struct ub_scsi_cmd *cmd; 739 740 if (t->qlen == 0) 741 return NULL; 742 if (--t->qlen == 0) 743 t->tail = NULL; 744 cmd = t->head; 745 t->head = cmd->next; 746 cmd->next = NULL; 747 return cmd; 748} 749 750#define ub_cmdq_peek(sc) ((sc)->cmd_queue.head) 751 752/* 753 * The request function is our main entry point 754 */ 755 756static void ub_request_fn(request_queue_t *q) 757{ 758 struct ub_lun *lun = q->queuedata; 759 struct request *rq; 760 761 while ((rq = elv_next_request(q)) != NULL) { 762 if (ub_request_fn_1(lun, rq) != 0) { 763 blk_stop_queue(q); 764 break; 765 } 766 } 767} 768 769static int ub_request_fn_1(struct ub_lun *lun, struct request *rq) 770{ 771 struct ub_dev *sc = lun->udev; 772 struct ub_scsi_cmd *cmd; 773 int rc; 774 775 if (atomic_read(&sc->poison) || lun->changed) { 776 blkdev_dequeue_request(rq); 777 ub_end_rq(rq, 0); 778 return 0; 779 } 780 781 if ((cmd = ub_get_cmd(lun)) == NULL) 782 return -1; 783 memset(cmd, 0, sizeof(struct ub_scsi_cmd)); 784 785 blkdev_dequeue_request(rq); 786 if (blk_pc_request(rq)) { 787 rc = ub_cmd_build_packet(sc, lun, cmd, rq); 788 } else { 789 rc = ub_cmd_build_block(sc, lun, cmd, rq); 790 } 791 if (rc != 0) { 792 ub_put_cmd(lun, cmd); 793 ub_end_rq(rq, 0); 794 return 0; 795 } 796 cmd->state = UB_CMDST_INIT; 797 cmd->lun = lun; 798 cmd->done = ub_rw_cmd_done; 799 cmd->back = rq; 800 801 cmd->tag = sc->tagcnt++; 802 if (ub_submit_scsi(sc, cmd) != 0) { 803 ub_put_cmd(lun, cmd); 804 ub_end_rq(rq, 0); 805 return 0; 806 } 807 808 return 0; 809} 810 811static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, 812 struct ub_scsi_cmd *cmd, struct request *rq) 813{ 814 int ub_dir; 815 int n_elem; 816 unsigned int block, nblks; 817 818 if (rq_data_dir(rq) == WRITE) 819 ub_dir = UB_DIR_WRITE; 820 else 821 ub_dir = UB_DIR_READ; 822 cmd->dir = ub_dir; 823 824 /* 825 * get scatterlist from block layer 826 */ 827 n_elem = blk_rq_map_sg(lun->disk->queue, rq, &cmd->sgv[0]); 828 if (n_elem <= 0) { 829 printk(KERN_INFO "%s: failed request map (%d)\n", 830 sc->name, n_elem); /* P3 */ 831 return -1; /* request with no s/g entries? */ 832 } 833 if (n_elem > UB_MAX_REQ_SG) { /* Paranoia */ 834 printk(KERN_WARNING "%s: request with %d segments\n", 835 sc->name, n_elem); 836 return -1; 837 } 838 cmd->nsg = n_elem; 839 sc->sg_stat[n_elem < 5 ? n_elem : 5]++; 840 841 /* 842 * build the command 843 * 844 * The call to blk_queue_hardsect_size() guarantees that request 845 * is aligned, but it is given in terms of 512 byte units, always. 846 */ 847 block = rq->sector >> lun->capacity.bshift; 848 nblks = rq->nr_sectors >> lun->capacity.bshift; 849 850 cmd->cdb[0] = (ub_dir == UB_DIR_READ)? 
READ_10: WRITE_10; 851 /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */ 852 cmd->cdb[2] = block >> 24; 853 cmd->cdb[3] = block >> 16; 854 cmd->cdb[4] = block >> 8; 855 cmd->cdb[5] = block; 856 cmd->cdb[7] = nblks >> 8; 857 cmd->cdb[8] = nblks; 858 cmd->cdb_len = 10; 859 860 cmd->len = rq->nr_sectors * 512; 861 862 return 0; 863} 864 865static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, 866 struct ub_scsi_cmd *cmd, struct request *rq) 867{ 868 int n_elem; 869 870 if (rq->data_len == 0) { 871 cmd->dir = UB_DIR_NONE; 872 } else { 873 if (rq_data_dir(rq) == WRITE) 874 cmd->dir = UB_DIR_WRITE; 875 else 876 cmd->dir = UB_DIR_READ; 877 878 } 879 880 /* 881 * get scatterlist from block layer 882 */ 883 n_elem = blk_rq_map_sg(lun->disk->queue, rq, &cmd->sgv[0]); 884 if (n_elem < 0) { 885 printk(KERN_INFO "%s: failed request map (%d)\n", 886 sc->name, n_elem); /* P3 */ 887 return -1; 888 } 889 if (n_elem > UB_MAX_REQ_SG) { /* Paranoia */ 890 printk(KERN_WARNING "%s: request with %d segments\n", 891 sc->name, n_elem); 892 return -1; 893 } 894 cmd->nsg = n_elem; 895 sc->sg_stat[n_elem < 5 ? n_elem : 5]++; 896 897 memcpy(&cmd->cdb, rq->cmd, rq->cmd_len); 898 cmd->cdb_len = rq->cmd_len; 899 900 cmd->len = rq->data_len; 901 902 return 0; 903} 904 905static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd) 906{ 907 struct request *rq = cmd->back; 908 struct ub_lun *lun = cmd->lun; 909 int uptodate; 910 911 if (cmd->error == 0) { 912 uptodate = 1; 913 914 if (blk_pc_request(rq)) { 915 if (cmd->act_len >= rq->data_len) 916 rq->data_len = 0; 917 else 918 rq->data_len -= cmd->act_len; 919 } 920 } else { 921 uptodate = 0; 922 923 if (blk_pc_request(rq)) { 924 /* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */ 925 memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE); 926 rq->sense_len = UB_SENSE_SIZE; 927 if (sc->top_sense[0] != 0) 928 rq->errors = SAM_STAT_CHECK_CONDITION; 929 else 930 rq->errors = DID_ERROR << 16; 931 } 932 } 933 934 ub_put_cmd(lun, cmd); 935 ub_end_rq(rq, uptodate); 936 blk_start_queue(lun->disk->queue); 937} 938 939static void ub_end_rq(struct request *rq, int uptodate) 940{ 941 int rc; 942 943 rc = end_that_request_first(rq, uptodate, rq->hard_nr_sectors); 944 // assert(rc == 0); 945 end_that_request_last(rq); 946} 947 948/* 949 * Submit a regular SCSI operation (not an auto-sense). 950 * 951 * The Iron Law of Good Submit Routine is: 952 * Zero return - callback is done, Nonzero return - callback is not done. 953 * No exceptions. 954 * 955 * Host is assumed locked. 956 * 957 * XXX We only support Bulk for the moment. 958 */ 959static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd) 960{ 961 962 if (cmd->state != UB_CMDST_INIT || 963 (cmd->dir != UB_DIR_NONE && cmd->len == 0)) { 964 return -EINVAL; 965 } 966 967 ub_cmdq_add(sc, cmd); 968 /* 969 * We can call ub_scsi_dispatch(sc) right away here, but it's a little 970 * safer to jump to a tasklet, in case upper layers do something silly. 971 */ 972 tasklet_schedule(&sc->tasklet); 973 return 0; 974} 975 976/* 977 * Submit the first URB for the queued command. 978 * This function does not deal with queueing in any way. 
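 *
 * (Editorial sketch, derived from the bulk_cb_wrap layout above, of the
 *  31-byte Bulk-Only CBW this routine builds:
 *
 *	byte  0..3   Signature           0x43425355, spells "USBC"
 *	byte  4..7   Tag                 echoed back by the device in the CSW
 *	byte  8..11  DataTransferLength  cmd->len
 *	byte 12      Flags               bit 7 set for device-to-host (read)
 *	byte 13      Lun
 *	byte 14      Length              valid CDB bytes, cmd->cdb_len
 *	byte 15..30  CDB
 *
 *  Signature and DataTransferLength are little-endian (__le32); Tag is only
 *  echoed back, so, as noted below, its byte order never matters.)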
979 */ 980static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd) 981{ 982 struct bulk_cb_wrap *bcb; 983 int rc; 984 985 bcb = &sc->work_bcb; 986 987 /* 988 * ``If the allocation length is eighteen or greater, and a device 989 * server returns less than eithteen bytes of data, the application 990 * client should assume that the bytes not transferred would have been 991 * zeroes had the device server returned those bytes.'' 992 * 993 * We zero sense for all commands so that when a packet request 994 * fails it does not return a stale sense. 995 */ 996 memset(&sc->top_sense, 0, UB_SENSE_SIZE); 997 998 /* set up the command wrapper */ 999 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 1000 bcb->Tag = cmd->tag; /* Endianness is not important */ 1001 bcb->DataTransferLength = cpu_to_le32(cmd->len); 1002 bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0; 1003 bcb->Lun = (cmd->lun != NULL) ? cmd->lun->num : 0; 1004 bcb->Length = cmd->cdb_len; 1005 1006 /* copy the command payload */ 1007 memcpy(bcb->CDB, cmd->cdb, UB_MAX_CDB_SIZE); 1008 1009 UB_INIT_COMPLETION(sc->work_done); 1010 1011 sc->last_pipe = sc->send_bulk_pipe; 1012 usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe, 1013 bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc); 1014 1015 /* Fill what we shouldn't be filling, because usb-storage did so. */ 1016 sc->work_urb.actual_length = 0; 1017 sc->work_urb.error_count = 0; 1018 sc->work_urb.status = 0; 1019 1020 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { 1021 /* XXX Clear stalls */ 1022 ub_complete(&sc->work_done); 1023 return rc; 1024 } 1025 1026 sc->work_timer.expires = jiffies + UB_URB_TIMEOUT; 1027 add_timer(&sc->work_timer); 1028 1029 cmd->state = UB_CMDST_CMD; 1030 ub_cmdtr_state(sc, cmd); 1031 return 0; 1032} 1033 1034/* 1035 * Timeout handler. 1036 */ 1037static void ub_urb_timeout(unsigned long arg) 1038{ 1039 struct ub_dev *sc = (struct ub_dev *) arg; 1040 unsigned long flags; 1041 1042 spin_lock_irqsave(&sc->lock, flags); 1043 usb_unlink_urb(&sc->work_urb); 1044 spin_unlock_irqrestore(&sc->lock, flags); 1045} 1046 1047/* 1048 * Completion routine for the work URB. 1049 * 1050 * This can be called directly from usb_submit_urb (while we have 1051 * the sc->lock taken) and from an interrupt (while we do NOT have 1052 * the sc->lock taken). Therefore, bounce this off to a tasklet. 
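 *
 * (Editorial note, not part of the original driver: the resulting flow is
 *  that the URB callback only marks the poll-style ub_completion and
 *  schedules the tasklet; ub_scsi_action() then takes sc->lock, stops the
 *  work timer and calls ub_scsi_dispatch(), which advances the command at
 *  the head of the queue through the state machine in ub_scsi_urb_compl().)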
1053 */ 1054static void ub_urb_complete(struct urb *urb, struct pt_regs *pt) 1055{ 1056 struct ub_dev *sc = urb->context; 1057 1058 ub_complete(&sc->work_done); 1059 tasklet_schedule(&sc->tasklet); 1060} 1061 1062static void ub_scsi_action(unsigned long _dev) 1063{ 1064 struct ub_dev *sc = (struct ub_dev *) _dev; 1065 unsigned long flags; 1066 1067 spin_lock_irqsave(&sc->lock, flags); 1068 del_timer(&sc->work_timer); 1069 ub_scsi_dispatch(sc); 1070 spin_unlock_irqrestore(&sc->lock, flags); 1071} 1072 1073static void ub_scsi_dispatch(struct ub_dev *sc) 1074{ 1075 struct ub_scsi_cmd *cmd; 1076 int rc; 1077 1078 while ((cmd = ub_cmdq_peek(sc)) != NULL) { 1079 if (cmd->state == UB_CMDST_DONE) { 1080 ub_cmdq_pop(sc); 1081 (*cmd->done)(sc, cmd); 1082 } else if (cmd->state == UB_CMDST_INIT) { 1083 ub_cmdtr_new(sc, cmd); 1084 if ((rc = ub_scsi_cmd_start(sc, cmd)) == 0) 1085 break; 1086 cmd->error = rc; 1087 cmd->state = UB_CMDST_DONE; 1088 ub_cmdtr_state(sc, cmd); 1089 } else { 1090 if (!ub_is_completed(&sc->work_done)) 1091 break; 1092 ub_scsi_urb_compl(sc, cmd); 1093 } 1094 } 1095} 1096 1097static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) 1098{ 1099 struct urb *urb = &sc->work_urb; 1100 struct bulk_cs_wrap *bcs; 1101 int rc; 1102 1103 if (atomic_read(&sc->poison)) { 1104 /* A little too simplistic, I feel... */ 1105 goto Bad_End; 1106 } 1107 1108 if (cmd->state == UB_CMDST_CLEAR) { 1109 if (urb->status == -EPIPE) { 1110 /* 1111 * STALL while clearning STALL. 1112 * The control pipe clears itself - nothing to do. 1113 * XXX Might try to reset the device here and retry. 1114 */ 1115 printk(KERN_NOTICE "%s: stall on control pipe\n", 1116 sc->name); 1117 goto Bad_End; 1118 } 1119 1120 /* 1121 * We ignore the result for the halt clear. 1122 */ 1123 1124 /* reset the endpoint toggle */ 1125 usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe), 1126 usb_pipeout(sc->last_pipe), 0); 1127 1128 ub_state_sense(sc, cmd); 1129 1130 } else if (cmd->state == UB_CMDST_CLR2STS) { 1131 if (urb->status == -EPIPE) { 1132 /* 1133 * STALL while clearning STALL. 1134 * The control pipe clears itself - nothing to do. 1135 * XXX Might try to reset the device here and retry. 1136 */ 1137 printk(KERN_NOTICE "%s: stall on control pipe\n", 1138 sc->name); 1139 goto Bad_End; 1140 } 1141 1142 /* 1143 * We ignore the result for the halt clear. 1144 */ 1145 1146 /* reset the endpoint toggle */ 1147 usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe), 1148 usb_pipeout(sc->last_pipe), 0); 1149 1150 ub_state_stat(sc, cmd); 1151 1152 } else if (cmd->state == UB_CMDST_CLRRS) { 1153 if (urb->status == -EPIPE) { 1154 /* 1155 * STALL while clearning STALL. 1156 * The control pipe clears itself - nothing to do. 1157 * XXX Might try to reset the device here and retry. 1158 */ 1159 printk(KERN_NOTICE "%s: stall on control pipe\n", 1160 sc->name); 1161 goto Bad_End; 1162 } 1163 1164 /* 1165 * We ignore the result for the halt clear. 1166 */ 1167 1168 /* reset the endpoint toggle */ 1169 usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe), 1170 usb_pipeout(sc->last_pipe), 0); 1171 1172 ub_state_stat_counted(sc, cmd); 1173 1174 } else if (cmd->state == UB_CMDST_CMD) { 1175 if (urb->status == -EPIPE) { 1176 rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe); 1177 if (rc != 0) { 1178 printk(KERN_NOTICE "%s: " 1179 "unable to submit clear (%d)\n", 1180 sc->name, rc); 1181 /* 1182 * This is typically ENOMEM or some other such shit. 1183 * Retrying is pointless. Just do Bad End on it... 
1184 */ 1185 goto Bad_End; 1186 } 1187 cmd->state = UB_CMDST_CLEAR; 1188 ub_cmdtr_state(sc, cmd); 1189 return; 1190 } 1191 if (urb->status != 0) { 1192 goto Bad_End; 1193 } 1194 if (urb->actual_length != US_BULK_CB_WRAP_LEN) { 1195 /* XXX Must do reset here to unconfuse the device */ 1196 goto Bad_End; 1197 } 1198 1199 if (cmd->dir == UB_DIR_NONE || cmd->nsg < 1) { 1200 ub_state_stat(sc, cmd); 1201 return; 1202 } 1203 1204 // udelay(125); // usb-storage has this 1205 ub_data_start(sc, cmd); 1206 1207 } else if (cmd->state == UB_CMDST_DATA) { 1208 if (urb->status == -EPIPE) { 1209 rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe); 1210 if (rc != 0) { 1211 printk(KERN_NOTICE "%s: " 1212 "unable to submit clear (%d)\n", 1213 sc->name, rc); 1214 /* 1215 * This is typically ENOMEM or some other such shit. 1216 * Retrying is pointless. Just do Bad End on it... 1217 */ 1218 goto Bad_End; 1219 } 1220 cmd->state = UB_CMDST_CLR2STS; 1221 ub_cmdtr_state(sc, cmd); 1222 return; 1223 } 1224 if (urb->status == -EOVERFLOW) { 1225 /* 1226 * A babble? Failure, but we must transfer CSW now. 1227 * XXX This is going to end in perpetual babble. Reset. 1228 */ 1229 cmd->error = -EOVERFLOW; /* A cheap trick... */ 1230 ub_state_stat(sc, cmd); 1231 return; 1232 } 1233 if (urb->status != 0) 1234 goto Bad_End; 1235 1236 cmd->act_len += urb->actual_length; 1237 ub_cmdtr_act_len(sc, cmd); 1238 1239 if (++cmd->current_sg < cmd->nsg) { 1240 ub_data_start(sc, cmd); 1241 return; 1242 } 1243 ub_state_stat(sc, cmd); 1244 1245 } else if (cmd->state == UB_CMDST_STAT) { 1246 if (urb->status == -EPIPE) { 1247 rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe); 1248 if (rc != 0) { 1249 printk(KERN_NOTICE "%s: " 1250 "unable to submit clear (%d)\n", 1251 sc->name, rc); 1252 /* 1253 * This is typically ENOMEM or some other such shit. 1254 * Retrying is pointless. Just do Bad End on it... 1255 */ 1256 goto Bad_End; 1257 } 1258 1259 /* 1260 * Having a stall when getting CSW is an error, so 1261 * make sure uppper levels are not oblivious to it. 1262 */ 1263 cmd->error = -EIO; /* A cheap trick... */ 1264 1265 cmd->state = UB_CMDST_CLRRS; 1266 ub_cmdtr_state(sc, cmd); 1267 return; 1268 } 1269 if (urb->status == -EOVERFLOW) { 1270 /* 1271 * XXX We are screwed here. Retrying is pointless, 1272 * because the pipelined data will not get in until 1273 * we read with a big enough buffer. We must reset XXX. 1274 */ 1275 goto Bad_End; 1276 } 1277 if (urb->status != 0) 1278 goto Bad_End; 1279 1280 if (urb->actual_length == 0) { 1281 ub_state_stat_counted(sc, cmd); 1282 return; 1283 } 1284 1285 /* 1286 * Check the returned Bulk protocol status. 1287 * The status block has to be validated first. 1288 */ 1289 1290 bcs = &sc->work_bcs; 1291 1292 if (sc->signature == cpu_to_le32(0)) { 1293 /* 1294 * This is the first reply, so do not perform the check. 1295 * Instead, remember the signature the device uses 1296 * for future checks. But do not allow a nul. 1297 */ 1298 sc->signature = bcs->Signature; 1299 if (sc->signature == cpu_to_le32(0)) { 1300 ub_state_stat_counted(sc, cmd); 1301 return; 1302 } 1303 } else { 1304 if (bcs->Signature != sc->signature) { 1305 ub_state_stat_counted(sc, cmd); 1306 return; 1307 } 1308 } 1309 1310 if (bcs->Tag != cmd->tag) { 1311 /* 1312 * This usually happens when we disagree with the 1313 * device's microcode about something. For instance, 1314 * a few of them throw this after timeouts. They buffer 1315 * commands and reply at commands we timed out before. 1316 * Without flushing these replies we loop forever. 
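 *
 * (Editorial note: ub_state_stat_counted() bounds this flushing; its retry
 *  counter tops out at four attempts along the [C] path, after which it
 *  falls back to REQUEST SENSE instead of looping forever.)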
1317 */ 1318 ub_state_stat_counted(sc, cmd); 1319 return; 1320 } 1321 1322 rc = le32_to_cpu(bcs->Residue); 1323 if (rc != cmd->len - cmd->act_len) { 1324 /* 1325 * It is all right to transfer less, the caller has 1326 * to check. But it's not all right if the device 1327 * counts disagree with our counts. 1328 */ 1329 /* P3 */ printk("%s: resid %d len %d act %d\n", 1330 sc->name, rc, cmd->len, cmd->act_len); 1331 goto Bad_End; 1332 } 1333 1334 switch (bcs->Status) { 1335 case US_BULK_STAT_OK: 1336 break; 1337 case US_BULK_STAT_FAIL: 1338 ub_state_sense(sc, cmd); 1339 return; 1340 case US_BULK_STAT_PHASE: 1341 /* XXX We must reset the transport here */ 1342 /* P3 */ printk("%s: status PHASE\n", sc->name); 1343 goto Bad_End; 1344 default: 1345 printk(KERN_INFO "%s: unknown CSW status 0x%x\n", 1346 sc->name, bcs->Status); 1347 goto Bad_End; 1348 } 1349 1350 /* Not zeroing error to preserve a babble indicator */ 1351 if (cmd->error != 0) { 1352 ub_state_sense(sc, cmd); 1353 return; 1354 } 1355 cmd->state = UB_CMDST_DONE; 1356 ub_cmdtr_state(sc, cmd); 1357 ub_cmdq_pop(sc); 1358 (*cmd->done)(sc, cmd); 1359 1360 } else if (cmd->state == UB_CMDST_SENSE) { 1361 ub_state_done(sc, cmd, -EIO); 1362 1363 } else { 1364 printk(KERN_WARNING "%s: " 1365 "wrong command state %d\n", 1366 sc->name, cmd->state); 1367 goto Bad_End; 1368 } 1369 return; 1370 1371Bad_End: /* Little Excel is dead */ 1372 ub_state_done(sc, cmd, -EIO); 1373} 1374 1375/* 1376 * Factorization helper for the command state machine: 1377 * Initiate a data segment transfer. 1378 */ 1379static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd) 1380{ 1381 struct scatterlist *sg = &cmd->sgv[cmd->current_sg]; 1382 int pipe; 1383 int rc; 1384 1385 UB_INIT_COMPLETION(sc->work_done); 1386 1387 if (cmd->dir == UB_DIR_READ) 1388 pipe = sc->recv_bulk_pipe; 1389 else 1390 pipe = sc->send_bulk_pipe; 1391 sc->last_pipe = pipe; 1392 usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, 1393 page_address(sg->page) + sg->offset, sg->length, 1394 ub_urb_complete, sc); 1395 sc->work_urb.actual_length = 0; 1396 sc->work_urb.error_count = 0; 1397 sc->work_urb.status = 0; 1398 1399 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { 1400 /* XXX Clear stalls */ 1401 ub_complete(&sc->work_done); 1402 ub_state_done(sc, cmd, rc); 1403 return; 1404 } 1405 1406 sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT; 1407 add_timer(&sc->work_timer); 1408 1409 cmd->state = UB_CMDST_DATA; 1410 ub_cmdtr_state(sc, cmd); 1411} 1412 1413/* 1414 * Factorization helper for the command state machine: 1415 * Finish the command. 1416 */ 1417static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc) 1418{ 1419 1420 cmd->error = rc; 1421 cmd->state = UB_CMDST_DONE; 1422 ub_cmdtr_state(sc, cmd); 1423 ub_cmdq_pop(sc); 1424 (*cmd->done)(sc, cmd); 1425} 1426 1427/* 1428 * Factorization helper for the command state machine: 1429 * Submit a CSW read. 
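 *
 * (Editorial sketch, derived from the bulk_cs_wrap layout above, of the
 *  13-byte CSW read into sc->work_bcs:
 *
 *	byte  0..3   Signature  0x53425355, spells "USBS"; learned from the
 *	                        first reply and checked on later ones
 *	byte  4..7   Tag        must match the tag of the current command
 *	byte  8..11  Residue    bytes not transferred, compared against
 *	                        cmd->len - cmd->act_len
 *	byte 12      Status     0 good, 1 failed (go do REQUEST SENSE),
 *	                        2 phase error
 * )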
1430 */ 1431static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd) 1432{ 1433 int rc; 1434 1435 UB_INIT_COMPLETION(sc->work_done); 1436 1437 sc->last_pipe = sc->recv_bulk_pipe; 1438 usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe, 1439 &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc); 1440 sc->work_urb.actual_length = 0; 1441 sc->work_urb.error_count = 0; 1442 sc->work_urb.status = 0; 1443 1444 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { 1445 /* XXX Clear stalls */ 1446 ub_complete(&sc->work_done); 1447 ub_state_done(sc, cmd, rc); 1448 return -1; 1449 } 1450 1451 sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT; 1452 add_timer(&sc->work_timer); 1453 return 0; 1454} 1455 1456/* 1457 * Factorization helper for the command state machine: 1458 * Submit a CSW read and go to STAT state. 1459 */ 1460static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd) 1461{ 1462 1463 if (__ub_state_stat(sc, cmd) != 0) 1464 return; 1465 1466 cmd->stat_count = 0; 1467 cmd->state = UB_CMDST_STAT; 1468 ub_cmdtr_state(sc, cmd); 1469} 1470 1471/* 1472 * Factorization helper for the command state machine: 1473 * Submit a CSW read and go to STAT state with counter (along [C] path). 1474 */ 1475static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd) 1476{ 1477 1478 if (++cmd->stat_count >= 4) { 1479 ub_state_sense(sc, cmd); 1480 return; 1481 } 1482 1483 if (__ub_state_stat(sc, cmd) != 0) 1484 return; 1485 1486 cmd->state = UB_CMDST_STAT; 1487 ub_cmdtr_state(sc, cmd); 1488} 1489 1490/* 1491 * Factorization helper for the command state machine: 1492 * Submit a REQUEST SENSE and go to SENSE state. 1493 */ 1494static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd) 1495{ 1496 struct ub_scsi_cmd *scmd; 1497 struct scatterlist *sg; 1498 int rc; 1499 1500 if (cmd->cdb[0] == REQUEST_SENSE) { 1501 rc = -EPIPE; 1502 goto error; 1503 } 1504 1505 scmd = &sc->top_rqs_cmd; 1506 memset(scmd, 0, sizeof(struct ub_scsi_cmd)); 1507 scmd->cdb[0] = REQUEST_SENSE; 1508 scmd->cdb[4] = UB_SENSE_SIZE; 1509 scmd->cdb_len = 6; 1510 scmd->dir = UB_DIR_READ; 1511 scmd->state = UB_CMDST_INIT; 1512 scmd->nsg = 1; 1513 sg = &scmd->sgv[0]; 1514 sg->page = virt_to_page(sc->top_sense); 1515 sg->offset = (unsigned long)sc->top_sense & (PAGE_SIZE-1); 1516 sg->length = UB_SENSE_SIZE; 1517 scmd->len = UB_SENSE_SIZE; 1518 scmd->lun = cmd->lun; 1519 scmd->done = ub_top_sense_done; 1520 scmd->back = cmd; 1521 1522 scmd->tag = sc->tagcnt++; 1523 1524 cmd->state = UB_CMDST_SENSE; 1525 ub_cmdtr_state(sc, cmd); 1526 1527 ub_cmdq_insert(sc, scmd); 1528 return; 1529 1530error: 1531 ub_state_done(sc, cmd, rc); 1532} 1533 1534/* 1535 * A helper for the command's state machine: 1536 * Submit a stall clear. 
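 *
 * (Editorial note: this is the standard CLEAR_FEATURE(ENDPOINT_HALT)
 *  control request on the default pipe, with wIndex naming the stalled
 *  endpoint (USB_DIR_IN set for an IN pipe) and no data stage. The host
 *  side toggle still has to be reset afterwards, which is why the
 *  UB_CMDST_CLEAR* branches above call usb_settoggle() when it completes.)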
1537 */ 1538static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd, 1539 int stalled_pipe) 1540{ 1541 int endp; 1542 struct usb_ctrlrequest *cr; 1543 int rc; 1544 1545 endp = usb_pipeendpoint(stalled_pipe); 1546 if (usb_pipein (stalled_pipe)) 1547 endp |= USB_DIR_IN; 1548 1549 cr = &sc->work_cr; 1550 cr->bRequestType = USB_RECIP_ENDPOINT; 1551 cr->bRequest = USB_REQ_CLEAR_FEATURE; 1552 cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT); 1553 cr->wIndex = cpu_to_le16(endp); 1554 cr->wLength = cpu_to_le16(0); 1555 1556 UB_INIT_COMPLETION(sc->work_done); 1557 1558 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe, 1559 (unsigned char*) cr, NULL, 0, ub_urb_complete, sc); 1560 sc->work_urb.actual_length = 0; 1561 sc->work_urb.error_count = 0; 1562 sc->work_urb.status = 0; 1563 1564 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { 1565 ub_complete(&sc->work_done); 1566 return rc; 1567 } 1568 1569 sc->work_timer.expires = jiffies + UB_CTRL_TIMEOUT; 1570 add_timer(&sc->work_timer); 1571 return 0; 1572} 1573 1574/* 1575 */ 1576static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd) 1577{ 1578 unsigned char *sense = sc->top_sense; 1579 struct ub_scsi_cmd *cmd; 1580 1581 /* 1582 * Ignoring scmd->act_len, because the buffer was pre-zeroed. 1583 */ 1584 ub_cmdtr_sense(sc, scmd, sense); 1585 1586 /* 1587 * Find the command which triggered the unit attention or a check, 1588 * save the sense into it, and advance its state machine. 1589 */ 1590 if ((cmd = ub_cmdq_peek(sc)) == NULL) { 1591 printk(KERN_WARNING "%s: sense done while idle\n", sc->name); 1592 return; 1593 } 1594 if (cmd != scmd->back) { 1595 printk(KERN_WARNING "%s: " 1596 "sense done for wrong command 0x%x\n", 1597 sc->name, cmd->tag); 1598 return; 1599 } 1600 if (cmd->state != UB_CMDST_SENSE) { 1601 printk(KERN_WARNING "%s: " 1602 "sense done with bad cmd state %d\n", 1603 sc->name, cmd->state); 1604 return; 1605 } 1606 1607 cmd->key = sense[2] & 0x0F; 1608 cmd->asc = sense[12]; 1609 cmd->ascq = sense[13]; 1610 1611 ub_scsi_urb_compl(sc, cmd); 1612} 1613 1614/* 1615 * This is called from a process context. 1616 */ 1617static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun) 1618{ 1619 1620 lun->readonly = 0; /* XXX Query this from the device */ 1621 1622 lun->capacity.nsec = 0; 1623 lun->capacity.bsize = 512; 1624 lun->capacity.bshift = 0; 1625 1626 if (ub_sync_tur(sc, lun) != 0) 1627 return; /* Not ready */ 1628 lun->changed = 0; 1629 1630 if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) { 1631 /* 1632 * The retry here means something is wrong, either with the 1633 * device, with the transport, or with our code. 1634 * We keep this because sd.c has retries for capacity. 1635 */ 1636 if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) { 1637 lun->capacity.nsec = 0; 1638 lun->capacity.bsize = 512; 1639 lun->capacity.bshift = 0; 1640 } 1641 } 1642} 1643 1644/* 1645 * The open funcion. 1646 * This is mostly needed to keep refcounting, but also to support 1647 * media checks on removable media drives. 
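 *
 * (Editorial note, not part of the original comment: the refcounting is the
 *  openc counter protected by ub_lock. ub_bd_open() bumps it, ub_bd_release()
 *  drops it through ub_put(), and once the device has been poisoned by
 *  ub_disconnect() the last ub_put() runs ub_cleanup() to free the LUNs and
 *  the ub_dev itself.)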
1648 */ 1649static int ub_bd_open(struct inode *inode, struct file *filp) 1650{ 1651 struct gendisk *disk = inode->i_bdev->bd_disk; 1652 struct ub_lun *lun; 1653 struct ub_dev *sc; 1654 unsigned long flags; 1655 int rc; 1656 1657 if ((lun = disk->private_data) == NULL) 1658 return -ENXIO; 1659 sc = lun->udev; 1660 1661 spin_lock_irqsave(&ub_lock, flags); 1662 if (atomic_read(&sc->poison)) { 1663 spin_unlock_irqrestore(&ub_lock, flags); 1664 return -ENXIO; 1665 } 1666 sc->openc++; 1667 spin_unlock_irqrestore(&ub_lock, flags); 1668 1669 /* 1670 * This is a workaround for a specific problem in our block layer. 1671 * In 2.6.9, register_disk duplicates the code from rescan_partitions. 1672 * However, if we do add_disk with a device which persistently reports 1673 * a changed media, add_disk calls register_disk, which does do_open, 1674 * which will call rescan_paritions for changed media. After that, 1675 * register_disk attempts to do it all again and causes double kobject 1676 * registration and a eventually an oops on module removal. 1677 * 1678 * The bottom line is, Al Viro says that we should not allow 1679 * bdev->bd_invalidated to be set when doing add_disk no matter what. 1680 */ 1681 if (lun->first_open) { 1682 lun->first_open = 0; 1683 if (lun->changed) { 1684 rc = -ENOMEDIUM; 1685 goto err_open; 1686 } 1687 } 1688 1689 if (lun->removable || lun->readonly) 1690 check_disk_change(inode->i_bdev); 1691 1692 /* 1693 * The sd.c considers ->media_present and ->changed not equivalent, 1694 * under some pretty murky conditions (a failure of READ CAPACITY). 1695 * We may need it one day. 1696 */ 1697 if (lun->removable && lun->changed && !(filp->f_flags & O_NDELAY)) { 1698 rc = -ENOMEDIUM; 1699 goto err_open; 1700 } 1701 1702 if (lun->readonly && (filp->f_mode & FMODE_WRITE)) { 1703 rc = -EROFS; 1704 goto err_open; 1705 } 1706 1707 return 0; 1708 1709err_open: 1710 ub_put(sc); 1711 return rc; 1712} 1713 1714/* 1715 */ 1716static int ub_bd_release(struct inode *inode, struct file *filp) 1717{ 1718 struct gendisk *disk = inode->i_bdev->bd_disk; 1719 struct ub_lun *lun = disk->private_data; 1720 struct ub_dev *sc = lun->udev; 1721 1722 ub_put(sc); 1723 return 0; 1724} 1725 1726/* 1727 * The ioctl interface. 1728 */ 1729static int ub_bd_ioctl(struct inode *inode, struct file *filp, 1730 unsigned int cmd, unsigned long arg) 1731{ 1732 struct gendisk *disk = inode->i_bdev->bd_disk; 1733 void __user *usermem = (void __user *) arg; 1734 1735 return scsi_cmd_ioctl(filp, disk, cmd, usermem); 1736} 1737 1738/* 1739 * This is called once a new disk was seen by the block layer or by ub_probe(). 1740 * The main onjective here is to discover the features of the media such as 1741 * the capacity, read-only status, etc. USB storage generally does not 1742 * need to be spun up, but if we needed it, this would be the place. 1743 * 1744 * This call can sleep. 1745 * 1746 * The return code is not used. 1747 */ 1748static int ub_bd_revalidate(struct gendisk *disk) 1749{ 1750 struct ub_lun *lun = disk->private_data; 1751 1752 ub_revalidate(lun->udev, lun); 1753 1754 /* XXX Support sector size switching like in sr.c */ 1755 blk_queue_hardsect_size(disk->queue, lun->capacity.bsize); 1756 set_capacity(disk, lun->capacity.nsec); 1757 // set_disk_ro(sdkp->disk, lun->readonly); 1758 1759 return 0; 1760} 1761 1762/* 1763 * The check is called by the block layer to verify if the media 1764 * is still available. It is supposed to be harmless, lightweight and 1765 * non-intrusive in case the media was not changed. 
1766 * 1767 * This call can sleep. 1768 * 1769 * The return code is bool! 1770 */ 1771static int ub_bd_media_changed(struct gendisk *disk) 1772{ 1773 struct ub_lun *lun = disk->private_data; 1774 1775 if (!lun->removable) 1776 return 0; 1777 1778 /* 1779 * We clean checks always after every command, so this is not 1780 * as dangerous as it looks. If the TEST_UNIT_READY fails here, 1781 * the device is actually not ready with operator or software 1782 * intervention required. One dangerous item might be a drive which 1783 * spins itself down, and come the time to write dirty pages, this 1784 * will fail, then block layer discards the data. Since we never 1785 * spin drives up, such devices simply cannot be used with ub anyway. 1786 */ 1787 if (ub_sync_tur(lun->udev, lun) != 0) { 1788 lun->changed = 1; 1789 return 1; 1790 } 1791 1792 return lun->changed; 1793} 1794 1795static struct block_device_operations ub_bd_fops = { 1796 .owner = THIS_MODULE, 1797 .open = ub_bd_open, 1798 .release = ub_bd_release, 1799 .ioctl = ub_bd_ioctl, 1800 .media_changed = ub_bd_media_changed, 1801 .revalidate_disk = ub_bd_revalidate, 1802}; 1803 1804/* 1805 * Common ->done routine for commands executed synchronously. 1806 */ 1807static void ub_probe_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd) 1808{ 1809 struct completion *cop = cmd->back; 1810 complete(cop); 1811} 1812 1813/* 1814 * Test if the device has a check condition on it, synchronously. 1815 */ 1816static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun) 1817{ 1818 struct ub_scsi_cmd *cmd; 1819 enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) }; 1820 unsigned long flags; 1821 struct completion compl; 1822 int rc; 1823 1824 init_completion(&compl); 1825 1826 rc = -ENOMEM; 1827 if ((cmd = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL) 1828 goto err_alloc; 1829 memset(cmd, 0, ALLOC_SIZE); 1830 1831 cmd->cdb[0] = TEST_UNIT_READY; 1832 cmd->cdb_len = 6; 1833 cmd->dir = UB_DIR_NONE; 1834 cmd->state = UB_CMDST_INIT; 1835 cmd->lun = lun; /* This may be NULL, but that's ok */ 1836 cmd->done = ub_probe_done; 1837 cmd->back = &compl; 1838 1839 spin_lock_irqsave(&sc->lock, flags); 1840 cmd->tag = sc->tagcnt++; 1841 1842 rc = ub_submit_scsi(sc, cmd); 1843 spin_unlock_irqrestore(&sc->lock, flags); 1844 1845 if (rc != 0) { 1846 printk("ub: testing ready: submit error (%d)\n", rc); /* P3 */ 1847 goto err_submit; 1848 } 1849 1850 wait_for_completion(&compl); 1851 1852 rc = cmd->error; 1853 1854 if (rc == -EIO && cmd->key != 0) /* Retries for benh's key */ 1855 rc = cmd->key; 1856 1857err_submit: 1858 kfree(cmd); 1859err_alloc: 1860 return rc; 1861} 1862 1863/* 1864 * Read the SCSI capacity synchronously (for probing). 
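 *
 * (Editorial sketch of the reply parsed below: READ CAPACITY(10),
 *  opcode 0x25, returns 8 big-endian bytes:
 *
 *	bytes 0..3   last addressable LBA; nsec = (lba + 1) << bshift
 *	bytes 4..7   block size in bytes; only 512, 1024, 2048 and 4096
 *	             are accepted, bshift being the shift from 512
 *
 *  so capacity.nsec is always kept in Linux 512-byte sectors.)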
1865 */ 1866static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun, 1867 struct ub_capacity *ret) 1868{ 1869 struct ub_scsi_cmd *cmd; 1870 struct scatterlist *sg; 1871 char *p; 1872 enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) + 8 }; 1873 unsigned long flags; 1874 unsigned int bsize, shift; 1875 unsigned long nsec; 1876 struct completion compl; 1877 int rc; 1878 1879 init_completion(&compl); 1880 1881 rc = -ENOMEM; 1882 if ((cmd = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL) 1883 goto err_alloc; 1884 memset(cmd, 0, ALLOC_SIZE); 1885 p = (char *)cmd + sizeof(struct ub_scsi_cmd); 1886 1887 cmd->cdb[0] = 0x25; 1888 cmd->cdb_len = 10; 1889 cmd->dir = UB_DIR_READ; 1890 cmd->state = UB_CMDST_INIT; 1891 cmd->nsg = 1; 1892 sg = &cmd->sgv[0]; 1893 sg->page = virt_to_page(p); 1894 sg->offset = (unsigned long)p & (PAGE_SIZE-1); 1895 sg->length = 8; 1896 cmd->len = 8; 1897 cmd->lun = lun; 1898 cmd->done = ub_probe_done; 1899 cmd->back = &compl; 1900 1901 spin_lock_irqsave(&sc->lock, flags); 1902 cmd->tag = sc->tagcnt++; 1903 1904 rc = ub_submit_scsi(sc, cmd); 1905 spin_unlock_irqrestore(&sc->lock, flags); 1906 1907 if (rc != 0) { 1908 printk("ub: reading capacity: submit error (%d)\n", rc); /* P3 */ 1909 goto err_submit; 1910 } 1911 1912 wait_for_completion(&compl); 1913 1914 if (cmd->error != 0) { 1915 printk("ub: reading capacity: error %d\n", cmd->error); /* P3 */ 1916 rc = -EIO; 1917 goto err_read; 1918 } 1919 if (cmd->act_len != 8) { 1920 printk("ub: reading capacity: size %d\n", cmd->act_len); /* P3 */ 1921 rc = -EIO; 1922 goto err_read; 1923 } 1924 1925 /* sd.c special-cases sector size of 0 to mean 512. Needed? Safe? */ 1926 nsec = be32_to_cpu(*(__be32 *)p) + 1; 1927 bsize = be32_to_cpu(*(__be32 *)(p + 4)); 1928 switch (bsize) { 1929 case 512: shift = 0; break; 1930 case 1024: shift = 1; break; 1931 case 2048: shift = 2; break; 1932 case 4096: shift = 3; break; 1933 default: 1934 printk("ub: Bad sector size %u\n", bsize); /* P3 */ 1935 rc = -EDOM; 1936 goto err_inv_bsize; 1937 } 1938 1939 ret->bsize = bsize; 1940 ret->bshift = shift; 1941 ret->nsec = nsec << shift; 1942 rc = 0; 1943 1944err_inv_bsize: 1945err_read: 1946err_submit: 1947 kfree(cmd); 1948err_alloc: 1949 return rc; 1950} 1951 1952/* 1953 */ 1954static void ub_probe_urb_complete(struct urb *urb, struct pt_regs *pt) 1955{ 1956 struct completion *cop = urb->context; 1957 complete(cop); 1958} 1959 1960static void ub_probe_timeout(unsigned long arg) 1961{ 1962 struct completion *cop = (struct completion *) arg; 1963 complete(cop); 1964} 1965 1966/* 1967 * Get number of LUNs by the way of Bulk GetMaxLUN command. 
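 *
 * (Editorial sketch of the class request issued below:
 *
 *	bRequestType  USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE
 *	bRequest      US_BULK_GET_MAX_LUN (0xfe)
 *	wValue 0, wIndex = interface number, wLength 1
 *
 *  The single byte returned is the highest LUN number, so the LUN count is
 *  that value plus one, capped at UB_MAX_LUNS. A stall or an unusable reply
 *  leaves the caller with its default of a single LUN.)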
1968 */ 1969static int ub_sync_getmaxlun(struct ub_dev *sc) 1970{ 1971 int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber; 1972 unsigned char *p; 1973 enum { ALLOC_SIZE = 1 }; 1974 struct usb_ctrlrequest *cr; 1975 struct completion compl; 1976 struct timer_list timer; 1977 int nluns; 1978 int rc; 1979 1980 init_completion(&compl); 1981 1982 rc = -ENOMEM; 1983 if ((p = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL) 1984 goto err_alloc; 1985 *p = 55; 1986 1987 cr = &sc->work_cr; 1988 cr->bRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE; 1989 cr->bRequest = US_BULK_GET_MAX_LUN; 1990 cr->wValue = cpu_to_le16(0); 1991 cr->wIndex = cpu_to_le16(ifnum); 1992 cr->wLength = cpu_to_le16(1); 1993 1994 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe, 1995 (unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl); 1996 sc->work_urb.actual_length = 0; 1997 sc->work_urb.error_count = 0; 1998 sc->work_urb.status = 0; 1999 2000 if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) { 2001 if (rc == -EPIPE) { 2002 printk("%s: Stall submitting GetMaxLUN, using 1 LUN\n", 2003 sc->name); /* P3 */ 2004 } else { 2005 printk(KERN_NOTICE 2006 "%s: Unable to submit GetMaxLUN (%d)\n", 2007 sc->name, rc); 2008 } 2009 goto err_submit; 2010 } 2011 2012 init_timer(&timer); 2013 timer.function = ub_probe_timeout; 2014 timer.data = (unsigned long) &compl; 2015 timer.expires = jiffies + UB_CTRL_TIMEOUT; 2016 add_timer(&timer); 2017 2018 wait_for_completion(&compl); 2019 2020 del_timer_sync(&timer); 2021 usb_kill_urb(&sc->work_urb); 2022 2023 if ((rc = sc->work_urb.status) < 0) { 2024 if (rc == -EPIPE) { 2025 printk("%s: Stall at GetMaxLUN, using 1 LUN\n", 2026 sc->name); /* P3 */ 2027 } else { 2028 printk(KERN_NOTICE 2029 "%s: Error at GetMaxLUN (%d)\n", 2030 sc->name, rc); 2031 } 2032 goto err_io; 2033 } 2034 2035 if (sc->work_urb.actual_length != 1) { 2036 printk("%s: GetMaxLUN returned %d bytes\n", sc->name, 2037 sc->work_urb.actual_length); /* P3 */ 2038 nluns = 0; 2039 } else { 2040 if ((nluns = *p) == 55) { 2041 nluns = 0; 2042 } else { 2043 /* GetMaxLUN returns the maximum LUN number */ 2044 nluns += 1; 2045 if (nluns > UB_MAX_LUNS) 2046 nluns = UB_MAX_LUNS; 2047 } 2048 printk("%s: GetMaxLUN returned %d, using %d LUNs\n", sc->name, 2049 *p, nluns); /* P3 */ 2050 } 2051 2052 kfree(p); 2053 return nluns; 2054 2055err_io: 2056err_submit: 2057 kfree(p); 2058err_alloc: 2059 return rc; 2060} 2061 2062/* 2063 * Clear initial stalls. 
2064 */ 2065static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe) 2066{ 2067 int endp; 2068 struct usb_ctrlrequest *cr; 2069 struct completion compl; 2070 struct timer_list timer; 2071 int rc; 2072 2073 init_completion(&compl); 2074 2075 endp = usb_pipeendpoint(stalled_pipe); 2076 if (usb_pipein (stalled_pipe)) 2077 endp |= USB_DIR_IN; 2078 2079 cr = &sc->work_cr; 2080 cr->bRequestType = USB_RECIP_ENDPOINT; 2081 cr->bRequest = USB_REQ_CLEAR_FEATURE; 2082 cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT); 2083 cr->wIndex = cpu_to_le16(endp); 2084 cr->wLength = cpu_to_le16(0); 2085 2086 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe, 2087 (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl); 2088 sc->work_urb.actual_length = 0; 2089 sc->work_urb.error_count = 0; 2090 sc->work_urb.status = 0; 2091 2092 if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) { 2093 printk(KERN_WARNING 2094 "%s: Unable to submit a probe clear (%d)\n", sc->name, rc); 2095 return rc; 2096 } 2097 2098 init_timer(&timer); 2099 timer.function = ub_probe_timeout; 2100 timer.data = (unsigned long) &compl; 2101 timer.expires = jiffies + UB_CTRL_TIMEOUT; 2102 add_timer(&timer); 2103 2104 wait_for_completion(&compl); 2105 2106 del_timer_sync(&timer); 2107 usb_kill_urb(&sc->work_urb); 2108 2109 /* reset the endpoint toggle */ 2110 usb_settoggle(sc->dev, endp, usb_pipeout(sc->last_pipe), 0); 2111 2112 return 0; 2113} 2114 2115/* 2116 * Get the pipe settings. 2117 */ 2118static int ub_get_pipes(struct ub_dev *sc, struct usb_device *dev, 2119 struct usb_interface *intf) 2120{ 2121 struct usb_host_interface *altsetting = intf->cur_altsetting; 2122 struct usb_endpoint_descriptor *ep_in = NULL; 2123 struct usb_endpoint_descriptor *ep_out = NULL; 2124 struct usb_endpoint_descriptor *ep; 2125 int i; 2126 2127 /* 2128 * Find the endpoints we need. 2129 * We are expecting a minimum of 2 endpoints - in and out (bulk). 2130 * We will ignore any others. 2131 */ 2132 for (i = 0; i < altsetting->desc.bNumEndpoints; i++) { 2133 ep = &altsetting->endpoint[i].desc; 2134 2135 /* Is it a BULK endpoint? */ 2136 if ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) 2137 == USB_ENDPOINT_XFER_BULK) { 2138 /* BULK in or out? */ 2139 if (ep->bEndpointAddress & USB_DIR_IN) 2140 ep_in = ep; 2141 else 2142 ep_out = ep; 2143 } 2144 } 2145 2146 if (ep_in == NULL || ep_out == NULL) { 2147 printk(KERN_NOTICE "%s: failed endpoint check\n", 2148 sc->name); 2149 return -EIO; 2150 } 2151 2152 /* Calculate and store the pipe values */ 2153 sc->send_ctrl_pipe = usb_sndctrlpipe(dev, 0); 2154 sc->recv_ctrl_pipe = usb_rcvctrlpipe(dev, 0); 2155 sc->send_bulk_pipe = usb_sndbulkpipe(dev, 2156 ep_out->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); 2157 sc->recv_bulk_pipe = usb_rcvbulkpipe(dev, 2158 ep_in->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); 2159 2160 return 0; 2161} 2162 2163/* 2164 * Probing is done in the process context, which allows us to cheat 2165 * and not to build a state machine for the discovery. 
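 *
 * (Editorial outline, not part of the original comment, of the sequence in
 *  ub_probe() below: allocate and initialize the ub_dev, cache the bulk and
 *  control pipes, create the "diag" attribute, run a few synchronous
 *  TEST UNIT READY commands to clear an initial check condition, retrying
 *  while the sense key is 6 (UNIT ATTENTION), query Get Max LUN, then call
 *  ub_probe_lun() for each LUN to set up its gendisk and request queue.)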

/*
 * Probing is done in the process context, which allows us to cheat
 * and not build a state machine for the discovery.
 */
static int ub_probe(struct usb_interface *intf,
    const struct usb_device_id *dev_id)
{
	struct ub_dev *sc;
	int nluns;
	int rc;
	int i;

	rc = -ENOMEM;
	if ((sc = kmalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL)
		goto err_core;
	memset(sc, 0, sizeof(struct ub_dev));
	spin_lock_init(&sc->lock);
	INIT_LIST_HEAD(&sc->luns);
	usb_init_urb(&sc->work_urb);
	tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
	atomic_set(&sc->poison, 0);

	init_timer(&sc->work_timer);
	sc->work_timer.data = (unsigned long) sc;
	sc->work_timer.function = ub_urb_timeout;

	ub_init_completion(&sc->work_done);
	sc->work_done.done = 1;		/* A little yuk, but oh well... */

	sc->dev = interface_to_usbdev(intf);
	sc->intf = intf;
	// sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
	usb_set_intfdata(intf, sc);
	usb_get_dev(sc->dev);
	// usb_get_intf(sc->intf);	/* Do we need this? */

	snprintf(sc->name, 12, DRV_NAME "(%d.%d)",
	    sc->dev->bus->busnum, sc->dev->devnum);

	/* XXX Verify that we can handle the device (from descriptors) */

	ub_get_pipes(sc, sc->dev, intf);

	if (device_create_file(&sc->intf->dev, &dev_attr_diag) != 0)
		goto err_diag;

	/*
	 * At this point, all USB initialization is done, do upper layer.
	 * We really hate halfway initialized structures, so from the
	 * invariants perspective, this ub_dev is fully constructed at
	 * this point.
	 */

	/*
	 * This is needed to clear toggles. It is a problem only if we do
	 * `rmmod ub && modprobe ub` without disconnects, but we like that.
	 */
#if 0 /* iPod Mini fails if we do this (big white iPod works) */
	ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
	ub_probe_clear_stall(sc, sc->send_bulk_pipe);
#endif

	/*
	 * The way this is used by the startup code is a little specific.
	 * A SCSI check causes a USB stall. Our common case code sees it
	 * and clears the check, after which the device is ready for use.
	 * But if a check was not present, any command other than
	 * TEST_UNIT_READY ends with a lockup (including REQUEST_SENSE).
	 *
	 * If we neglect to clear the SCSI check, the first real command fails
	 * (which is the capacity readout). We clear that and retry, but we
	 * would rather not cause spurious retries for no reason.
	 *
	 * Revalidation may start with its own TEST_UNIT_READY, but that one
	 * has to succeed, so we clear checks with an additional one here.
	 * In any case it's not our business how revalidation is implemented.
	 */
	for (i = 0; i < 3; i++) {	/* Retries for benh's key */
		if ((rc = ub_sync_tur(sc, NULL)) <= 0) break;
		if (rc != 0x6) break;	/* 0x6 is the UNIT ATTENTION sense key */
		msleep(10);
	}

	nluns = 1;
	for (i = 0; i < 3; i++) {
		if ((rc = ub_sync_getmaxlun(sc)) < 0) {
			/*
			 * This segment is taken from usb-storage. They say
			 * that ZIP-100 needs this, but my own ZIP-100 works
			 * fine without this.
			 * Still, it does not seem to hurt anything.
			 */
			if (rc == -EPIPE) {
				ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
				ub_probe_clear_stall(sc, sc->send_bulk_pipe);
			}
			break;
		}
		if (rc != 0) {
			nluns = rc;
			break;
		}
		msleep(100);
	}

	for (i = 0; i < nluns; i++) {
		ub_probe_lun(sc, i);
	}
	return 0;

	/* device_remove_file(&sc->intf->dev, &dev_attr_diag); */
err_diag:
	usb_set_intfdata(intf, NULL);
	// usb_put_intf(sc->intf);
	usb_put_dev(sc->dev);
	kfree(sc);
err_core:
	return rc;
}

static int ub_probe_lun(struct ub_dev *sc, int lnum)
{
	struct ub_lun *lun;
	request_queue_t *q;
	struct gendisk *disk;
	int rc;

	rc = -ENOMEM;
	if ((lun = kmalloc(sizeof(struct ub_lun), GFP_KERNEL)) == NULL)
		goto err_alloc;
	memset(lun, 0, sizeof(struct ub_lun));
	lun->num = lnum;

	rc = -ENOSR;
	if ((lun->id = ub_id_get()) == -1)
		goto err_id;

	lun->udev = sc;
	list_add(&lun->link, &sc->luns);

	snprintf(lun->name, 16, DRV_NAME "%c(%d.%d.%d)",
	    lun->id + 'a', sc->dev->bus->busnum, sc->dev->devnum, lun->num);

	lun->removable = 1;		/* XXX Query this from the device */
	lun->changed = 1;		/* ub_revalidate clears only */
	lun->first_open = 1;
	ub_revalidate(sc, lun);

	rc = -ENOMEM;
	if ((disk = alloc_disk(UB_MINORS_PER_MAJOR)) == NULL)
		goto err_diskalloc;

	lun->disk = disk;
	sprintf(disk->disk_name, DRV_NAME "%c", lun->id + 'a');
	sprintf(disk->devfs_name, DEVFS_NAME "/%c", lun->id + 'a');
	disk->major = UB_MAJOR;
	disk->first_minor = lun->id * UB_MINORS_PER_MAJOR;
	disk->fops = &ub_bd_fops;
	disk->private_data = lun;
	disk->driverfs_dev = &sc->intf->dev;

	rc = -ENOMEM;
	if ((q = blk_init_queue(ub_request_fn, &sc->lock)) == NULL)
		goto err_blkqinit;

	disk->queue = q;

	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
	blk_queue_max_hw_segments(q, UB_MAX_REQ_SG);
	blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
	blk_queue_segment_boundary(q, 0xffffffff);	/* Dubious. */
	blk_queue_max_sectors(q, UB_MAX_SECTORS);
	blk_queue_hardsect_size(q, lun->capacity.bsize);

	q->queuedata = lun;

	set_capacity(disk, lun->capacity.nsec);
	if (lun->removable)
		disk->flags |= GENHD_FL_REMOVABLE;

	add_disk(disk);

	return 0;

err_blkqinit:
	put_disk(disk);
err_diskalloc:
	list_del(&lun->link);
	ub_id_put(lun->id);
err_id:
	kfree(lun);
err_alloc:
	return rc;
}

static void ub_disconnect(struct usb_interface *intf)
{
	struct ub_dev *sc = usb_get_intfdata(intf);
	struct list_head *p;
	struct ub_lun *lun;
	struct gendisk *disk;
	unsigned long flags;

	/*
	 * Prevent ub_bd_release from pulling the rug from under us.
	 * XXX This is starting to look like a kref.
	 * XXX Why not take this ref at probe time?
	 */
	spin_lock_irqsave(&ub_lock, flags);
	sc->openc++;
	spin_unlock_irqrestore(&ub_lock, flags);

	/*
	 * Fence stall clearings, operations triggered by unlinkings and so on.
	 * We do not attempt to unlink any URBs, because we do not trust the
	 * unlink paths in HC drivers. Also, we get -84 (-EILSEQ) upon
	 * disconnect anyway.
	 */
	atomic_set(&sc->poison, 1);

	/*
	 * Blow away queued commands.
	 *
	 * Actually, this never works, because before we get here
	 * the HCD terminates outstanding URB(s). It causes our
	 * SCSI command queue to advance, commands fail to submit,
	 * and the whole queue drains. So, we just use this code to
	 * print warnings.
	 */
	spin_lock_irqsave(&sc->lock, flags);
	{
		struct ub_scsi_cmd *cmd;
		int cnt = 0;
		/* The loop condition pops the command off the queue already. */
		while ((cmd = ub_cmdq_pop(sc)) != NULL) {
			cmd->error = -ENOTCONN;
			cmd->state = UB_CMDST_DONE;
			ub_cmdtr_state(sc, cmd);
			(*cmd->done)(sc, cmd);
			cnt++;
		}
		if (cnt != 0) {
			printk(KERN_WARNING "%s: "
			    "%d was queued after shutdown\n", sc->name, cnt);
		}
	}
	spin_unlock_irqrestore(&sc->lock, flags);

	/*
	 * Unregister the upper layer.
	 */
	list_for_each (p, &sc->luns) {
		lun = list_entry(p, struct ub_lun, link);
		disk = lun->disk;
		if (disk->flags & GENHD_FL_UP)
			del_gendisk(disk);
		/*
		 * I wish I could do:
		 *    set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
		 * As it is, we rely on our internal poisoning and let
		 * the upper levels spin furiously, failing all the I/O.
		 */
	}

	/*
	 * Taking a lock on a structure which is about to be freed
	 * is rather nonsensical. Here it is largely a way to do a debug freeze,
	 * and a bracket which shows where the nonsensical code segment ends.
	 *
	 * Testing for -EINPROGRESS is always a bug, so we are bending
	 * the rules a little.
	 */
	spin_lock_irqsave(&sc->lock, flags);
	if (sc->work_urb.status == -EINPROGRESS) {	/* janitors: ignore */
		printk(KERN_WARNING "%s: "
		    "URB is active after disconnect\n", sc->name);
	}
	spin_unlock_irqrestore(&sc->lock, flags);

	/*
	 * There is virtually no chance that another CPU is still running this
	 * long after ub_urb_complete should have called del_timer, but that
	 * holds only if the HCD did not forget to deliver a callback on unlink.
	 */
	del_timer_sync(&sc->work_timer);

	/*
	 * At this point there must be no commands coming from anyone
	 * and no URBs left in transit.
	 */

	device_remove_file(&sc->intf->dev, &dev_attr_diag);
	usb_set_intfdata(intf, NULL);
	// usb_put_intf(sc->intf);
	sc->intf = NULL;
	usb_put_dev(sc->dev);
	sc->dev = NULL;

	ub_put(sc);
}

static struct usb_driver ub_driver = {
	.owner =	THIS_MODULE,
	.name =		"ub",
	.probe =	ub_probe,
	.disconnect =	ub_disconnect,
	.id_table =	ub_usb_ids,
};

static int __init ub_init(void)
{
	int rc;

	if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0)
		goto err_regblkdev;
	devfs_mk_dir(DEVFS_NAME);

	if ((rc = usb_register(&ub_driver)) != 0)
		goto err_register;

	return 0;

err_register:
	devfs_remove(DEVFS_NAME);
	unregister_blkdev(UB_MAJOR, DRV_NAME);
err_regblkdev:
	return rc;
}

static void __exit ub_exit(void)
{
	usb_deregister(&ub_driver);

	devfs_remove(DEVFS_NAME);
	unregister_blkdev(UB_MAJOR, DRV_NAME);
}

module_init(ub_init);
module_exit(ub_exit);

MODULE_LICENSE("GPL");
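
/*
 * Editorial usage note: once this driver claims a device, each LUN shows up
 * as a whole-disk node ub<letter> (uba, ubb, ...) under block major
 * UB_MAJOR (180), with partitions following the usual gendisk naming
 * (uba1, uba2, ...). With devfs enabled, the nodes appear as ub/a, ub/b, ...
 * in the directory created by ub_init().
 */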