/*
 * The low performance USB storage driver (ub).
 *
 * Copyright (c) 1999, 2000 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
 * Copyright (C) 2004 Pete Zaitcev (zaitcev@yahoo.com)
 *
 * This work is a part of Linux kernel, is derived from it,
 * and is not licensed separately. See file COPYING for details.
 *
 * TODO (sorted by decreasing priority)
 *  -- Kill first_open (Al Viro fixed the block layer now)
 *  -- Do resets with usb_device_reset (needs a thread context, use khubd)
 *  -- set readonly flag for CDs, set removable flag for CF readers
 *  -- do inquiry and verify we got a disk and not a tape (for LUN mismatch)
 *  -- special case some senses, e.g. 3a/0 -> no media present, reduce retries
 *  -- verify the 13 conditions and do bulk resets
 *  -- kill last_pipe and simply do two-state clearing on both pipes
 *  -- verify protocol (bulk) from USB descriptors (maybe...)
 *  -- highmem
 *  -- move top_sense and work_bcs into separate allocations (if they survive)
 *     for cache purists and esoteric architectures.
 *  -- Allocate structure for LUN 0 before the first ub_sync_tur, avoid NULL. ?
 *  -- prune comments, they are too voluminous
 *  -- Exterminate P3 printks
 *  -- Resolve XXX's
 *  -- Redo "benh's retries", perhaps have spin-up code to handle them. V:D=?
 *  -- CLEAR, CLR2STS, CLRRS seem to be ripe for refactoring.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/blkdev.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/timer.h>
#include <scsi/scsi.h>

#define DRV_NAME "ub"
#define DEVFS_NAME DRV_NAME

#define UB_MAJOR 180

/*
 * The command state machine is the key model for understanding of this driver.
 *
 * The general rule is that all transitions are done towards the bottom
 * of the diagram, thus preventing any loops.
 *
 * An exception to that is how the STAT state is handled. A counter allows it
 * to be re-entered along the path marked with [C].
 *
 *     +--------+
 *     ! INIT   !
 *     +--------+
 *         !
 *      ub_scsi_cmd_start fails ->--------------------------------------------\
 *         !                                                                  !
 *         V                                                                  !
 *     +--------+                                                             !
 *     ! CMD    !                                                             !
 *     +--------+                                                             !
 *         !                                  +--------+                      !
 *         was -EPIPE -->--------------------->! CLEAR  !                     !
 *         !                                  +--------+                      !
 *         !                                      !                           !
 *         was error -->------------------------- ! -------------------------->\
 *         !                                      !                           !
 *  /--<-- cmd->dir == NONE ?                     !                           !
 *  !      !                                      !                           !
 *  !      V                                      !                           !
 *  !  +--------+                                 !                           !
 *  !  ! DATA   !                                 !                           !
 *  !  +--------+                                 !                           !
 *  !      !              +---------+             !                           !
 *  !      was -EPIPE -->-! CLR2STS !             !                           !
 *  !      !              +---------+             !                           !
 *  !      !                   !                  !                           !
 *  !      !                   was error -->----- ! ------------------------->\
 *  !      was error -->------ ! ---------------- ! ------------------------->\
 *  !      !                   !                  !                           !
 *  !      V                   !                  !                           !
 *  \--->+--------+            !                  !                           !
 *       ! STAT   !<-----------/                  !                           !
 *  /--->+--------+                               !                           !
 *  !        !                                    !                           !
 * [C]  was -EPIPE -->-\                          !                           !
 *  !        !         !                          !                           !
 *  +<---- len == 0    !                          !                           !
 *  !        !         !                          !                           !
 *  !   was error -->- ! ------------------------ ! ------------------------->\
 *  !        !         !                          !                           !
 *  +<---- bad CSW     !                          !                           !
 *  +<---- bad tag     !                          !                           !
 *  !        !         V                          !                           !
 *  !        !     +--------+                     !                           !
 *  !        !     ! CLRRS  !                     !                           !
 *  !        !     +--------+                     !                           !
 *  !        !         !                          !                           !
 *  \------- ! -------[C]------\                  !                           !
 *           !                 !                  !                           !
 *  cmd->error---\             !                  !                           !
 *           !   +------------>! SENSE !<---------/                           !
 *  STAT_FAIL----/             +--------+                                     !
 *           !                     !                                          V
 *           !                     V                                   +--------+
 *           \---------------------\--------------------------------->! DONE   !
 *                                                                    +--------+
 */
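
/*
 * A note on the [C] path above: re-entry into STAT is bounded by
 * cmd->stat_count. See ub_state_stat_counted() below, which gives up
 * and falls through to SENSE once the counter reaches four:
 *
 *	if (++cmd->stat_count >= 4) {
 *		ub_state_sense(sc, cmd);
 *		return;
 *	}
 */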

/*
 * Definitions which have to be scattered once we understand the layout better.
 */

/* Transport (despite PR in the name) */
#define US_PR_BULK	0x50		/* bulk only */

/* Protocol */
#define US_SC_SCSI	0x06		/* Transparent */

/*
 * This many LUNs per USB device.
 * Every one of them takes a host, see UB_MAX_HOSTS.
 */
#define UB_MAX_LUNS   9

/*
 */

#define UB_MINORS_PER_MAJOR	8

#define UB_MAX_CDB_SIZE      16		/* Corresponds to Bulk */

#define UB_SENSE_SIZE  18

/*
 */

/* command block wrapper */
struct bulk_cb_wrap {
	__le32	Signature;		/* contains 'USBC' */
	u32	Tag;			/* unique per command id */
	__le32	DataTransferLength;	/* size of data */
	u8	Flags;			/* direction in bit 0 */
	u8	Lun;			/* LUN */
	u8	Length;			/* of the CDB */
	u8	CDB[UB_MAX_CDB_SIZE];	/* max command */
};

#define US_BULK_CB_WRAP_LEN	31
#define US_BULK_CB_SIGN		0x43425355	/* spells out 'USBC' */
#define US_BULK_FLAG_IN		1
#define US_BULK_FLAG_OUT	0

/* command status wrapper */
struct bulk_cs_wrap {
	__le32	Signature;		/* should = 'USBS' */
	u32	Tag;			/* same as original command */
	__le32	Residue;		/* amount not transferred */
	u8	Status;			/* see below */
};

#define US_BULK_CS_WRAP_LEN	13
#define US_BULK_CS_SIGN		0x53425355	/* spells out 'USBS' */
#define US_BULK_STAT_OK		0
#define US_BULK_STAT_FAIL	1
#define US_BULK_STAT_PHASE	2

/* bulk-only class specific requests */
#define US_BULK_RESET_REQUEST	0xff
#define US_BULK_GET_MAX_LUN	0xfe
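
/*
 * For illustration, a CBW for TEST UNIT READY on LUN 0 with tag 1 and
 * no data transfer (as ub_scsi_cmd_start() builds it below) occupies
 * these 31 bytes on the wire, assuming a little-endian host because
 * Tag is written in native byte order:
 *
 *	55 53 42 43  01 00 00 00  00 00 00 00  00 00 06
 *	00 00 00 00  00 00 00 00  00 00 00 00  00 00 00 00
 *
 * (Signature 'USBC', Tag, DataTransferLength 0, Flags 0, Lun 0,
 * Length 6, then the zero-padded CDB.)
 */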

/*
 */
struct ub_dev;

#define UB_MAX_REQ_SG	4
#define UB_MAX_SECTORS 64

/*
 * A second is more than enough for a 32K transfer (UB_MAX_SECTORS)
 * even if a webcam hogs the bus, but some devices need time to spin up.
 */
#define UB_URB_TIMEOUT	(HZ*2)
#define UB_DATA_TIMEOUT	(HZ*5)	/* ZIP does spin-ups in the data phase */
#define UB_STAT_TIMEOUT	(HZ*5)	/* Same spinups and eject for a dataless cmd. */
#define UB_CTRL_TIMEOUT	(HZ/2)	/* 500ms ought to be enough to clear a stall */

/*
 * An instance of a SCSI command in transit.
 */
#define UB_DIR_NONE	0
#define UB_DIR_READ	1
#define UB_DIR_ILLEGAL2	2
#define UB_DIR_WRITE	3

#define UB_DIR_CHAR(c)  (((c)==UB_DIR_WRITE)? 'w': \
			 (((c)==UB_DIR_READ)? 'r': 'n'))

enum ub_scsi_cmd_state {
	UB_CMDST_INIT,			/* Initial state */
	UB_CMDST_CMD,			/* Command submitted */
	UB_CMDST_DATA,			/* Data phase */
	UB_CMDST_CLR2STS,		/* Clearing before requesting status */
	UB_CMDST_STAT,			/* Status phase */
	UB_CMDST_CLEAR,			/* Clearing a stall (halt, actually) */
	UB_CMDST_CLRRS,			/* Clearing before retrying status */
	UB_CMDST_SENSE,			/* Sending Request Sense */
	UB_CMDST_DONE			/* Final state */
};

static char *ub_scsi_cmd_stname[] = {
	".  ",
	"Cmd",
	"dat",
	"c2s",
	"sts",
	"clr",
	"crs",
	"Sen",
	"fin"
};

struct ub_scsi_cmd {
	unsigned char cdb[UB_MAX_CDB_SIZE];
	unsigned char cdb_len;

	unsigned char dir;		/* 0 - none, 1 - read, 3 - write. */
	unsigned char trace_index;
	enum ub_scsi_cmd_state state;
	unsigned int tag;
	struct ub_scsi_cmd *next;

	int error;			/* Return code - valid upon done */
	unsigned int act_len;		/* Return size */
	unsigned char key, asc, ascq;	/* May be valid if error==-EIO */

	int stat_count;			/* Retries getting status. */

	unsigned int len;		/* Requested length */
	unsigned int current_sg;
	unsigned int nsg;		/* sgv[nsg] */
	struct scatterlist sgv[UB_MAX_REQ_SG];

	struct ub_lun *lun;
	void (*done)(struct ub_dev *, struct ub_scsi_cmd *);
	void *back;
};

/*
 */
struct ub_capacity {
	unsigned long nsec;		/* Linux size - 512 byte sectors */
	unsigned int bsize;		/* Linux hardsect_size */
	unsigned int bshift;		/* Shift between 512 and hard sects */
};

/*
 * The SCSI command tracing structure.
 */

#define SCMD_ST_HIST_SZ   8
#define SCMD_TRACE_SZ    63		/* Less than 4KB of 61-byte lines */

struct ub_scsi_cmd_trace {
	int hcur;
	unsigned int tag;
	unsigned int req_size, act_size;
	unsigned char op;
	unsigned char dir;
	unsigned char key, asc, ascq;
	char st_hst[SCMD_ST_HIST_SZ];
};

struct ub_scsi_trace {
	int cur;
	struct ub_scsi_cmd_trace vec[SCMD_TRACE_SZ];
};

/*
 * This is a direct take-off from linux/include/completion.h
 * The difference is that I do not wait on this thing, just poll.
 * When I want to wait (ub_probe), I just use the stock completion.
 *
 * Note that INIT_COMPLETION takes no lock. It is correct. But why
 * in the bloody hell that thing takes struct instead of pointer to struct
 * is quite beyond me. I just copied it from the stock completion.
 */
struct ub_completion {
	unsigned int done;
	spinlock_t lock;
};

static inline void ub_init_completion(struct ub_completion *x)
{
	x->done = 0;
	spin_lock_init(&x->lock);
}

#define UB_INIT_COMPLETION(x)	((x).done = 0)

static void ub_complete(struct ub_completion *x)
{
	unsigned long flags;

	spin_lock_irqsave(&x->lock, flags);
	x->done++;
	spin_unlock_irqrestore(&x->lock, flags);
}

static int ub_is_completed(struct ub_completion *x)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&x->lock, flags);
	ret = x->done;
	spin_unlock_irqrestore(&x->lock, flags);
	return ret;
}
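
/*
 * A sketch of how this poll-style completion is used below, mirroring
 * ub_scsi_cmd_start() and ub_scsi_dispatch(): arm it before submitting
 * the work URB, then poll it from the tasklet instead of sleeping:
 *
 *	UB_INIT_COMPLETION(sc->work_done);
 *	usb_submit_urb(&sc->work_urb, GFP_ATOMIC);
 *	...
 *	if (!ub_is_completed(&sc->work_done))
 *		return;		(URB still in flight, try again later)
 */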

/*
 */
struct ub_scsi_cmd_queue {
	int qlen, qmax;
	struct ub_scsi_cmd *head, *tail;
};

/*
 * The block device instance (one per LUN).
 */
struct ub_lun {
	struct ub_dev *udev;
	struct list_head link;
	struct gendisk *disk;
	int id;				/* Host index */
	int num;			/* LUN number */
	char name[16];

	int changed;			/* Media was changed */
	int removable;
	int readonly;
	int first_open;			/* Kludge. See ub_bd_open. */

	/* Use Ingo's mempool if or when we have more than one command. */
	/*
	 * Currently we never need more than one command for the whole device.
	 * However, giving every LUN a command is a cheap and automatic way
	 * to enforce fairness between them.
	 */
	int cmda[1];
	struct ub_scsi_cmd cmdv[1];

	struct ub_capacity capacity;
};

/*
 * The USB device instance.
 */
struct ub_dev {
	spinlock_t lock;
	atomic_t poison;		/* The USB device is disconnected */
	int openc;			/* protected by ub_lock! */
					/* kref is too implicit for our taste */
	unsigned int tagcnt;
	char name[12];
	struct usb_device *dev;
	struct usb_interface *intf;

	struct list_head luns;

	unsigned int send_bulk_pipe;	/* cached pipe values */
	unsigned int recv_bulk_pipe;
	unsigned int send_ctrl_pipe;
	unsigned int recv_ctrl_pipe;

	struct tasklet_struct tasklet;

	struct ub_scsi_cmd_queue cmd_queue;
	struct ub_scsi_cmd top_rqs_cmd;	/* REQUEST SENSE */
	unsigned char top_sense[UB_SENSE_SIZE];

	struct ub_completion work_done;
	struct urb work_urb;
	struct timer_list work_timer;
	int last_pipe;			/* What might need clearing */
	__le32 signature;		/* Learned signature */
	struct bulk_cb_wrap work_bcb;
	struct bulk_cs_wrap work_bcs;
	struct usb_ctrlrequest work_cr;

	int sg_stat[UB_MAX_REQ_SG+1];
	struct ub_scsi_trace tr;
};

/*
 */
static void ub_cleanup(struct ub_dev *sc);
static int ub_request_fn_1(struct ub_lun *lun, struct request *rq);
static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct request *rq);
static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct request *rq);
static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_end_rq(struct request *rq, int uptodate);
static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_urb_complete(struct urb *urb, struct pt_regs *pt);
static void ub_scsi_action(unsigned long _dev);
static void ub_scsi_dispatch(struct ub_dev *sc);
static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc);
static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
    int stalled_pipe);
static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun);
static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_capacity *ret);
static int ub_probe_lun(struct ub_dev *sc, int lnum);

/*
 */
static struct usb_device_id ub_usb_ids[] = {
	// { USB_DEVICE_VER(0x0781, 0x0002, 0x0009, 0x0009) },	/* SDDR-31 */
	{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_BULK) },
	{ }
};

MODULE_DEVICE_TABLE(usb, ub_usb_ids);

/*
 * Find me a way to identify "next free minor" for add_disk(),
 * and the array disappears the next day. However, the number of
 * hosts has something to do with the naming and /proc/partitions.
 * This has to be thought out in detail before changing.
 * If UB_MAX_HOSTS were 1000, we'd use a bitmap. Or a better data structure.
 */
#define UB_MAX_HOSTS  26
static char ub_hostv[UB_MAX_HOSTS];

static DEFINE_SPINLOCK(ub_lock);	/* Locks globals and ->openc */

/*
 * The SCSI command tracing procedures.
 */
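
/*
 * The trace is a ring of SCMD_TRACE_SZ commands; each entry keeps the
 * last SCMD_ST_HIST_SZ state transitions of one command. The helpers
 * below fill it, and ub_diag_show() dumps it, oldest entry first,
 * through the sysfs "diag" attribute.
 */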

static void ub_cmdtr_new(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	int n;
	struct ub_scsi_cmd_trace *t;

	if ((n = sc->tr.cur + 1) == SCMD_TRACE_SZ) n = 0;
	t = &sc->tr.vec[n];

	memset(t, 0, sizeof(struct ub_scsi_cmd_trace));
	t->tag = cmd->tag;
	t->op = cmd->cdb[0];
	t->dir = cmd->dir;
	t->req_size = cmd->len;
	t->st_hst[0] = cmd->state;

	sc->tr.cur = n;
	cmd->trace_index = n;
}

static void ub_cmdtr_state(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	int n;
	struct ub_scsi_cmd_trace *t;

	t = &sc->tr.vec[cmd->trace_index];
	if (t->tag == cmd->tag) {
		if ((n = t->hcur + 1) == SCMD_ST_HIST_SZ) n = 0;
		t->st_hst[n] = cmd->state;
		t->hcur = n;
	}
}

static void ub_cmdtr_act_len(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct ub_scsi_cmd_trace *t;

	t = &sc->tr.vec[cmd->trace_index];
	if (t->tag == cmd->tag)
		t->act_size = cmd->act_len;
}

static void ub_cmdtr_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
    unsigned char *sense)
{
	struct ub_scsi_cmd_trace *t;

	t = &sc->tr.vec[cmd->trace_index];
	if (t->tag == cmd->tag) {
		t->key = sense[2] & 0x0F;
		t->asc = sense[12];
		t->ascq = sense[13];
	}
}

static ssize_t ub_diag_show(struct device *dev, struct device_attribute *attr,
    char *page)
{
	struct usb_interface *intf;
	struct ub_dev *sc;
	struct list_head *p;
	struct ub_lun *lun;
	int cnt;
	unsigned long flags;
	int nc, nh;
	int i, j;
	struct ub_scsi_cmd_trace *t;

	intf = to_usb_interface(dev);
	sc = usb_get_intfdata(intf);
	if (sc == NULL)
		return 0;

	cnt = 0;
	spin_lock_irqsave(&sc->lock, flags);

	cnt += sprintf(page + cnt,
	    "qlen %d qmax %d\n",
	    sc->cmd_queue.qlen, sc->cmd_queue.qmax);
	cnt += sprintf(page + cnt,
	    "sg %d %d %d %d %d\n",
	    sc->sg_stat[0],
	    sc->sg_stat[1],
	    sc->sg_stat[2],
	    sc->sg_stat[3],
	    sc->sg_stat[4]);

	list_for_each (p, &sc->luns) {
		lun = list_entry(p, struct ub_lun, link);
		cnt += sprintf(page + cnt,
		    "lun %u changed %d removable %d readonly %d\n",
		    lun->num, lun->changed, lun->removable, lun->readonly);
	}

	if ((nc = sc->tr.cur + 1) == SCMD_TRACE_SZ) nc = 0;
	for (j = 0; j < SCMD_TRACE_SZ; j++) {
		t = &sc->tr.vec[nc];

		cnt += sprintf(page + cnt, "%08x %02x", t->tag, t->op);
		if (t->op == REQUEST_SENSE) {
			cnt += sprintf(page + cnt, " [sense %x %02x %02x]",
					t->key, t->asc, t->ascq);
		} else {
			cnt += sprintf(page + cnt, " %c", UB_DIR_CHAR(t->dir));
			cnt += sprintf(page + cnt, " [%5d %5d]",
					t->req_size, t->act_size);
		}
		if ((nh = t->hcur + 1) == SCMD_ST_HIST_SZ) nh = 0;
		for (i = 0; i < SCMD_ST_HIST_SZ; i++) {
			cnt += sprintf(page + cnt, " %s",
					ub_scsi_cmd_stname[(int)t->st_hst[nh]]);
			if (++nh == SCMD_ST_HIST_SZ) nh = 0;
		}
		cnt += sprintf(page + cnt, "\n");

		if (++nc == SCMD_TRACE_SZ) nc = 0;
	}

	spin_unlock_irqrestore(&sc->lock, flags);
	return cnt;
}

static DEVICE_ATTR(diag, S_IRUGO, ub_diag_show, NULL); /* N.B. World readable */
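
/*
 * An illustrative (made-up) sample of the resulting "diag" output,
 * one line per traced command, state history oldest first:
 *
 *	qlen 0 qmax 1
 *	sg 0 3 0 0 12
 *	lun 0 changed 0 removable 1 readonly 0
 *	00000007 28 r [ 4096  4096] .   .   .   .   Cmd dat sts fin
 */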

/*
 * The id allocator.
 *
 * This also stores the host for indexing by minor, which is somewhat dirty.
 */
static int ub_id_get(void)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ub_lock, flags);
	for (i = 0; i < UB_MAX_HOSTS; i++) {
		if (ub_hostv[i] == 0) {
			ub_hostv[i] = 1;
			spin_unlock_irqrestore(&ub_lock, flags);
			return i;
		}
	}
	spin_unlock_irqrestore(&ub_lock, flags);
	return -1;
}

static void ub_id_put(int id)
{
	unsigned long flags;

	if (id < 0 || id >= UB_MAX_HOSTS) {
		printk(KERN_ERR DRV_NAME ": bad host ID %d\n", id);
		return;
	}

	spin_lock_irqsave(&ub_lock, flags);
	if (ub_hostv[id] == 0) {
		spin_unlock_irqrestore(&ub_lock, flags);
		printk(KERN_ERR DRV_NAME ": freeing free host ID %d\n", id);
		return;
	}
	ub_hostv[id] = 0;
	spin_unlock_irqrestore(&ub_lock, flags);
}

/*
 * Downcount for deallocation. This rides on two assumptions:
 * - once something is poisoned, its refcount cannot grow
 * - opens cannot happen at this time (del_gendisk was done)
 * If the above is true, we can drop the lock, which we need for
 * blk_cleanup_queue(): the silly thing may attempt to sleep.
 * [Actually, it never needs to sleep for us, but it calls might_sleep()]
 */
static void ub_put(struct ub_dev *sc)
{
	unsigned long flags;

	spin_lock_irqsave(&ub_lock, flags);
	--sc->openc;
	if (sc->openc == 0 && atomic_read(&sc->poison)) {
		spin_unlock_irqrestore(&ub_lock, flags);
		ub_cleanup(sc);
	} else {
		spin_unlock_irqrestore(&ub_lock, flags);
	}
}

/*
 * Final cleanup and deallocation.
 */
static void ub_cleanup(struct ub_dev *sc)
{
	struct list_head *p;
	struct ub_lun *lun;
	request_queue_t *q;

	while (!list_empty(&sc->luns)) {
		p = sc->luns.next;
		lun = list_entry(p, struct ub_lun, link);
		list_del(p);

		/* I don't think queue can be NULL. But... Stolen from sx8.c */
		if ((q = lun->disk->queue) != NULL)
			blk_cleanup_queue(q);
		/*
		 * If we zero disk->private_data BEFORE put_disk, we have
		 * to check for NULL all over the place in open, release,
		 * check_media and revalidate, because the block level
		 * semaphore is well inside the put_disk.
		 * But we cannot zero after the call, because *disk is gone.
		 * The sd.c is blatantly racy in this area.
		 */
		/* disk->private_data = NULL; */
		put_disk(lun->disk);
		lun->disk = NULL;

		ub_id_put(lun->id);
		kfree(lun);
	}

	kfree(sc);
}

/*
 * The "command allocator".
 */
static struct ub_scsi_cmd *ub_get_cmd(struct ub_lun *lun)
{
	struct ub_scsi_cmd *ret;

	if (lun->cmda[0])
		return NULL;
	ret = &lun->cmdv[0];
	lun->cmda[0] = 1;
	return ret;
}

static void ub_put_cmd(struct ub_lun *lun, struct ub_scsi_cmd *cmd)
{
	if (cmd != &lun->cmdv[0]) {
		printk(KERN_WARNING "%s: releasing a foreign cmd %p\n",
		    lun->name, cmd);
		return;
	}
	if (!lun->cmda[0]) {
		printk(KERN_WARNING "%s: releasing a free cmd\n", lun->name);
		return;
	}
	lun->cmda[0] = 0;
}

/*
 * The command queue.
 */
static void ub_cmdq_add(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct ub_scsi_cmd_queue *t = &sc->cmd_queue;

	if (t->qlen++ == 0) {
		t->head = cmd;
		t->tail = cmd;
	} else {
		t->tail->next = cmd;
		t->tail = cmd;
	}

	if (t->qlen > t->qmax)
		t->qmax = t->qlen;
}

static void ub_cmdq_insert(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct ub_scsi_cmd_queue *t = &sc->cmd_queue;

	if (t->qlen++ == 0) {
		t->head = cmd;
		t->tail = cmd;
	} else {
		cmd->next = t->head;
		t->head = cmd;
	}

	if (t->qlen > t->qmax)
		t->qmax = t->qlen;
}

static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc)
{
	struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
	struct ub_scsi_cmd *cmd;

	if (t->qlen == 0)
		return NULL;
	if (--t->qlen == 0)
		t->tail = NULL;
	cmd = t->head;
	t->head = cmd->next;
	cmd->next = NULL;
	return cmd;
}

#define ub_cmdq_peek(sc)  ((sc)->cmd_queue.head)

/*
 * The request function is our main entry point.
 */

static void ub_request_fn(request_queue_t *q)
{
	struct ub_lun *lun = q->queuedata;
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		if (ub_request_fn_1(lun, rq) != 0) {
			blk_stop_queue(q);
			break;
		}
	}
}

static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
{
	struct ub_dev *sc = lun->udev;
	struct ub_scsi_cmd *cmd;
	int rc;

	if (atomic_read(&sc->poison) || lun->changed) {
		blkdev_dequeue_request(rq);
		ub_end_rq(rq, 0);
		return 0;
	}

	if ((cmd = ub_get_cmd(lun)) == NULL)
		return -1;
	memset(cmd, 0, sizeof(struct ub_scsi_cmd));

	blkdev_dequeue_request(rq);
	if (blk_pc_request(rq)) {
		rc = ub_cmd_build_packet(sc, lun, cmd, rq);
	} else {
		rc = ub_cmd_build_block(sc, lun, cmd, rq);
	}
	if (rc != 0) {
		ub_put_cmd(lun, cmd);
		ub_end_rq(rq, 0);
		return 0;
	}
	cmd->state = UB_CMDST_INIT;
	cmd->lun = lun;
	cmd->done = ub_rw_cmd_done;
	cmd->back = rq;

	cmd->tag = sc->tagcnt++;
	if (ub_submit_scsi(sc, cmd) != 0) {
		ub_put_cmd(lun, cmd);
		ub_end_rq(rq, 0);
		return 0;
	}

	return 0;
}
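
/*
 * Note the contract above: a nonzero return from ub_request_fn_1 makes
 * ub_request_fn stop the queue, with the request left queued. That only
 * happens when the single per-LUN command is already in flight, and the
 * queue is restarted by the blk_start_queue() call in ub_rw_cmd_done()
 * once that command completes and is freed.
 */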

static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct request *rq)
{
	int ub_dir;
	int n_elem;
	unsigned int block, nblks;

	if (rq_data_dir(rq) == WRITE)
		ub_dir = UB_DIR_WRITE;
	else
		ub_dir = UB_DIR_READ;
	cmd->dir = ub_dir;

	/*
	 * get scatterlist from block layer
	 */
	n_elem = blk_rq_map_sg(lun->disk->queue, rq, &cmd->sgv[0]);
	if (n_elem <= 0) {
		printk(KERN_INFO "%s: failed request map (%d)\n",
		    sc->name, n_elem); /* P3 */
		return -1;		/* request with no s/g entries? */
	}
	if (n_elem > UB_MAX_REQ_SG) {	/* Paranoia */
		printk(KERN_WARNING "%s: request with %d segments\n",
		    sc->name, n_elem);
		return -1;
	}
	cmd->nsg = n_elem;
	sc->sg_stat[n_elem]++;

	/*
	 * build the command
	 *
	 * The call to blk_queue_hardsect_size() guarantees that request
	 * is aligned, but it is given in terms of 512 byte units, always.
	 */
	block = rq->sector >> lun->capacity.bshift;
	nblks = rq->nr_sectors >> lun->capacity.bshift;

	cmd->cdb[0] = (ub_dir == UB_DIR_READ)? READ_10: WRITE_10;
	/* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
	cmd->cdb[2] = block >> 24;
	cmd->cdb[3] = block >> 16;
	cmd->cdb[4] = block >> 8;
	cmd->cdb[5] = block;
	cmd->cdb[7] = nblks >> 8;
	cmd->cdb[8] = nblks;
	cmd->cdb_len = 10;

	cmd->len = rq->nr_sectors * 512;

	return 0;
}
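
/*
 * For illustration, with bshift 0 (512-byte sectors), a 4KB read at
 * sector 2048 becomes block 2048, nblks 8, and the CDB built above is
 *
 *	28 00 00 00 08 00 00 00 08 00
 *
 * that is, READ_10, the LBA 0x00000800 big-endian in bytes 2-5, and
 * the transfer length 0x0008 big-endian in bytes 7-8.
 */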

static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct request *rq)
{
	int n_elem;

	if (rq->data_len == 0) {
		cmd->dir = UB_DIR_NONE;
	} else {
		if (rq_data_dir(rq) == WRITE)
			cmd->dir = UB_DIR_WRITE;
		else
			cmd->dir = UB_DIR_READ;
	}

	/*
	 * get scatterlist from block layer
	 */
	n_elem = blk_rq_map_sg(lun->disk->queue, rq, &cmd->sgv[0]);
	if (n_elem < 0) {
		printk(KERN_INFO "%s: failed request map (%d)\n",
		    sc->name, n_elem); /* P3 */
		return -1;
	}
	if (n_elem > UB_MAX_REQ_SG) {	/* Paranoia */
		printk(KERN_WARNING "%s: request with %d segments\n",
		    sc->name, n_elem);
		return -1;
	}
	cmd->nsg = n_elem;
	sc->sg_stat[n_elem]++;

	memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
	cmd->cdb_len = rq->cmd_len;

	cmd->len = rq->data_len;

	return 0;
}

static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct request *rq = cmd->back;
	struct ub_lun *lun = cmd->lun;
	int uptodate;

	if (cmd->error == 0) {
		uptodate = 1;

		if (blk_pc_request(rq)) {
			if (cmd->act_len >= rq->data_len)
				rq->data_len = 0;
			else
				rq->data_len -= cmd->act_len;
		}
	} else {
		uptodate = 0;

		if (blk_pc_request(rq)) {
			/* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
			memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
			rq->sense_len = UB_SENSE_SIZE;
			if (sc->top_sense[0] != 0)
				rq->errors = SAM_STAT_CHECK_CONDITION;
			else
				rq->errors = DID_ERROR << 16;
		}
	}

	ub_put_cmd(lun, cmd);
	ub_end_rq(rq, uptodate);
	blk_start_queue(lun->disk->queue);
}

static void ub_end_rq(struct request *rq, int uptodate)
{
	int rc;

	rc = end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
	// assert(rc == 0);
	end_that_request_last(rq);
}

/*
 * Submit a regular SCSI operation (not an auto-sense).
 *
 * The Iron Law of Good Submit Routine is:
 * Zero return - callback is done, Nonzero return - callback is not done.
 * No exceptions.
 *
 * Host is assumed locked.
 *
 * XXX We only support Bulk for the moment.
 */
static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{

	if (cmd->state != UB_CMDST_INIT ||
	    (cmd->dir != UB_DIR_NONE && cmd->len == 0)) {
		return -EINVAL;
	}

	ub_cmdq_add(sc, cmd);
	/*
	 * We can call ub_scsi_dispatch(sc) right away here, but it's a little
	 * safer to jump to a tasklet, in case upper layers do something silly.
	 */
	tasklet_schedule(&sc->tasklet);
	return 0;
}

/*
 * Submit the first URB for the queued command.
 * This function does not deal with queueing in any way.
 */
static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct bulk_cb_wrap *bcb;
	int rc;

	bcb = &sc->work_bcb;

	/*
	 * ``If the allocation length is eighteen or greater, and a device
	 * server returns less than eighteen bytes of data, the application
	 * client should assume that the bytes not transferred would have been
	 * zeroes had the device server returned those bytes.''
	 *
	 * We zero sense for all commands so that when a packet request
	 * fails it does not return a stale sense.
	 */
	memset(&sc->top_sense, 0, UB_SENSE_SIZE);

	/* set up the command wrapper */
	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
	bcb->Tag = cmd->tag;		/* Endianness is not important */
	bcb->DataTransferLength = cpu_to_le32(cmd->len);
	bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0;
	bcb->Lun = (cmd->lun != NULL) ? cmd->lun->num : 0;
	bcb->Length = cmd->cdb_len;

	/* copy the command payload */
	memcpy(bcb->CDB, cmd->cdb, UB_MAX_CDB_SIZE);

	UB_INIT_COMPLETION(sc->work_done);

	sc->last_pipe = sc->send_bulk_pipe;
	usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe,
	    bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc);
	sc->work_urb.transfer_flags = 0;

	/* Fill what we shouldn't be filling, because usb-storage did so. */
	sc->work_urb.actual_length = 0;
	sc->work_urb.error_count = 0;
	sc->work_urb.status = 0;

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
		/* XXX Clear stalls */
		printk("ub: cmd #%d start failed (%d)\n", cmd->tag, rc); /* P3 */
		ub_complete(&sc->work_done);
		return rc;
	}

	sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
	add_timer(&sc->work_timer);

	cmd->state = UB_CMDST_CMD;
	ub_cmdtr_state(sc, cmd);
	return 0;
}

/*
 * Timeout handler.
 */
static void ub_urb_timeout(unsigned long arg)
{
	struct ub_dev *sc = (struct ub_dev *) arg;
	unsigned long flags;

	spin_lock_irqsave(&sc->lock, flags);
	usb_unlink_urb(&sc->work_urb);
	spin_unlock_irqrestore(&sc->lock, flags);
}
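
/*
 * Note that unlinking does not complete the command here: the URB
 * typically finishes with -ECONNRESET, which the tasklet then feeds
 * through the regular state machine as an ordinary error.
 */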

/*
 * Completion routine for the work URB.
 *
 * This can be called directly from usb_submit_urb (while we have
 * the sc->lock taken) and from an interrupt (while we do NOT have
 * the sc->lock taken). Therefore, bounce this off to a tasklet.
 */
static void ub_urb_complete(struct urb *urb, struct pt_regs *pt)
{
	struct ub_dev *sc = urb->context;

	ub_complete(&sc->work_done);
	tasklet_schedule(&sc->tasklet);
}

static void ub_scsi_action(unsigned long _dev)
{
	struct ub_dev *sc = (struct ub_dev *) _dev;
	unsigned long flags;

	spin_lock_irqsave(&sc->lock, flags);
	del_timer(&sc->work_timer);
	ub_scsi_dispatch(sc);
	spin_unlock_irqrestore(&sc->lock, flags);
}

static void ub_scsi_dispatch(struct ub_dev *sc)
{
	struct ub_scsi_cmd *cmd;
	int rc;

	while ((cmd = ub_cmdq_peek(sc)) != NULL) {
		if (cmd->state == UB_CMDST_DONE) {
			ub_cmdq_pop(sc);
			(*cmd->done)(sc, cmd);
		} else if (cmd->state == UB_CMDST_INIT) {
			ub_cmdtr_new(sc, cmd);
			if ((rc = ub_scsi_cmd_start(sc, cmd)) == 0)
				break;
			cmd->error = rc;
			cmd->state = UB_CMDST_DONE;
			ub_cmdtr_state(sc, cmd);
		} else {
			if (!ub_is_completed(&sc->work_done))
				break;
			ub_scsi_urb_compl(sc, cmd);
		}
	}
}
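
/*
 * The heart of the state machine: examine the URB which just completed
 * and move the current command along the diagram at the top of this
 * file. Called from ub_scsi_dispatch() with sc->lock held.
 */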

static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct urb *urb = &sc->work_urb;
	struct bulk_cs_wrap *bcs;
	int rc;

	if (atomic_read(&sc->poison)) {
		/* A little too simplistic, I feel... */
		goto Bad_End;
	}

	if (cmd->state == UB_CMDST_CLEAR) {
		if (urb->status == -EPIPE) {
			/*
			 * STALL while clearing STALL.
			 * The control pipe clears itself - nothing to do.
			 * XXX Might try to reset the device here and retry.
			 */
			printk(KERN_NOTICE "%s: stall on control pipe\n",
			    sc->name);
			goto Bad_End;
		}

		/*
		 * We ignore the result for the halt clear.
		 */

		/* reset the endpoint toggle */
		usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
			usb_pipeout(sc->last_pipe), 0);

		ub_state_sense(sc, cmd);

	} else if (cmd->state == UB_CMDST_CLR2STS) {
		if (urb->status == -EPIPE) {
			/*
			 * STALL while clearing STALL.
			 * The control pipe clears itself - nothing to do.
			 * XXX Might try to reset the device here and retry.
			 */
			printk(KERN_NOTICE "%s: stall on control pipe\n",
			    sc->name);
			goto Bad_End;
		}

		/*
		 * We ignore the result for the halt clear.
		 */

		/* reset the endpoint toggle */
		usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
			usb_pipeout(sc->last_pipe), 0);

		ub_state_stat(sc, cmd);

	} else if (cmd->state == UB_CMDST_CLRRS) {
		if (urb->status == -EPIPE) {
			/*
			 * STALL while clearing STALL.
			 * The control pipe clears itself - nothing to do.
			 * XXX Might try to reset the device here and retry.
			 */
			printk(KERN_NOTICE "%s: stall on control pipe\n",
			    sc->name);
			goto Bad_End;
		}

		/*
		 * We ignore the result for the halt clear.
		 */

		/* reset the endpoint toggle */
		usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
			usb_pipeout(sc->last_pipe), 0);

		ub_state_stat_counted(sc, cmd);

	} else if (cmd->state == UB_CMDST_CMD) {
		if (urb->status == -EPIPE) {
			rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
			if (rc != 0) {
				printk(KERN_NOTICE "%s: "
				    "unable to submit clear (%d)\n",
				    sc->name, rc);
				/*
				 * This is typically ENOMEM or some other such shit.
				 * Retrying is pointless. Just do Bad End on it...
				 */
				goto Bad_End;
			}
			cmd->state = UB_CMDST_CLEAR;
			ub_cmdtr_state(sc, cmd);
			return;
		}
		if (urb->status != 0) {
			printk("ub: cmd #%d cmd status (%d)\n", cmd->tag, urb->status); /* P3 */
			goto Bad_End;
		}
		if (urb->actual_length != US_BULK_CB_WRAP_LEN) {
			printk("ub: cmd #%d xferred %d\n", cmd->tag, urb->actual_length); /* P3 */
			/* XXX Must do reset here to unconfuse the device */
			goto Bad_End;
		}

		if (cmd->dir == UB_DIR_NONE || cmd->nsg < 1) {
			ub_state_stat(sc, cmd);
			return;
		}

		// udelay(125);		// usb-storage has this
		ub_data_start(sc, cmd);

	} else if (cmd->state == UB_CMDST_DATA) {
		if (urb->status == -EPIPE) {
			rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
			if (rc != 0) {
				printk(KERN_NOTICE "%s: "
				    "unable to submit clear (%d)\n",
				    sc->name, rc);
				/*
				 * This is typically ENOMEM or some other such shit.
				 * Retrying is pointless. Just do Bad End on it...
				 */
				goto Bad_End;
			}
			cmd->state = UB_CMDST_CLR2STS;
			ub_cmdtr_state(sc, cmd);
			return;
		}
		if (urb->status == -EOVERFLOW) {
			/*
			 * A babble? Failure, but we must transfer CSW now.
			 * XXX This is going to end in perpetual babble. Reset.
			 */
			cmd->error = -EOVERFLOW;	/* A cheap trick... */
			ub_state_stat(sc, cmd);
			return;
		}
		if (urb->status != 0)
			goto Bad_End;

		cmd->act_len += urb->actual_length;
		ub_cmdtr_act_len(sc, cmd);

		if (++cmd->current_sg < cmd->nsg) {
			ub_data_start(sc, cmd);
			return;
		}
		ub_state_stat(sc, cmd);

	} else if (cmd->state == UB_CMDST_STAT) {
		if (urb->status == -EPIPE) {
			rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
			if (rc != 0) {
				printk(KERN_NOTICE "%s: "
				    "unable to submit clear (%d)\n",
				    sc->name, rc);
				/*
				 * This is typically ENOMEM or some other such shit.
				 * Retrying is pointless. Just do Bad End on it...
				 */
				goto Bad_End;
			}

			/*
			 * Having a stall when getting CSW is an error, so
			 * make sure upper levels are not oblivious to it.
			 */
			cmd->error = -EIO;		/* A cheap trick... */

			cmd->state = UB_CMDST_CLRRS;
			ub_cmdtr_state(sc, cmd);
			return;
		}
		if (urb->status == -EOVERFLOW) {
			/*
			 * XXX We are screwed here. Retrying is pointless,
			 * because the pipelined data will not get in until
			 * we read with a big enough buffer. We must reset XXX.
			 */
			goto Bad_End;
		}
		if (urb->status != 0)
			goto Bad_End;

		if (urb->actual_length == 0) {
			ub_state_stat_counted(sc, cmd);
			return;
		}

		/*
		 * Check the returned Bulk protocol status.
		 * The status block has to be validated first.
		 */

		bcs = &sc->work_bcs;

		if (sc->signature == cpu_to_le32(0)) {
			/*
			 * This is the first reply, so do not perform the check.
			 * Instead, remember the signature the device uses
			 * for future checks. But do not allow a nul.
			 */
			sc->signature = bcs->Signature;
			if (sc->signature == cpu_to_le32(0)) {
				ub_state_stat_counted(sc, cmd);
				return;
			}
		} else {
			if (bcs->Signature != sc->signature) {
				ub_state_stat_counted(sc, cmd);
				return;
			}
		}

		if (bcs->Tag != cmd->tag) {
			/*
			 * This usually happens when we disagree with the
			 * device's microcode about something. For instance,
			 * a few of them throw this after timeouts. They buffer
			 * commands and reply to commands we timed out before.
			 * Without flushing these replies we loop forever.
			 */
			ub_state_stat_counted(sc, cmd);
			return;
		}

		rc = le32_to_cpu(bcs->Residue);
		if (rc != cmd->len - cmd->act_len) {
			/*
			 * It is all right to transfer less, the caller has
			 * to check. But it's not all right if the device
			 * counts disagree with our counts.
			 */
			/* P3 */ printk("%s: resid %d len %d act %d\n",
			    sc->name, rc, cmd->len, cmd->act_len);
			goto Bad_End;
		}

		switch (bcs->Status) {
		case US_BULK_STAT_OK:
			break;
		case US_BULK_STAT_FAIL:
			ub_state_sense(sc, cmd);
			return;
		case US_BULK_STAT_PHASE:
			/* XXX We must reset the transport here */
			/* P3 */ printk("%s: status PHASE\n", sc->name);
			goto Bad_End;
		default:
			printk(KERN_INFO "%s: unknown CSW status 0x%x\n",
			    sc->name, bcs->Status);
			goto Bad_End;
		}

		/* Not zeroing error to preserve a babble indicator */
		if (cmd->error != 0) {
			ub_state_sense(sc, cmd);
			return;
		}
		cmd->state = UB_CMDST_DONE;
		ub_cmdtr_state(sc, cmd);
		ub_cmdq_pop(sc);
		(*cmd->done)(sc, cmd);

	} else if (cmd->state == UB_CMDST_SENSE) {
		ub_state_done(sc, cmd, -EIO);

	} else {
		printk(KERN_WARNING "%s: "
		    "wrong command state %d\n",
		    sc->name, cmd->state);
		goto Bad_End;
	}
	return;

Bad_End: /* Little Excel is dead */
	ub_state_done(sc, cmd, -EIO);
}

/*
 * Factorization helper for the command state machine:
 * Initiate a data segment transfer.
 */
static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct scatterlist *sg = &cmd->sgv[cmd->current_sg];
	int pipe;
	int rc;

	UB_INIT_COMPLETION(sc->work_done);

	if (cmd->dir == UB_DIR_READ)
		pipe = sc->recv_bulk_pipe;
	else
		pipe = sc->send_bulk_pipe;
	sc->last_pipe = pipe;
	usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe,
	    page_address(sg->page) + sg->offset, sg->length,
	    ub_urb_complete, sc);
	sc->work_urb.transfer_flags = 0;
	sc->work_urb.actual_length = 0;
	sc->work_urb.error_count = 0;
	sc->work_urb.status = 0;

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
		/* XXX Clear stalls */
		printk("ub: data #%d submit failed (%d)\n", cmd->tag, rc); /* P3 */
		ub_complete(&sc->work_done);
		ub_state_done(sc, cmd, rc);
		return;
	}

	sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT;
	add_timer(&sc->work_timer);

	cmd->state = UB_CMDST_DATA;
	ub_cmdtr_state(sc, cmd);
}

/*
 * Factorization helper for the command state machine:
 * Finish the command.
 */
static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc)
{

	cmd->error = rc;
	cmd->state = UB_CMDST_DONE;
	ub_cmdtr_state(sc, cmd);
	ub_cmdq_pop(sc);
	(*cmd->done)(sc, cmd);
}

/*
 * Factorization helper for the command state machine:
 * Submit a CSW read.
 */
static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	int rc;

	UB_INIT_COMPLETION(sc->work_done);

	sc->last_pipe = sc->recv_bulk_pipe;
	usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe,
	    &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc);
	sc->work_urb.transfer_flags = 0;
	sc->work_urb.actual_length = 0;
	sc->work_urb.error_count = 0;
	sc->work_urb.status = 0;

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
		/* XXX Clear stalls */
		ub_complete(&sc->work_done);
		ub_state_done(sc, cmd, rc);
		return -1;
	}

	sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT;
	add_timer(&sc->work_timer);
	return 0;
}

/*
 * Factorization helper for the command state machine:
 * Submit a CSW read and go to STAT state.
 */
static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{

	if (__ub_state_stat(sc, cmd) != 0)
		return;

	cmd->stat_count = 0;
	cmd->state = UB_CMDST_STAT;
	ub_cmdtr_state(sc, cmd);
}

/*
 * Factorization helper for the command state machine:
 * Submit a CSW read and go to STAT state with counter (along [C] path).
 */
static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{

	if (++cmd->stat_count >= 4) {
		ub_state_sense(sc, cmd);
		return;
	}

	if (__ub_state_stat(sc, cmd) != 0)
		return;

	cmd->state = UB_CMDST_STAT;
	ub_cmdtr_state(sc, cmd);
}

/*
 * Factorization helper for the command state machine:
 * Submit a REQUEST SENSE and go to SENSE state.
 */
static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct ub_scsi_cmd *scmd;
	struct scatterlist *sg;
	int rc;

	if (cmd->cdb[0] == REQUEST_SENSE) {
		rc = -EPIPE;
		goto error;
	}

	scmd = &sc->top_rqs_cmd;
	memset(scmd, 0, sizeof(struct ub_scsi_cmd));
	scmd->cdb[0] = REQUEST_SENSE;
	scmd->cdb[4] = UB_SENSE_SIZE;
	scmd->cdb_len = 6;
	scmd->dir = UB_DIR_READ;
	scmd->state = UB_CMDST_INIT;
	scmd->nsg = 1;
	sg = &scmd->sgv[0];
	sg->page = virt_to_page(sc->top_sense);
	sg->offset = (unsigned int)sc->top_sense & (PAGE_SIZE-1);
	sg->length = UB_SENSE_SIZE;
	scmd->len = UB_SENSE_SIZE;
	scmd->lun = cmd->lun;
	scmd->done = ub_top_sense_done;
	scmd->back = cmd;

	scmd->tag = sc->tagcnt++;

	cmd->state = UB_CMDST_SENSE;
	ub_cmdtr_state(sc, cmd);

	ub_cmdq_insert(sc, scmd);
	return;

error:
	ub_state_done(sc, cmd, rc);
}
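
/*
 * For reference, the CLEAR_FEATURE(ENDPOINT_HALT) setup packet which
 * ub_submit_clear_stall() below assembles for, say, bulk-in endpoint 2
 * is the usual 8 bytes (wValue/wIndex/wLength little-endian):
 *
 *	bmRequestType 02, bRequest 01, wValue 0000, wIndex 0082, wLength 0000
 */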

/*
 * A helper for the command's state machine:
 * Submit a stall clear.
 */
static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
    int stalled_pipe)
{
	int endp;
	struct usb_ctrlrequest *cr;
	int rc;

	endp = usb_pipeendpoint(stalled_pipe);
	if (usb_pipein (stalled_pipe))
		endp |= USB_DIR_IN;

	cr = &sc->work_cr;
	cr->bRequestType = USB_RECIP_ENDPOINT;
	cr->bRequest = USB_REQ_CLEAR_FEATURE;
	cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
	cr->wIndex = cpu_to_le16(endp);
	cr->wLength = cpu_to_le16(0);

	UB_INIT_COMPLETION(sc->work_done);

	usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
	    (unsigned char*) cr, NULL, 0, ub_urb_complete, sc);
	sc->work_urb.transfer_flags = 0;
	sc->work_urb.actual_length = 0;
	sc->work_urb.error_count = 0;
	sc->work_urb.status = 0;

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
		ub_complete(&sc->work_done);
		return rc;
	}

	sc->work_timer.expires = jiffies + UB_CTRL_TIMEOUT;
	add_timer(&sc->work_timer);
	return 0;
}

/*
 */
static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
{
	unsigned char *sense = sc->top_sense;
	struct ub_scsi_cmd *cmd;

	/*
	 * Ignoring scmd->act_len, because the buffer was pre-zeroed.
	 */
	ub_cmdtr_sense(sc, scmd, sense);

	/*
	 * Find the command which triggered the unit attention or a check,
	 * save the sense into it, and advance its state machine.
	 */
	if ((cmd = ub_cmdq_peek(sc)) == NULL) {
		printk(KERN_WARNING "%s: sense done while idle\n", sc->name);
		return;
	}
	if (cmd != scmd->back) {
		printk(KERN_WARNING "%s: "
		    "sense done for wrong command 0x%x\n",
		    sc->name, cmd->tag);
		return;
	}
	if (cmd->state != UB_CMDST_SENSE) {
		printk(KERN_WARNING "%s: "
		    "sense done with bad cmd state %d\n",
		    sc->name, cmd->state);
		return;
	}

	cmd->key = sense[2] & 0x0F;
	cmd->asc = sense[12];
	cmd->ascq = sense[13];

	ub_scsi_urb_compl(sc, cmd);
}

/*
 * This is called from a process context.
 */
static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun)
{

	lun->readonly = 0;	/* XXX Query this from the device */

	lun->capacity.nsec = 0;
	lun->capacity.bsize = 512;
	lun->capacity.bshift = 0;

	if (ub_sync_tur(sc, lun) != 0)
		return;			/* Not ready */
	lun->changed = 0;

	if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
		/*
		 * The retry here means something is wrong, either with the
		 * device, with the transport, or with our code.
		 * We keep this because sd.c has retries for capacity.
		 */
		if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
			lun->capacity.nsec = 0;
			lun->capacity.bsize = 512;
			lun->capacity.bshift = 0;
		}
	}
}
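
/*
 * Note that the sense data saved by ub_top_sense_done() above follows
 * the fixed format: key in byte 2, ASC/ASCQ in bytes 12-13. A removed
 * medium, for example, typically reports key 2, asc 0x3a ("medium not
 * present") - the case the TODO list wants special-cased some day.
 */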

/*
 * The open function.
 * This is mostly needed to keep refcounting, but also to support
 * media checks on removable media drives.
 */
static int ub_bd_open(struct inode *inode, struct file *filp)
{
	struct gendisk *disk = inode->i_bdev->bd_disk;
	struct ub_lun *lun;
	struct ub_dev *sc;
	unsigned long flags;
	int rc;

	if ((lun = disk->private_data) == NULL)
		return -ENXIO;
	sc = lun->udev;

	spin_lock_irqsave(&ub_lock, flags);
	if (atomic_read(&sc->poison)) {
		spin_unlock_irqrestore(&ub_lock, flags);
		return -ENXIO;
	}
	sc->openc++;
	spin_unlock_irqrestore(&ub_lock, flags);

	/*
	 * This is a workaround for a specific problem in our block layer.
	 * In 2.6.9, register_disk duplicates the code from rescan_partitions.
	 * However, if we do add_disk with a device which persistently reports
	 * a changed media, add_disk calls register_disk, which does do_open,
	 * which will call rescan_partitions for changed media. After that,
	 * register_disk attempts to do it all again and causes double kobject
	 * registration and eventually an oops on module removal.
	 *
	 * The bottom line is, Al Viro says that we should not allow
	 * bdev->bd_invalidated to be set when doing add_disk no matter what.
	 */
	if (lun->first_open) {
		lun->first_open = 0;
		if (lun->changed) {
			rc = -ENOMEDIUM;
			goto err_open;
		}
	}

	if (lun->removable || lun->readonly)
		check_disk_change(inode->i_bdev);

	/*
	 * The sd.c considers ->media_present and ->changed not equivalent,
	 * under some pretty murky conditions (a failure of READ CAPACITY).
	 * We may need it one day.
	 */
	if (lun->removable && lun->changed && !(filp->f_flags & O_NDELAY)) {
		rc = -ENOMEDIUM;
		goto err_open;
	}

	if (lun->readonly && (filp->f_mode & FMODE_WRITE)) {
		rc = -EROFS;
		goto err_open;
	}

	return 0;

err_open:
	ub_put(sc);
	return rc;
}

/*
 */
static int ub_bd_release(struct inode *inode, struct file *filp)
{
	struct gendisk *disk = inode->i_bdev->bd_disk;
	struct ub_lun *lun = disk->private_data;
	struct ub_dev *sc = lun->udev;

	ub_put(sc);
	return 0;
}

/*
 * The ioctl interface.
 */
static int ub_bd_ioctl(struct inode *inode, struct file *filp,
    unsigned int cmd, unsigned long arg)
{
	struct gendisk *disk = inode->i_bdev->bd_disk;
	void __user *usermem = (void __user *) arg;

	return scsi_cmd_ioctl(filp, disk, cmd, usermem);
}

/*
 * This is called once a new disk was seen by the block layer or by ub_probe().
 * The main objective here is to discover the features of the media such as
 * the capacity, read-only status, etc. USB storage generally does not
 * need to be spun up, but if we needed it, this would be the place.
 *
 * This call can sleep.
 *
 * The return code is not used.
 */
static int ub_bd_revalidate(struct gendisk *disk)
{
	struct ub_lun *lun = disk->private_data;

	ub_revalidate(lun->udev, lun);

	/* XXX Support sector size switching like in sr.c */
	blk_queue_hardsect_size(disk->queue, lun->capacity.bsize);
	set_capacity(disk, lun->capacity.nsec);
	// set_disk_ro(sdkp->disk, lun->readonly);

	return 0;
}

/*
 * The check is called by the block layer to verify if the media
 * is still available. It is supposed to be harmless, lightweight and
 * non-intrusive in case the media was not changed.
 *
 * This call can sleep.
 *
 * The return code is bool!
 */
static int ub_bd_media_changed(struct gendisk *disk)
{
	struct ub_lun *lun = disk->private_data;

	if (!lun->removable)
		return 0;

	/*
	 * We always clear checks after every command, so this is not
	 * as dangerous as it looks. If the TEST_UNIT_READY fails here,
	 * the device is actually not ready, with operator or software
	 * intervention required. One dangerous item might be a drive which
	 * spins itself down, and when the time comes to write dirty pages,
	 * the write fails and the block layer discards the data. Since we
	 * never spin drives up, such devices simply cannot be used with ub
	 * anyway.
	 */
	if (ub_sync_tur(lun->udev, lun) != 0) {
		lun->changed = 1;
		return 1;
	}

	return lun->changed;
}

static struct block_device_operations ub_bd_fops = {
	.owner		= THIS_MODULE,
	.open		= ub_bd_open,
	.release	= ub_bd_release,
	.ioctl		= ub_bd_ioctl,
	.media_changed	= ub_bd_media_changed,
	.revalidate_disk = ub_bd_revalidate,
};

/*
 * Common ->done routine for commands executed synchronously.
 */
static void ub_probe_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct completion *cop = cmd->back;
	complete(cop);
}

/*
 * Test if the device has a check condition on it, synchronously.
 */
static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun)
{
	struct ub_scsi_cmd *cmd;
	enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) };
	unsigned long flags;
	struct completion compl;
	int rc;

	init_completion(&compl);

	rc = -ENOMEM;
	if ((cmd = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
		goto err_alloc;
	memset(cmd, 0, ALLOC_SIZE);

	cmd->cdb[0] = TEST_UNIT_READY;
	cmd->cdb_len = 6;
	cmd->dir = UB_DIR_NONE;
	cmd->state = UB_CMDST_INIT;
	cmd->lun = lun;			/* This may be NULL, but that's ok */
	cmd->done = ub_probe_done;
	cmd->back = &compl;

	spin_lock_irqsave(&sc->lock, flags);
	cmd->tag = sc->tagcnt++;

	rc = ub_submit_scsi(sc, cmd);
	spin_unlock_irqrestore(&sc->lock, flags);

	if (rc != 0) {
		printk("ub: testing ready: submit error (%d)\n", rc); /* P3 */
		goto err_submit;
	}

	wait_for_completion(&compl);

	rc = cmd->error;

	if (rc == -EIO && cmd->key != 0)	/* Retries for benh's key */
		rc = cmd->key;

err_submit:
	kfree(cmd);
err_alloc:
	return rc;
}

/*
 * Read the SCSI capacity synchronously (for probing).
 */
static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_capacity *ret)
{
	struct ub_scsi_cmd *cmd;
	struct scatterlist *sg;
	char *p;
	enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) + 8 };
	unsigned long flags;
	unsigned int bsize, shift;
	unsigned long nsec;
	struct completion compl;
	int rc;

	init_completion(&compl);

	rc = -ENOMEM;
	if ((cmd = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
		goto err_alloc;
	memset(cmd, 0, ALLOC_SIZE);
	p = (char *)cmd + sizeof(struct ub_scsi_cmd);

	cmd->cdb[0] = 0x25;
	cmd->cdb_len = 10;
	cmd->dir = UB_DIR_READ;
	cmd->state = UB_CMDST_INIT;
	cmd->nsg = 1;
	sg = &cmd->sgv[0];
	sg->page = virt_to_page(p);
	sg->offset = (unsigned int)p & (PAGE_SIZE-1);
	sg->length = 8;
	cmd->len = 8;
	cmd->lun = lun;
	cmd->done = ub_probe_done;
	cmd->back = &compl;

	spin_lock_irqsave(&sc->lock, flags);
	cmd->tag = sc->tagcnt++;

	rc = ub_submit_scsi(sc, cmd);
	spin_unlock_irqrestore(&sc->lock, flags);

	if (rc != 0) {
		printk("ub: reading capacity: submit error (%d)\n", rc); /* P3 */
		goto err_submit;
	}

	wait_for_completion(&compl);

	if (cmd->error != 0) {
		printk("ub: reading capacity: error %d\n", cmd->error); /* P3 */
		rc = -EIO;
		goto err_read;
	}
	if (cmd->act_len != 8) {
		printk("ub: reading capacity: size %d\n", cmd->act_len); /* P3 */
		rc = -EIO;
		goto err_read;
	}

	/* sd.c special-cases sector size of 0 to mean 512. Needed? Safe? */
	nsec = be32_to_cpu(*(__be32 *)p) + 1;
	bsize = be32_to_cpu(*(__be32 *)(p + 4));
	switch (bsize) {
	case 512:	shift = 0;	break;
	case 1024:	shift = 1;	break;
	case 2048:	shift = 2;	break;
	case 4096:	shift = 3;	break;
	default:
		printk("ub: Bad sector size %u\n", bsize); /* P3 */
		rc = -EDOM;
		goto err_inv_bsize;
	}

	ret->bsize = bsize;
	ret->bshift = shift;
	ret->nsec = nsec << shift;
	rc = 0;

err_inv_bsize:
err_read:
err_submit:
	kfree(cmd);
err_alloc:
	return rc;
}
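
/*
 * Example: opcode 0x25 is READ CAPACITY(10). A device with last LBA
 * 0x003fffff and 512-byte sectors replies with the eight bytes
 *
 *	00 3f ff ff 00 00 02 00
 *
 * which the code above converts to nsec = 0x400000 (2GB worth of
 * 512-byte sectors), bsize = 512, bshift = 0.
 */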

/*
 */
static void ub_probe_urb_complete(struct urb *urb, struct pt_regs *pt)
{
	struct completion *cop = urb->context;
	complete(cop);
}

static void ub_probe_timeout(unsigned long arg)
{
	struct completion *cop = (struct completion *) arg;
	complete(cop);
}

/*
 * Get the number of LUNs by way of the Bulk GetMaxLUN command.
 */
static int ub_sync_getmaxlun(struct ub_dev *sc)
{
	int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
	unsigned char *p;
	enum { ALLOC_SIZE = 1 };
	struct usb_ctrlrequest *cr;
	struct completion compl;
	struct timer_list timer;
	int nluns;
	int rc;

	init_completion(&compl);

	rc = -ENOMEM;
	if ((p = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
		goto err_alloc;
	*p = 55;

	cr = &sc->work_cr;
	cr->bRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
	cr->bRequest = US_BULK_GET_MAX_LUN;
	cr->wValue = cpu_to_le16(0);
	cr->wIndex = cpu_to_le16(ifnum);
	cr->wLength = cpu_to_le16(1);

	usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe,
	    (unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl);
	sc->work_urb.transfer_flags = 0;
	sc->work_urb.actual_length = 0;
	sc->work_urb.error_count = 0;
	sc->work_urb.status = 0;

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
		if (rc == -EPIPE) {
			printk("%s: Stall at GetMaxLUN, using 1 LUN\n",
			     sc->name); /* P3 */
		} else {
			printk(KERN_WARNING
			     "%s: Unable to submit GetMaxLUN (%d)\n",
			     sc->name, rc);
		}
		goto err_submit;
	}

	init_timer(&timer);
	timer.function = ub_probe_timeout;
	timer.data = (unsigned long) &compl;
	timer.expires = jiffies + UB_CTRL_TIMEOUT;
	add_timer(&timer);

	wait_for_completion(&compl);

	del_timer_sync(&timer);
	usb_kill_urb(&sc->work_urb);

	if (sc->work_urb.actual_length != 1) {
		printk("%s: GetMaxLUN returned %d bytes\n", sc->name,
		    sc->work_urb.actual_length); /* P3 */
		nluns = 0;
	} else {
		if ((nluns = *p) == 55) {
			nluns = 0;
		} else {
			/* GetMaxLUN returns the maximum LUN number */
			nluns += 1;
			if (nluns > UB_MAX_LUNS)
				nluns = UB_MAX_LUNS;
		}
		printk("%s: GetMaxLUN returned %d, using %d LUNs\n", sc->name,
		    *p, nluns); /* P3 */
	}

	kfree(p);
	return nluns;

err_submit:
	kfree(p);
err_alloc:
	return rc;
}
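
/*
 * For reference, the GetMaxLUN setup packet built above is, for
 * interface 0:
 *
 *	bmRequestType a1, bRequest fe, wValue 0000, wIndex 0000, wLength 0001
 *
 * A device with LUNs 0..3 answers with the single byte 0x03, which the
 * code turns into nluns = 4. The 55 stored in *p beforehand is only a
 * sentinel to detect a reply that never overwrote the buffer.
 */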

/*
 * Clear initial stalls.
 */
static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe)
{
	int endp;
	struct usb_ctrlrequest *cr;
	struct completion compl;
	struct timer_list timer;
	int rc;

	init_completion(&compl);

	endp = usb_pipeendpoint(stalled_pipe);
	if (usb_pipein (stalled_pipe))
		endp |= USB_DIR_IN;

	cr = &sc->work_cr;
	cr->bRequestType = USB_RECIP_ENDPOINT;
	cr->bRequest = USB_REQ_CLEAR_FEATURE;
	cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
	cr->wIndex = cpu_to_le16(endp);
	cr->wLength = cpu_to_le16(0);

	usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
	    (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
	sc->work_urb.transfer_flags = 0;
	sc->work_urb.actual_length = 0;
	sc->work_urb.error_count = 0;
	sc->work_urb.status = 0;

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
		printk(KERN_WARNING
		     "%s: Unable to submit a probe clear (%d)\n", sc->name, rc);
		return rc;
	}

	init_timer(&timer);
	timer.function = ub_probe_timeout;
	timer.data = (unsigned long) &compl;
	timer.expires = jiffies + UB_CTRL_TIMEOUT;
	add_timer(&timer);

	wait_for_completion(&compl);

	del_timer_sync(&timer);
	usb_kill_urb(&sc->work_urb);

	/* reset the endpoint toggle */
	usb_settoggle(sc->dev, endp, usb_pipeout(sc->last_pipe), 0);

	return 0;
}

/*
 * Get the pipe settings.
 */
static int ub_get_pipes(struct ub_dev *sc, struct usb_device *dev,
    struct usb_interface *intf)
{
	struct usb_host_interface *altsetting = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_in = NULL;
	struct usb_endpoint_descriptor *ep_out = NULL;
	struct usb_endpoint_descriptor *ep;
	int i;

	/*
	 * Find the endpoints we need.
	 * We are expecting a minimum of 2 endpoints - in and out (bulk).
	 * We will ignore any others.
	 */
	for (i = 0; i < altsetting->desc.bNumEndpoints; i++) {
		ep = &altsetting->endpoint[i].desc;

		/* Is it a BULK endpoint? */
		if ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
				== USB_ENDPOINT_XFER_BULK) {
			/* BULK in or out? */
			if (ep->bEndpointAddress & USB_DIR_IN)
				ep_in = ep;
			else
				ep_out = ep;
		}
	}

	if (ep_in == NULL || ep_out == NULL) {
		printk(KERN_NOTICE "%s: failed endpoint check\n",
		    sc->name);
		return -EIO;
	}

	/* Calculate and store the pipe values */
	sc->send_ctrl_pipe = usb_sndctrlpipe(dev, 0);
	sc->recv_ctrl_pipe = usb_rcvctrlpipe(dev, 0);
	sc->send_bulk_pipe = usb_sndbulkpipe(dev,
		ep_out->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	sc->recv_bulk_pipe = usb_rcvbulkpipe(dev,
		ep_in->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);

	return 0;
}

/*
 * Probing is done in the process context, which allows us to cheat
 * and not build a state machine for the discovery.
 */
static int ub_probe(struct usb_interface *intf,
    const struct usb_device_id *dev_id)
{
	struct ub_dev *sc;
	int nluns;
	int rc;
	int i;

	rc = -ENOMEM;
	if ((sc = kmalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL)
		goto err_core;
	memset(sc, 0, sizeof(struct ub_dev));
	spin_lock_init(&sc->lock);
	INIT_LIST_HEAD(&sc->luns);
	usb_init_urb(&sc->work_urb);
	tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
	atomic_set(&sc->poison, 0);

	init_timer(&sc->work_timer);
	sc->work_timer.data = (unsigned long) sc;
	sc->work_timer.function = ub_urb_timeout;

	ub_init_completion(&sc->work_done);
	sc->work_done.done = 1;		/* A little yuk, but oh well... */

	sc->dev = interface_to_usbdev(intf);
	sc->intf = intf;
	// sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
	usb_set_intfdata(intf, sc);
	usb_get_dev(sc->dev);
	// usb_get_intf(sc->intf);	/* Do we need this? */

	snprintf(sc->name, 12, DRV_NAME "(%d.%d)",
	    sc->dev->bus->busnum, sc->dev->devnum);

	/* XXX Verify that we can handle the device (from descriptors) */

	if ((rc = ub_get_pipes(sc, sc->dev, intf)) != 0)
		goto err_diag;

	if ((rc = device_create_file(&sc->intf->dev, &dev_attr_diag)) != 0)
		goto err_diag;

	/*
	 * At this point, all USB initialization is done, do upper layer.
	 * We really hate halfway initialized structures, so from the
	 * invariants perspective, this ub_dev is fully constructed at
	 * this point.
	 */

	/*
	 * This is needed to clear toggles. It is a problem only if we do
	 * `rmmod ub && modprobe ub` without disconnects, but we like
	 * being able to do that.
	 */
	ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
	ub_probe_clear_stall(sc, sc->send_bulk_pipe);

	/*
	 * The way this is used by the startup code is a little specific.
	 * A SCSI check condition causes a USB stall. Our common case code
	 * sees it and clears the check, after which the device is ready
	 * for use. But if a check was not present, any command other than
	 * TEST_UNIT_READY ends with a lockup (including REQUEST_SENSE).
	 *
	 * If we neglect to clear the SCSI check, the first real command
	 * fails (which is the capacity readout). We clear that and retry,
	 * but why cause spurious retries for no reason?
	 *
	 * Revalidation may start with its own TEST_UNIT_READY, but that one
	 * has to succeed, so we clear checks with an additional one here.
	 * In any case it's not our business how revalidation is implemented.
	 */
	for (i = 0; i < 3; i++) {	/* Retries for benh's key */
		if ((rc = ub_sync_tur(sc, NULL)) <= 0) break;
		if (rc != 0x6) break;
		msleep(10);
	}
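
	/*
	 * Determine the number of LUNs. The Bulk-Only transport spec
	 * allows a device which supports only one LUN to STALL
	 * GetMaxLUN, so a stall in this step must not be fatal.
	 */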
	nluns = 1;
	for (i = 0; i < 3; i++) {
		if ((rc = ub_sync_getmaxlun(sc)) < 0) {
			/*
			 * Some devices (e.g. the Iomega ZIP-100) need this --
			 * apparently the bulk pipes get STALLed when the
			 * GetMaxLUN request is processed.
			 * XXX I have a ZIP-100, verify it does this.
			 */
			if (rc == -EPIPE) {
				ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
				ub_probe_clear_stall(sc, sc->send_bulk_pipe);
			}
			break;
		}
		if (rc != 0) {
			nluns = rc;
			break;
		}
		msleep(100);
	}

	for (i = 0; i < nluns; i++) {
		ub_probe_lun(sc, i);
	}
	return 0;

	/* device_remove_file(&sc->intf->dev, &dev_attr_diag); */
err_diag:
	usb_set_intfdata(intf, NULL);
	// usb_put_intf(sc->intf);
	usb_put_dev(sc->dev);
	kfree(sc);
err_core:
	return rc;
}
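
/*
 * For reference, how the names fit together: the device at bus 1,
 * address 3 is named "ub(1.3)" by ub_probe above; if its LUN 0 draws
 * id 1 from ub_id_get, ub_probe_lun below names it "ubb(1.3.0)" and
 * registers the disk as /dev/ubb (devfs: ub/b), with
 * UB_MINORS_PER_MAJOR minors reserved per disk.
 */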
static int ub_probe_lun(struct ub_dev *sc, int lnum)
{
	struct ub_lun *lun;
	request_queue_t *q;
	struct gendisk *disk;
	int rc;

	rc = -ENOMEM;
	if ((lun = kmalloc(sizeof(struct ub_lun), GFP_KERNEL)) == NULL)
		goto err_alloc;
	memset(lun, 0, sizeof(struct ub_lun));
	lun->num = lnum;

	rc = -ENOSR;
	if ((lun->id = ub_id_get()) == -1)
		goto err_id;

	lun->udev = sc;
	list_add(&lun->link, &sc->luns);

	snprintf(lun->name, 16, DRV_NAME "%c(%d.%d.%d)",
	    lun->id + 'a', sc->dev->bus->busnum, sc->dev->devnum, lun->num);

	lun->removable = 1;		/* XXX Query this from the device */
	lun->changed = 1;		/* ub_revalidate clears only */
	lun->first_open = 1;
	ub_revalidate(sc, lun);

	rc = -ENOMEM;
	if ((disk = alloc_disk(UB_MINORS_PER_MAJOR)) == NULL)
		goto err_diskalloc;

	lun->disk = disk;
	sprintf(disk->disk_name, DRV_NAME "%c", lun->id + 'a');
	sprintf(disk->devfs_name, DEVFS_NAME "/%c", lun->id + 'a');
	disk->major = UB_MAJOR;
	disk->first_minor = lun->id * UB_MINORS_PER_MAJOR;
	disk->fops = &ub_bd_fops;
	disk->private_data = lun;
	disk->driverfs_dev = &sc->intf->dev;	/* XXX Many to one ok? */

	rc = -ENOMEM;
	if ((q = blk_init_queue(ub_request_fn, &sc->lock)) == NULL)
		goto err_blkqinit;

	disk->queue = q;

	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
	blk_queue_max_hw_segments(q, UB_MAX_REQ_SG);
	blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
	blk_queue_segment_boundary(q, 0xffffffff);	/* Dubious. */
	blk_queue_max_sectors(q, UB_MAX_SECTORS);
	blk_queue_hardsect_size(q, lun->capacity.bsize);

	q->queuedata = lun;

	set_capacity(disk, lun->capacity.nsec);
	if (lun->removable)
		disk->flags |= GENHD_FL_REMOVABLE;

	add_disk(disk);

	return 0;

err_blkqinit:
	put_disk(disk);
err_diskalloc:
	list_del(&lun->link);
	ub_id_put(lun->id);
err_id:
	kfree(lun);
err_alloc:
	return rc;
}

static void ub_disconnect(struct usb_interface *intf)
{
	struct ub_dev *sc = usb_get_intfdata(intf);
	struct list_head *p;
	struct ub_lun *lun;
	struct gendisk *disk;
	unsigned long flags;

	/*
	 * Prevent ub_bd_release from pulling the rug from under us.
	 * XXX This is starting to look like a kref.
	 * XXX Why not take this ref at probe time?
	 */
	spin_lock_irqsave(&ub_lock, flags);
	sc->openc++;
	spin_unlock_irqrestore(&ub_lock, flags);

	/*
	 * Fence stall clearings, operations triggered by unlinks and so on.
	 * We do not attempt to unlink any URBs, because we do not trust the
	 * unlink paths in HC drivers. Also, we get -84 (-EILSEQ) upon
	 * disconnect anyway.
	 */
	atomic_set(&sc->poison, 1);

	/*
	 * Blow away queued commands.
	 *
	 * Actually, this never works, because before we get here
	 * the HCD terminates outstanding URB(s). It causes our
	 * SCSI command queue to advance, commands fail to submit,
	 * and the whole queue drains. So, we just use this code to
	 * print warnings.
	 */
	spin_lock_irqsave(&sc->lock, flags);
	{
		struct ub_scsi_cmd *cmd;
		int cnt = 0;
		while ((cmd = ub_cmdq_pop(sc)) != NULL) {
			cmd->error = -ENOTCONN;
			cmd->state = UB_CMDST_DONE;
			ub_cmdtr_state(sc, cmd);
			(*cmd->done)(sc, cmd);
			cnt++;
		}
		if (cnt != 0) {
			printk(KERN_WARNING "%s: "
			    "%d commands were queued after shutdown\n",
			    sc->name, cnt);
		}
	}
	spin_unlock_irqrestore(&sc->lock, flags);

	/*
	 * Unregister the upper layer.
	 */
	list_for_each (p, &sc->luns) {
		lun = list_entry(p, struct ub_lun, link);
		disk = lun->disk;
		if (disk->flags & GENHD_FL_UP)
			del_gendisk(disk);
		/*
		 * I wish I could do:
		 *    set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
		 * As it is, we rely on our internal poisoning and let
		 * the upper levels spin furiously failing all the I/O.
		 */
	}

	/*
	 * Taking a lock on a structure which is about to be freed
	 * is very nonsensical. Here it is largely a way to do a debug freeze,
	 * and a bracket which shows where the nonsensical code segment ends.
	 *
	 * Testing for -EINPROGRESS is always a bug, so we are bending
	 * the rules a little.
	 */
	spin_lock_irqsave(&sc->lock, flags);
	if (sc->work_urb.status == -EINPROGRESS) {	/* janitors: ignore */
		printk(KERN_WARNING "%s: "
		    "URB is active after disconnect\n", sc->name);
	}
	spin_unlock_irqrestore(&sc->lock, flags);

	/*
	 * There is virtually no chance that any other CPU is still running
	 * this long after ub_urb_complete should have called del_timer --
	 * but only if the HCD didn't forget to deliver a callback on unlink.
	 */
	del_timer_sync(&sc->work_timer);

	/*
	 * At this point there must be no commands coming from anyone
	 * and no URBs left in transit.
	 */

	device_remove_file(&sc->intf->dev, &dev_attr_diag);
	usb_set_intfdata(intf, NULL);
	// usb_put_intf(sc->intf);
	sc->intf = NULL;
	usb_put_dev(sc->dev);
	sc->dev = NULL;

	ub_put(sc);
}

static struct usb_driver ub_driver = {
	.owner =	THIS_MODULE,
	.name =		"ub",
	.probe =	ub_probe,
	.disconnect =	ub_disconnect,
	.id_table =	ub_usb_ids,
};

static int __init ub_init(void)
{
	int rc;

	/* P3 */ printk("ub: sizeof ub_scsi_cmd %zu ub_dev %zu ub_lun %zu\n",
			sizeof(struct ub_scsi_cmd),
			sizeof(struct ub_dev), sizeof(struct ub_lun));

	if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0)
		goto err_regblkdev;
	devfs_mk_dir(DEVFS_NAME);

	if ((rc = usb_register(&ub_driver)) != 0)
		goto err_register;

	return 0;

err_register:
	devfs_remove(DEVFS_NAME);
	unregister_blkdev(UB_MAJOR, DRV_NAME);
err_regblkdev:
	return rc;
}

static void __exit ub_exit(void)
{
	usb_deregister(&ub_driver);

	devfs_remove(DEVFS_NAME);
	unregister_blkdev(UB_MAJOR, DRV_NAME);
}

module_init(ub_init);
module_exit(ub_exit);

MODULE_LICENSE("GPL");