/*
 * blkfront.c
 *
 * XenLinux virtual block device driver.
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 * Copyright (c) 2004, Christian Limpach
 * Copyright (c) 2004, Andrew Warfield
 * Copyright (c) 2005, Christopher Clark
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/module.h>

#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

#include <asm/xen/hypervisor.h>

enum blkif_state {
	BLKIF_STATE_DISCONNECTED,
	BLKIF_STATE_CONNECTED,
	BLKIF_STATE_SUSPENDED,
};

struct blk_shadow {
	struct blkif_request req;
	unsigned long request;
	unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};

static struct block_device_operations xlvbd_block_fops;

#define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE)
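/*
 * __RING_SIZE() computes how many request/response slots fit in one
 * shared page, rounded down to a power of two; with 4 KiB pages the
 * classic blkif ABI yields 32 slots.  The shadow[] array below mirrors
 * that sizing so every in-flight ring slot has a local copy.
 */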
/*
 * We have one of these per vbd, whether ide, scsi or 'other'.  They
 * hang in private_data off the gendisk structure. We may end up
 * putting all kinds of interesting stuff here :-)
 */
struct blkfront_info
{
	struct xenbus_device *xbdev;
	struct gendisk *gd;
	int vdevice;
	blkif_vdev_t handle;
	enum blkif_state connected;
	int ring_ref;
	struct blkif_front_ring ring;
	unsigned int evtchn, irq;
	struct request_queue *rq;
	struct work_struct work;
	struct gnttab_free_callback callback;
	struct blk_shadow shadow[BLK_RING_SIZE];
	unsigned long shadow_free;
	int feature_barrier;
	int is_ready;

	/**
	 * The number of people holding this device open.  We won't allow a
	 * hot-unplug unless this is 0.
	 */
	int users;
};

static DEFINE_SPINLOCK(blkif_io_lock);

#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
	(BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
#define GRANT_INVALID_REF	0

#define PARTS_PER_DISK		16
#define PARTS_PER_EXT_DISK	256

#define BLKIF_MAJOR(dev) ((dev)>>8)
#define BLKIF_MINOR(dev) ((dev) & 0xff)

#define EXT_SHIFT 28
#define EXTENDED (1<<EXT_SHIFT)
#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))

#define DEV_NAME	"xvd"	/* name in /dev */

static int get_id_from_freelist(struct blkfront_info *info)
{
	unsigned long free = info->shadow_free;
	BUG_ON(free > BLK_RING_SIZE);
	info->shadow_free = info->shadow[free].req.id;
	info->shadow[free].req.id = 0x0fffffee; /* debug */
	return free;
}

static void add_id_to_freelist(struct blkfront_info *info,
			       unsigned long id)
{
	info->shadow[id].req.id  = info->shadow_free;
	info->shadow[id].request = 0;
	info->shadow_free = id;
}

static void blkif_restart_queue_callback(void *arg)
{
	struct blkfront_info *info = (struct blkfront_info *)arg;
	schedule_work(&info->work);
}

static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
	/* We don't have real geometry info, but let's at least return
	   values consistent with the size of the device */
	sector_t nsect = get_capacity(bd->bd_disk);
	sector_t cylinders = nsect;

	hg->heads = 0xff;
	hg->sectors = 0x3f;
	sector_div(cylinders, hg->heads * hg->sectors);
	hg->cylinders = cylinders;
	if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
		hg->cylinders = 0xffff;
	return 0;
}

static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
		       unsigned command, unsigned long argument)
{
	struct blkfront_info *info = bdev->bd_disk->private_data;
	int i;

	dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n",
		command, (long)argument);

	switch (command) {
	case CDROMMULTISESSION:
		dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n");
		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
			if (put_user(0, (char __user *)(argument + i)))
				return -EFAULT;
		return 0;

	case CDROM_GET_CAPABILITY: {
		struct gendisk *gd = info->gd;
		if (gd->flags & GENHD_FL_CD)
			return 0;
		return -EINVAL;
	}

	default:
		/*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
		  command);*/
		return -EINVAL; /* same return as native Linux */
	}

	return 0;
}
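/*
 * Free shadow entries are threaded through their req.id fields:
 * shadow_free holds the index of the first free entry and each free
 * entry's req.id holds the index of the next, so allocation and
 * release (above) are O(1).  Each queued request consumes one ring
 * slot plus one grant reference per bio segment, up to
 * BLKIF_MAX_SEGMENTS_PER_REQUEST segments; a shadow copy is kept so
 * the request can be replayed after a suspend/resume.
 */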
/*
 * blkif_queue_request
 *
 * request block io
 *
 * id: for guest use only.
 * operation: BLKIF_OP_{READ,WRITE,PROBE}
 * buffer: buffer to read/write into. this should be a
 *   virtual address in the guest os.
 */
static int blkif_queue_request(struct request *req)
{
	struct blkfront_info *info = req->rq_disk->private_data;
	unsigned long buffer_mfn;
	struct blkif_request *ring_req;
	struct req_iterator iter;
	struct bio_vec *bvec;
	unsigned long id;
	unsigned int fsect, lsect;
	int ref;
	grant_ref_t gref_head;

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
		return 1;

	if (gnttab_alloc_grant_references(
		BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
		gnttab_request_free_callback(
			&info->callback,
			blkif_restart_queue_callback,
			info,
			BLKIF_MAX_SEGMENTS_PER_REQUEST);
		return 1;
	}

	/* Fill out a communications ring structure. */
	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
	id = get_id_from_freelist(info);
	info->shadow[id].request = (unsigned long)req;

	ring_req->id = id;
	ring_req->sector_number = (blkif_sector_t)req->sector;
	ring_req->handle = info->handle;

	ring_req->operation = rq_data_dir(req) ?
		BLKIF_OP_WRITE : BLKIF_OP_READ;
	if (blk_barrier_rq(req))
		ring_req->operation = BLKIF_OP_WRITE_BARRIER;

	ring_req->nr_segments = 0;
	rq_for_each_segment(bvec, req, iter) {
		BUG_ON(ring_req->nr_segments == BLKIF_MAX_SEGMENTS_PER_REQUEST);
		buffer_mfn = pfn_to_mfn(page_to_pfn(bvec->bv_page));
		fsect = bvec->bv_offset >> 9;
		lsect = fsect + (bvec->bv_len >> 9) - 1;
		/* install a grant reference. */
		ref = gnttab_claim_grant_reference(&gref_head);
		BUG_ON(ref == -ENOSPC);

		gnttab_grant_foreign_access_ref(
				ref,
				info->xbdev->otherend_id,
				buffer_mfn,
				rq_data_dir(req));

		info->shadow[id].frame[ring_req->nr_segments] =
				mfn_to_pfn(buffer_mfn);

		ring_req->seg[ring_req->nr_segments] =
				(struct blkif_request_segment) {
					.gref       = ref,
					.first_sect = fsect,
					.last_sect  = lsect };

		ring_req->nr_segments++;
	}

	info->ring.req_prod_pvt++;

	/* Keep a private copy so we can reissue requests when recovering. */
	info->shadow[id].req = *ring_req;

	gnttab_free_grant_references(gref_head);

	return 0;
}


static inline void flush_requests(struct blkfront_info *info)
{
	int notify;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);

	if (notify)
		notify_remote_via_irq(info->irq);
}

/*
 * do_blkif_request
 *  read a block; request is in a request queue
 */
static void do_blkif_request(struct request_queue *rq)
{
	struct blkfront_info *info = NULL;
	struct request *req;
	int queued;

	pr_debug("Entered do_blkif_request\n");

	queued = 0;

	while ((req = elv_next_request(rq)) != NULL) {
		info = req->rq_disk->private_data;
		if (!blk_fs_request(req)) {
			end_request(req, 0);
			continue;
		}

		if (RING_FULL(&info->ring))
			goto wait;

		pr_debug("do_blk_req %p: cmd %p, sec %lx, "
			 "(%u/%li) buffer:%p [%s]\n",
			 req, req->cmd, (unsigned long)req->sector,
			 req->current_nr_sectors,
			 req->nr_sectors, req->buffer,
			 rq_data_dir(req) ? "write" : "read");

		blkdev_dequeue_request(req);
		if (blkif_queue_request(req)) {
			blk_requeue_request(rq, req);
wait:
			/* Avoid pointless unplugs. */
			blk_stop_queue(rq);
			break;
		}

		queued++;
	}

	if (queued != 0)
		flush_requests(info);
}
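/*
 * Queue initialisation.  The elevator is switched to "noop" below on
 * the assumption that seek-avoidance reordering is pointless for a
 * virtual device: the backend (and the real hardware beneath it)
 * schedules I/O itself.
 */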
static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
{
	struct request_queue *rq;
	elevator_t *old_e;

	rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
	if (rq == NULL)
		return -1;

	old_e = rq->elevator;
	if (IS_ERR_VALUE(elevator_init(rq, "noop")))
		printk(KERN_WARNING
		       "blkfront: Switch elevator failed, use default\n");
	else
		elevator_exit(old_e);

	/* Hard sector size and max sectors impersonate the equiv. hardware. */
	blk_queue_hardsect_size(rq, sector_size);
	blk_queue_max_sectors(rq, 512);

	/* Each segment in a request is up to an aligned page in size. */
	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
	blk_queue_max_segment_size(rq, PAGE_SIZE);

	/* Ensure a merged request will fit in a single I/O ring slot. */
	blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
	blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);

	/* Make sure buffer addresses are sector-aligned. */
	blk_queue_dma_alignment(rq, 511);

	/* Make sure we don't use bounce buffers. */
	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);

	gd->queue = rq;

	return 0;
}


static int xlvbd_barrier(struct blkfront_info *info)
{
	int err;

	err = blk_queue_ordered(info->rq,
		info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE,
		NULL);

	if (err)
		return err;

	printk(KERN_INFO "blkfront: %s: barriers %s\n",
	       info->gd->disk_name,
	       info->feature_barrier ? "enabled" : "disabled");
	return 0;
}
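/*
 * Disk naming follows the traditional scheme: offsets 0..25 become
 * xvda..xvdz and larger offsets two-letter names (xvdaa, xvdab, ...).
 * For example, a non-extended vdevice of 0x0010 has minor 16 and, at
 * 16 minors per disk, comes out as xvdb.  Extended vdevices carry the
 * minor in the low 28 bits instead.
 */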
static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
			       struct blkfront_info *info,
			       u16 vdisk_info, u16 sector_size)
{
	struct gendisk *gd;
	int nr_minors = 1;
	int err = -ENODEV;
	unsigned int offset;
	int minor;
	int nr_parts;

	BUG_ON(info->gd != NULL);
	BUG_ON(info->rq != NULL);

	if ((info->vdevice>>EXT_SHIFT) > 1) {
		/* this is above the extended range; something is wrong */
		printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice);
		return -ENODEV;
	}

	if (!VDEV_IS_EXTENDED(info->vdevice)) {
		minor = BLKIF_MINOR(info->vdevice);
		nr_parts = PARTS_PER_DISK;
	} else {
		minor = BLKIF_MINOR_EXT(info->vdevice);
		nr_parts = PARTS_PER_EXT_DISK;
	}

	if ((minor % nr_parts) == 0)
		nr_minors = nr_parts;

	gd = alloc_disk(nr_minors);
	if (gd == NULL)
		goto out;

	offset = minor / nr_parts;

	if (nr_minors > 1) {
		if (offset < 26)
			sprintf(gd->disk_name, "%s%c", DEV_NAME, 'a' + offset);
		else
			sprintf(gd->disk_name, "%s%c%c", DEV_NAME,
				'a' + ((offset / 26)-1), 'a' + (offset % 26));
	} else {
		if (offset < 26)
			sprintf(gd->disk_name, "%s%c%d", DEV_NAME,
				'a' + offset,
				minor & (nr_parts - 1));
		else
			sprintf(gd->disk_name, "%s%c%c%d", DEV_NAME,
				'a' + ((offset / 26) - 1),
				'a' + (offset % 26),
				minor & (nr_parts - 1));
	}

	gd->major = XENVBD_MAJOR;
	gd->first_minor = minor;
	gd->fops = &xlvbd_block_fops;
	gd->private_data = info;
	gd->driverfs_dev = &(info->xbdev->dev);
	set_capacity(gd, capacity);

	if (xlvbd_init_blk_queue(gd, sector_size)) {
		del_gendisk(gd);
		goto out;
	}

	info->rq = gd->queue;
	info->gd = gd;

	if (info->feature_barrier)
		xlvbd_barrier(info);

	if (vdisk_info & VDISK_READONLY)
		set_disk_ro(gd, 1);

	if (vdisk_info & VDISK_REMOVABLE)
		gd->flags |= GENHD_FL_REMOVABLE;

	if (vdisk_info & VDISK_CDROM)
		gd->flags |= GENHD_FL_CD;

	return 0;

 out:
	return err;
}

static void kick_pending_request_queues(struct blkfront_info *info)
{
	if (!RING_FULL(&info->ring)) {
		/* Re-enable calldowns. */
		blk_start_queue(info->rq);
		/* Kick things off immediately. */
		do_blkif_request(info->rq);
	}
}

static void blkif_restart_queue(struct work_struct *work)
{
	struct blkfront_info *info = container_of(work, struct blkfront_info, work);

	spin_lock_irq(&blkif_io_lock);
	if (info->connected == BLKIF_STATE_CONNECTED)
		kick_pending_request_queues(info);
	spin_unlock_irq(&blkif_io_lock);
}

static void blkif_free(struct blkfront_info *info, int suspend)
{
	/* Prevent new requests being issued until we fix things up. */
	spin_lock_irq(&blkif_io_lock);
	info->connected = suspend ?
		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
	/* No more blkif_request(). */
	if (info->rq)
		blk_stop_queue(info->rq);
	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);
	spin_unlock_irq(&blkif_io_lock);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_scheduled_work();

	/* Free resources associated with old device channel. */
	if (info->ring_ref != GRANT_INVALID_REF) {
		gnttab_end_foreign_access(info->ring_ref, 0,
					  (unsigned long)info->ring.sring);
		info->ring_ref = GRANT_INVALID_REF;
		info->ring.sring = NULL;
	}
	if (info->irq)
		unbind_from_irqhandler(info->irq, info);
	info->evtchn = info->irq = 0;
}

static void blkif_completion(struct blk_shadow *s)
{
	int i;
	for (i = 0; i < s->req.nr_segments; i++)
		gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL);
}
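/*
 * Completion path.  The interrupt handler below walks the response
 * ring from rsp_cons up to the backend's rsp_prod, ends each completed
 * request, releases its grant references via blkif_completion() and
 * returns the shadow entry to the free list.
 */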
static irqreturn_t blkif_interrupt(int irq, void *dev_id)
{
	struct request *req;
	struct blkif_response *bret;
	RING_IDX i, rp;
	unsigned long flags;
	struct blkfront_info *info = (struct blkfront_info *)dev_id;
	int error;

	spin_lock_irqsave(&blkif_io_lock, flags);

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
		spin_unlock_irqrestore(&blkif_io_lock, flags);
		return IRQ_HANDLED;
	}

 again:
	rp = info->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	for (i = info->ring.rsp_cons; i != rp; i++) {
		unsigned long id;
		int ret;

		bret = RING_GET_RESPONSE(&info->ring, i);
		id = bret->id;
		req = (struct request *)info->shadow[id].request;

		blkif_completion(&info->shadow[id]);

		add_id_to_freelist(info, id);

		error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
		switch (bret->operation) {
		case BLKIF_OP_WRITE_BARRIER:
			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
				printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
				       info->gd->disk_name);
				error = -EOPNOTSUPP;
				info->feature_barrier = 0;
				xlvbd_barrier(info);
			}
			/* fall through */
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
			if (unlikely(bret->status != BLKIF_RSP_OKAY))
				dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
					"request: %x\n", bret->status);

			ret = __blk_end_request(req, error, blk_rq_bytes(req));
			BUG_ON(ret);
			break;
		default:
			BUG();
		}
	}

	info->ring.rsp_cons = i;

	if (i != info->ring.req_prod_pvt) {
		int more_to_do;
		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
		if (more_to_do)
			goto again;
	} else
		info->ring.sring->rsp_event = i + 1;

	kick_pending_request_queues(info);

	spin_unlock_irqrestore(&blkif_io_lock, flags);

	return IRQ_HANDLED;
}


static int setup_blkring(struct xenbus_device *dev,
			 struct blkfront_info *info)
{
	struct blkif_sring *sring;
	int err;

	info->ring_ref = GRANT_INVALID_REF;

	sring = (struct blkif_sring *)__get_free_page(GFP_NOIO | __GFP_HIGH);
	if (!sring) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
	if (err < 0) {
		free_page((unsigned long)sring);
		info->ring.sring = NULL;
		goto fail;
	}
	info->ring_ref = err;

	err = xenbus_alloc_evtchn(dev, &info->evtchn);
	if (err)
		goto fail;

	err = bind_evtchn_to_irqhandler(info->evtchn,
					blkif_interrupt,
					IRQF_SAMPLE_RANDOM, "blkif", info);
	if (err <= 0) {
		xenbus_dev_fatal(dev, err,
				 "bind_evtchn_to_irqhandler failed");
		goto fail;
	}
	info->irq = err;

	return 0;
fail:
	blkif_free(info, 0);
	return err;
}
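/*
 * The frontend advertises its half of the connection by writing three
 * nodes into its own xenstore directory, something like this (paths
 * abbreviated, values purely illustrative):
 *
 *   device/vbd/51712/ring-ref      = "8"
 *   device/vbd/51712/event-channel = "12"
 *   device/vbd/51712/protocol      = "x86_64-abi"
 *
 * The writes are wrapped in a single transaction so the backend
 * observes them atomically.
 */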
/* Common code used when first setting up, and when resuming. */
static int talk_to_backend(struct xenbus_device *dev,
			   struct blkfront_info *info)
{
	const char *message = NULL;
	struct xenbus_transaction xbt;
	int err;

	/* Create shared ring, alloc event channel. */
	err = setup_blkring(dev, info);
	if (err)
		goto out;

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_blkring;
	}

	err = xenbus_printf(xbt, dev->nodename,
			    "ring-ref", "%u", info->ring_ref);
	if (err) {
		message = "writing ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "event-channel", "%u", info->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
			    XEN_IO_PROTO_ABI_NATIVE);
	if (err) {
		message = "writing protocol";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_blkring;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_fatal(dev, err, "%s", message);
 destroy_blkring:
	blkif_free(info, 0);
 out:
	return err;
}


/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffer for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Initialised state.
 */
static int blkfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err, vdevice, i;
	struct blkfront_info *info;

	/* FIXME: Use dynamic device id if this is not set. */
	err = xenbus_scanf(XBT_NIL, dev->nodename,
			   "virtual-device", "%i", &vdevice);
	if (err != 1) {
		/* go looking in the extended area instead */
		err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
				   "%i", &vdevice);
		if (err != 1) {
			xenbus_dev_fatal(dev, err, "reading virtual-device");
			return err;
		}
	}

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
		return -ENOMEM;
	}

	info->xbdev = dev;
	info->vdevice = vdevice;
	info->connected = BLKIF_STATE_DISCONNECTED;
	INIT_WORK(&info->work, blkif_restart_queue);

	for (i = 0; i < BLK_RING_SIZE; i++)
		info->shadow[i].req.id = i+1;
	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;

	/* Front end dir is a number, which is used as the id. */
	info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
	dev->dev.driver_data = info;

	err = talk_to_backend(dev, info);
	if (err) {
		kfree(info);
		dev->dev.driver_data = NULL;
		return err;
	}

	return 0;
}
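/*
 * Neither grant references nor the shared ring survive a save/restore
 * or migration, so recovery re-grants every in-flight segment and
 * replays the requests recorded in the shadow copy onto the new ring.
 */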
static int blkif_recover(struct blkfront_info *info)
{
	int i;
	struct blkif_request *req;
	struct blk_shadow *copy;
	int j;

	/* Stage 1: Make a safe copy of the shadow state. */
	copy = kmalloc(sizeof(info->shadow),
		       GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
	if (!copy)
		return -ENOMEM;
	memcpy(copy, info->shadow, sizeof(info->shadow));

	/* Stage 2: Set up free list. */
	memset(&info->shadow, 0, sizeof(info->shadow));
	for (i = 0; i < BLK_RING_SIZE; i++)
		info->shadow[i].req.id = i+1;
	info->shadow_free = info->ring.req_prod_pvt;
	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;

	/* Stage 3: Find pending requests and requeue them. */
	for (i = 0; i < BLK_RING_SIZE; i++) {
		/* Not in use? */
		if (copy[i].request == 0)
			continue;

		/* Grab a request slot and copy shadow state into it. */
		req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
		*req = copy[i].req;

		/* We get a new request id, and must reset the shadow state. */
		req->id = get_id_from_freelist(info);
		memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));

		/* Rewrite any grant references invalidated by susp/resume. */
		for (j = 0; j < req->nr_segments; j++)
			gnttab_grant_foreign_access_ref(
				req->seg[j].gref,
				info->xbdev->otherend_id,
				pfn_to_mfn(info->shadow[req->id].frame[j]),
				rq_data_dir(
					(struct request *)
					info->shadow[req->id].request));
		info->shadow[req->id].req = *req;

		info->ring.req_prod_pvt++;
	}

	kfree(copy);

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	spin_lock_irq(&blkif_io_lock);

	/* Now safe for us to use the shared ring */
	info->connected = BLKIF_STATE_CONNECTED;

	/* Send off requeued requests */
	flush_requests(info);

	/* Kick any other new requests queued since we resumed */
	kick_pending_request_queues(info);

	spin_unlock_irq(&blkif_io_lock);

	return 0;
}

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our blkif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int blkfront_resume(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev->dev.driver_data;
	int err;

	dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);

	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);

	err = talk_to_backend(dev, info);
	if (info->connected == BLKIF_STATE_SUSPENDED && !err)
		err = blkif_recover(info);

	return err;
}
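/*
 * The backend publishes the virtual disk's parameters in its own
 * xenstore directory; blkfront_connect() gathers "sectors", "info"
 * (VDISK_* flags) and "sector-size", plus the optional
 * "feature-barrier" node, before creating the gendisk.
 */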
/*
 * Invoked when the backend is finally 'ready' (and has provided the
 * details about the physical device - #sectors, size, etc).
 */
static void blkfront_connect(struct blkfront_info *info)
{
	unsigned long long sectors;
	unsigned long sector_size;
	unsigned int binfo;
	int err;

	if ((info->connected == BLKIF_STATE_CONNECTED) ||
	    (info->connected == BLKIF_STATE_SUSPENDED))
		return;

	dev_dbg(&info->xbdev->dev, "%s:%s.\n",
		__func__, info->xbdev->otherend);

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "sectors", "%llu", &sectors,
			    "info", "%u", &binfo,
			    "sector-size", "%lu", &sector_size,
			    NULL);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err,
				 "reading backend fields at %s",
				 info->xbdev->otherend);
		return;
	}

	/* feature_barrier is an int, so scan with "%d" (not "%lu"). */
	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "feature-barrier", "%d", &info->feature_barrier,
			    NULL);
	if (err)
		info->feature_barrier = 0;

	err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
				 info->xbdev->otherend);
		return;
	}

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	/* Kick pending requests. */
	spin_lock_irq(&blkif_io_lock);
	info->connected = BLKIF_STATE_CONNECTED;
	kick_pending_request_queues(info);
	spin_unlock_irq(&blkif_io_lock);

	add_disk(info->gd);

	info->is_ready = 1;
}

/**
 * Handle the change of state of the backend to Closing.  We must delete our
 * device-layer structures now, to ensure that writes are flushed through to
 * the backend.  Once this is done, we can switch to Closed in
 * acknowledgement.
 */
static void blkfront_closing(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev->dev.driver_data;
	unsigned long flags;

	dev_dbg(&dev->dev, "blkfront_closing: %s removed\n", dev->nodename);

	if (info->rq == NULL)
		goto out;

	spin_lock_irqsave(&blkif_io_lock, flags);

	del_gendisk(info->gd);

	/* No more blkif_request(). */
	blk_stop_queue(info->rq);

	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);
	spin_unlock_irqrestore(&blkif_io_lock, flags);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_scheduled_work();

	blk_cleanup_queue(info->rq);
	info->rq = NULL;

 out:
	xenbus_frontend_closed(dev);
}

/**
 * Callback received when the backend's state changes.
 */
static void backend_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct blkfront_info *info = dev->dev.driver_data;
	struct block_device *bd;

	dev_dbg(&dev->dev, "blkfront:backend_changed.\n");

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitWait:
	case XenbusStateInitialised:
	case XenbusStateUnknown:
	case XenbusStateClosed:
		break;

	case XenbusStateConnected:
		blkfront_connect(info);
		break;

	case XenbusStateClosing:
		bd = bdget_disk(info->gd, 0);
		if (bd == NULL)
			xenbus_dev_fatal(dev, -ENODEV, "bdget failed");

		mutex_lock(&bd->bd_mutex);
		if (info->users > 0)
			xenbus_dev_error(dev, -EBUSY,
					 "Device in use; refusing to close");
		else
			blkfront_closing(dev);
		mutex_unlock(&bd->bd_mutex);
		bdput(bd);
		break;
	}
}

static int blkfront_remove(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev->dev.driver_data;

	dev_dbg(&dev->dev, "blkfront_remove: %s removed\n", dev->nodename);

	blkif_free(info, 0);

	kfree(info);

	return 0;
}

static int blkfront_is_ready(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev->dev.driver_data;

	return info->is_ready;
}
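/*
 * Open/release accounting: info->users counts openers so that a
 * backend-initiated close can be refused while the device is in use
 * and carried out from the final release instead.
 */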
static int blkif_open(struct block_device *bdev, fmode_t mode)
{
	struct blkfront_info *info = bdev->bd_disk->private_data;
	info->users++;
	return 0;
}

static int blkif_release(struct gendisk *disk, fmode_t mode)
{
	struct blkfront_info *info = disk->private_data;
	info->users--;
	if (info->users == 0) {
		/* Check whether we have been instructed to close.  We will
		   have ignored this request initially, as the device was
		   still mounted. */
		struct xenbus_device *dev = info->xbdev;
		enum xenbus_state state = xenbus_read_driver_state(dev->otherend);

		if (state == XenbusStateClosing && info->is_ready)
			blkfront_closing(dev);
	}
	return 0;
}

static struct block_device_operations xlvbd_block_fops =
{
	.owner = THIS_MODULE,
	.open = blkif_open,
	.release = blkif_release,
	.getgeo = blkif_getgeo,
	.locked_ioctl = blkif_ioctl,
};


static struct xenbus_device_id blkfront_ids[] = {
	{ "vbd" },
	{ "" }
};

static struct xenbus_driver blkfront = {
	.name = "vbd",
	.owner = THIS_MODULE,
	.ids = blkfront_ids,
	.probe = blkfront_probe,
	.remove = blkfront_remove,
	.resume = blkfront_resume,
	.otherend_changed = backend_changed,
	.is_ready = blkfront_is_ready,
};

static int __init xlblk_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
		printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
		       XENVBD_MAJOR, DEV_NAME);
		return -ENODEV;
	}

	return xenbus_register_frontend(&blkfront);
}
module_init(xlblk_init);


static void __exit xlblk_exit(void)
{
	return xenbus_unregister_driver(&blkfront);
}
module_exit(xlblk_exit);

MODULE_DESCRIPTION("Xen virtual block device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);
MODULE_ALIAS("xen:vbd");
MODULE_ALIAS("xenblk");