Merge tag 'for-linus-20130331' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
"Alright, this time from 10K up in the air.

Collection of fixes that have been queued up since the merge window
opened, hence postponed until later in the cycle. The pull request
contains:

- A bunch of fixes for the xen blk front/back driver.

- A round of fixes for the new IBM RamSan driver, addressing various
nasty issues.

- Fixes for multiple drivers from Wei Yongjun: bad handling of return
values and wrong pointer math.

- A fix for loop properly killing partitions when being detached."

* tag 'for-linus-20130331' of git://git.kernel.dk/linux-block: (25 commits)
mg_disk: fix error return code in mg_probe()
rsxx: remove unused variable
rsxx: enable error return of rsxx_eeh_save_issued_dmas()
block: removes dynamic allocation on stack
Block: blk-flush: Fixed indent code style
cciss: fix invalid use of sizeof in cciss_find_cfgtables()
loop: cleanup partitions when detaching loop device
loop: fix error return code in loop_add()
mtip32xx: fix error return code in mtip_pci_probe()
xen-blkfront: remove frame list from blk_shadow
xen-blkfront: pre-allocate pages for requests
xen-blkback: don't store dev_bus_addr
xen-blkfront: switch from llist to list
xen-blkback: fix foreach_grant_safe to handle empty lists
xen-blkfront: replace kmalloc and then memcpy with kmemdup
xen-blkback: fix dispatch_rw_block_io() error path
rsxx: fix missing unlock on error return in rsxx_eeh_remap_dmas()
Adding in EEH support to the IBM FlashSystem 70/80 device driver
block: IBM RamSan 70/80 error message bug fix.
block: IBM RamSan 70/80 branding changes.
...

+6 -6
MAINTAINERS
··· 3242 3242 F: drivers/base/firmware*.c 3243 3243 F: include/linux/firmware.h 3244 3244 3245 + FLASHSYSTEM DRIVER (IBM FlashSystem 70/80 PCI SSD Flash Card) 3246 + M: Joshua Morris <josh.h.morris@us.ibm.com> 3247 + M: Philip Kelleher <pjk1939@linux.vnet.ibm.com> 3248 + S: Maintained 3249 + F: drivers/block/rsxx/ 3250 + 3245 3251 FLOPPY DRIVER 3246 3252 M: Jiri Kosina <jkosina@suse.cz> 3247 3253 T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/floppy.git ··· 6556 6550 S: Maintained 6557 6551 F: Documentation/blockdev/ramdisk.txt 6558 6552 F: drivers/block/brd.c 6559 - 6560 - RAMSAM DRIVER (IBM RamSan 70/80 PCI SSD Flash Card) 6561 - M: Joshua Morris <josh.h.morris@us.ibm.com> 6562 - M: Philip Kelleher <pjk1939@linux.vnet.ibm.com> 6563 - S: Maintained 6564 - F: drivers/block/rsxx/ 6565 6553 6566 6554 RANDOM NUMBER DRIVER 6567 6555 M: Theodore Ts'o" <tytso@mit.edu>
+1 -1
block/blk-flush.c
··· 444 444 * copied from blk_rq_pos(rq). 445 445 */ 446 446 if (error_sector) 447 - *error_sector = bio->bi_sector; 447 + *error_sector = bio->bi_sector; 448 448 449 449 if (!bio_flagged(bio, BIO_UPTODATE)) 450 450 ret = -EIO;
+1
block/partition-generic.c
··· 257 257 258 258 hd_struct_put(part); 259 259 } 260 + EXPORT_SYMBOL(delete_partition); 260 261 261 262 static ssize_t whole_disk_show(struct device *dev, 262 263 struct device_attribute *attr, char *buf)
+2 -2
drivers/block/Kconfig
··· 532 532 If unsure, say N. 533 533 534 534 config BLK_DEV_RSXX 535 - tristate "RamSam PCIe Flash SSD Device Driver" 535 + tristate "IBM FlashSystem 70/80 PCIe SSD Device Driver" 536 536 depends on PCI 537 537 help 538 538 Device driver for IBM's high speed PCIe SSD 539 - storage devices: RamSan-70 and RamSan-80. 539 + storage devices: FlashSystem-70 and FlashSystem-80. 540 540 541 541 To compile this driver as a module, choose M here: the 542 542 module will be called rsxx.
+1 -1
drivers/block/cciss.c
··· 4206 4206 if (rc) 4207 4207 return rc; 4208 4208 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev, 4209 - cfg_base_addr_index) + cfg_offset, sizeof(h->cfgtable)); 4209 + cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable)); 4210 4210 if (!h->cfgtable) 4211 4211 return -ENOMEM; 4212 4212 rc = write_driver_ver_to_cfgtable(h->cfgtable);
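The cciss hunk above fixes a classic C mistake: sizeof(h->cfgtable) is the size of the pointer (4 or 8 bytes), not the size of the configuration table it points to, so far too small a region was being remapped. A minimal userspace sketch of the pitfall follows; the struct and its fields are illustrative, not the real cciss CfgTable layout.

#include <stdio.h>

/* Stand-in for a mapped hardware configuration table. */
struct cfgtable {
	unsigned int signature;
	unsigned int max_commands;
	unsigned char reserved[4088];
};

int main(void)
{
	struct cfgtable *cfg = NULL;

	/* Size of the pointer variable itself: 4 or 8 depending on the ABI. */
	printf("sizeof(cfg)  = %zu\n", sizeof(cfg));
	/* Size of the pointed-to structure: what the remap actually needs. */
	printf("sizeof(*cfg) = %zu\n", sizeof(*cfg));
	return 0;
}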
+20 -2
drivers/block/loop.c
··· 1044 1044 lo->lo_state = Lo_unbound; 1045 1045 /* This is safe: open() is still holding a reference. */ 1046 1046 module_put(THIS_MODULE); 1047 - if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev) 1048 - ioctl_by_bdev(bdev, BLKRRPART, 0); 1049 1047 lo->lo_flags = 0; 1050 1048 if (!part_shift) 1051 1049 lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN; 1052 1050 mutex_unlock(&lo->lo_ctl_mutex); 1051 + 1052 + /* 1053 + * Remove all partitions, since BLKRRPART won't remove user 1054 + * added partitions when max_part=0 1055 + */ 1056 + if (bdev) { 1057 + struct disk_part_iter piter; 1058 + struct hd_struct *part; 1059 + 1060 + mutex_lock_nested(&bdev->bd_mutex, 1); 1061 + invalidate_partition(bdev->bd_disk, 0); 1062 + disk_part_iter_init(&piter, bdev->bd_disk, 1063 + DISK_PITER_INCL_EMPTY); 1064 + while ((part = disk_part_iter_next(&piter))) 1065 + delete_partition(bdev->bd_disk, part->partno); 1066 + disk_part_iter_exit(&piter); 1067 + mutex_unlock(&bdev->bd_mutex); 1068 + } 1069 + 1053 1070 /* 1054 1071 * Need not hold lo_ctl_mutex to fput backing file. 1055 1072 * Calling fput holding lo_ctl_mutex triggers a circular ··· 1640 1623 goto out_free_dev; 1641 1624 i = err; 1642 1625 1626 + err = -ENOMEM; 1643 1627 lo->lo_queue = blk_alloc_queue(GFP_KERNEL); 1644 1628 if (!lo->lo_queue) 1645 1629 goto out_free_dev;
+3 -1
drivers/block/mg_disk.c
··· 890 890 gpio_direction_output(host->rst, 1); 891 891 892 892 /* reset out pin */ 893 - if (!(prv_data->dev_attr & MG_DEV_MASK)) 893 + if (!(prv_data->dev_attr & MG_DEV_MASK)) { 894 + err = -EINVAL; 894 895 goto probe_err_3a; 896 + } 895 897 896 898 if (prv_data->dev_attr != MG_BOOT_DEV) { 897 899 rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO,
+3 -1
drivers/block/mtip32xx/mtip32xx.c
··· 4224 4224 dd->isr_workq = create_workqueue(dd->workq_name); 4225 4225 if (!dd->isr_workq) { 4226 4226 dev_warn(&pdev->dev, "Can't create wq %d\n", dd->instance); 4227 + rv = -ENOMEM; 4227 4228 goto block_initialize_err; 4228 4229 } 4229 4230 ··· 4283 4282 INIT_WORK(&dd->work[7].work, mtip_workq_sdbf7); 4284 4283 4285 4284 pci_set_master(pdev); 4286 - if (pci_enable_msi(pdev)) { 4285 + rv = pci_enable_msi(pdev); 4286 + if (rv) { 4287 4287 dev_warn(&pdev->dev, 4288 4288 "Unable to enable MSI interrupt.\n"); 4289 4289 goto block_initialize_err;
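The mg_disk and mtip32xx hunks above (and the loop_add() fix in the shortlog) all correct the same pattern: an error path jumps to the cleanup label without setting the function's return variable, so a failed probe can fall through and report success. A minimal userspace sketch of the shape of the bug and the fix, using a hypothetical example_probe(); none of these names come from the drivers.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Probe-style function: allocate two resources, share one cleanup label. */
static int example_probe(int fail_second_step)
{
	int err;
	char *a, *b;

	a = malloc(16);
	if (!a)
		return -ENOMEM;

	b = fail_second_step ? NULL : malloc(16);
	if (!b) {
		/* The fix: without this assignment the function reaches
		 * "return err" with err unset (or a stale 0) and the caller
		 * sees a successful probe for a half-initialized device. */
		err = -ENOMEM;
		goto out_free;
	}

	free(b);
	free(a);
	return 0;

out_free:
	free(a);
	return err;
}

int main(void)
{
	printf("failing probe returned %d\n", example_probe(1));
	return 0;
}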
+1 -1
drivers/block/rsxx/Makefile
··· 1 1 obj-$(CONFIG_BLK_DEV_RSXX) += rsxx.o 2 - rsxx-y := config.o core.o cregs.o dev.o dma.o 2 + rsxx-objs := config.o core.o cregs.o dev.o dma.o
+3 -5
drivers/block/rsxx/config.c
··· 29 29 #include "rsxx_priv.h" 30 30 #include "rsxx_cfg.h" 31 31 32 - static void initialize_config(void *config) 32 + static void initialize_config(struct rsxx_card_cfg *cfg) 33 33 { 34 - struct rsxx_card_cfg *cfg = config; 35 - 36 34 cfg->hdr.version = RSXX_CFG_VERSION; 37 35 38 36 cfg->data.block_size = RSXX_HW_BLK_SIZE; 39 37 cfg->data.stripe_size = RSXX_HW_BLK_SIZE; 40 - cfg->data.vendor_id = RSXX_VENDOR_ID_TMS_IBM; 38 + cfg->data.vendor_id = RSXX_VENDOR_ID_IBM; 41 39 cfg->data.cache_order = (-1); 42 40 cfg->data.intr_coal.mode = RSXX_INTR_COAL_DISABLED; 43 41 cfg->data.intr_coal.count = 0; ··· 179 181 } else { 180 182 dev_info(CARD_TO_DEV(card), 181 183 "Initializing card configuration.\n"); 182 - initialize_config(card); 184 + initialize_config(&card->config); 183 185 st = rsxx_save_config(card); 184 186 if (st) 185 187 return st;
+224 -13
drivers/block/rsxx/core.c
··· 30 30 #include <linux/reboot.h> 31 31 #include <linux/slab.h> 32 32 #include <linux/bitops.h> 33 + #include <linux/delay.h> 33 34 34 35 #include <linux/genhd.h> 35 36 #include <linux/idr.h> ··· 40 39 41 40 #define NO_LEGACY 0 42 41 43 - MODULE_DESCRIPTION("IBM RamSan PCIe Flash SSD Device Driver"); 44 - MODULE_AUTHOR("IBM <support@ramsan.com>"); 42 + MODULE_DESCRIPTION("IBM FlashSystem 70/80 PCIe SSD Device Driver"); 43 + MODULE_AUTHOR("Joshua Morris/Philip Kelleher, IBM"); 45 44 MODULE_LICENSE("GPL"); 46 45 MODULE_VERSION(DRIVER_VERSION); 47 46 ··· 53 52 static DEFINE_SPINLOCK(rsxx_ida_lock); 54 53 55 54 /*----------------- Interrupt Control & Handling -------------------*/ 55 + 56 + static void rsxx_mask_interrupts(struct rsxx_cardinfo *card) 57 + { 58 + card->isr_mask = 0; 59 + card->ier_mask = 0; 60 + } 61 + 56 62 static void __enable_intr(unsigned int *mask, unsigned int intr) 57 63 { 58 64 *mask |= intr; ··· 79 71 */ 80 72 void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr) 81 73 { 82 - if (unlikely(card->halt)) 74 + if (unlikely(card->halt) || 75 + unlikely(card->eeh_state)) 83 76 return; 84 77 85 78 __enable_intr(&card->ier_mask, intr); ··· 89 80 90 81 void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr) 91 82 { 83 + if (unlikely(card->eeh_state)) 84 + return; 85 + 92 86 __disable_intr(&card->ier_mask, intr); 93 87 iowrite32(card->ier_mask, card->regmap + IER); 94 88 } ··· 99 87 void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card, 100 88 unsigned int intr) 101 89 { 102 - if (unlikely(card->halt)) 90 + if (unlikely(card->halt) || 91 + unlikely(card->eeh_state)) 103 92 return; 104 93 105 94 __enable_intr(&card->isr_mask, intr); ··· 110 97 void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card, 111 98 unsigned int intr) 112 99 { 100 + if (unlikely(card->eeh_state)) 101 + return; 102 + 113 103 __disable_intr(&card->isr_mask, intr); 114 104 __disable_intr(&card->ier_mask, intr); 115 105 iowrite32(card->ier_mask, card->regmap + IER); ··· 130 114 131 115 do { 132 116 reread_isr = 0; 117 + 118 + if (unlikely(card->eeh_state)) 119 + break; 133 120 134 121 isr = ioread32(card->regmap + ISR); 135 122 if (isr == 0xffffffff) { ··· 180 161 } 181 162 182 163 /*----------------- Card Event Handler -------------------*/ 183 - static char *rsxx_card_state_to_str(unsigned int state) 164 + static const char * const rsxx_card_state_to_str(unsigned int state) 184 165 { 185 - static char *state_strings[] = { 166 + static const char * const state_strings[] = { 186 167 "Unknown", "Shutdown", "Starting", "Formatting", 187 168 "Uninitialized", "Good", "Shutting Down", 188 169 "Fault", "Read Only Fault", "dStroying" ··· 323 304 return 0; 324 305 } 325 306 307 + static int rsxx_eeh_frozen(struct pci_dev *dev) 308 + { 309 + struct rsxx_cardinfo *card = pci_get_drvdata(dev); 310 + int i; 311 + int st; 312 + 313 + dev_warn(&dev->dev, "IBM FlashSystem PCI: preparing for slot reset.\n"); 314 + 315 + card->eeh_state = 1; 316 + rsxx_mask_interrupts(card); 317 + 318 + /* 319 + * We need to guarantee that the write for eeh_state and masking 320 + * interrupts does not become reordered. This will prevent a possible 321 + * race condition with the EEH code. 
322 + */ 323 + wmb(); 324 + 325 + pci_disable_device(dev); 326 + 327 + st = rsxx_eeh_save_issued_dmas(card); 328 + if (st) 329 + return st; 330 + 331 + rsxx_eeh_save_issued_creg(card); 332 + 333 + for (i = 0; i < card->n_targets; i++) { 334 + if (card->ctrl[i].status.buf) 335 + pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8, 336 + card->ctrl[i].status.buf, 337 + card->ctrl[i].status.dma_addr); 338 + if (card->ctrl[i].cmd.buf) 339 + pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8, 340 + card->ctrl[i].cmd.buf, 341 + card->ctrl[i].cmd.dma_addr); 342 + } 343 + 344 + return 0; 345 + } 346 + 347 + static void rsxx_eeh_failure(struct pci_dev *dev) 348 + { 349 + struct rsxx_cardinfo *card = pci_get_drvdata(dev); 350 + int i; 351 + 352 + dev_err(&dev->dev, "IBM FlashSystem PCI: disabling failed card.\n"); 353 + 354 + card->eeh_state = 1; 355 + 356 + for (i = 0; i < card->n_targets; i++) 357 + del_timer_sync(&card->ctrl[i].activity_timer); 358 + 359 + rsxx_eeh_cancel_dmas(card); 360 + } 361 + 362 + static int rsxx_eeh_fifo_flush_poll(struct rsxx_cardinfo *card) 363 + { 364 + unsigned int status; 365 + int iter = 0; 366 + 367 + /* We need to wait for the hardware to reset */ 368 + while (iter++ < 10) { 369 + status = ioread32(card->regmap + PCI_RECONFIG); 370 + 371 + if (status & RSXX_FLUSH_BUSY) { 372 + ssleep(1); 373 + continue; 374 + } 375 + 376 + if (status & RSXX_FLUSH_TIMEOUT) 377 + dev_warn(CARD_TO_DEV(card), "HW: flash controller timeout\n"); 378 + return 0; 379 + } 380 + 381 + /* Hardware failed resetting itself. */ 382 + return -1; 383 + } 384 + 385 + static pci_ers_result_t rsxx_error_detected(struct pci_dev *dev, 386 + enum pci_channel_state error) 387 + { 388 + int st; 389 + 390 + if (dev->revision < RSXX_EEH_SUPPORT) 391 + return PCI_ERS_RESULT_NONE; 392 + 393 + if (error == pci_channel_io_perm_failure) { 394 + rsxx_eeh_failure(dev); 395 + return PCI_ERS_RESULT_DISCONNECT; 396 + } 397 + 398 + st = rsxx_eeh_frozen(dev); 399 + if (st) { 400 + dev_err(&dev->dev, "Slot reset setup failed\n"); 401 + rsxx_eeh_failure(dev); 402 + return PCI_ERS_RESULT_DISCONNECT; 403 + } 404 + 405 + return PCI_ERS_RESULT_NEED_RESET; 406 + } 407 + 408 + static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev) 409 + { 410 + struct rsxx_cardinfo *card = pci_get_drvdata(dev); 411 + unsigned long flags; 412 + int i; 413 + int st; 414 + 415 + dev_warn(&dev->dev, 416 + "IBM FlashSystem PCI: recovering from slot reset.\n"); 417 + 418 + st = pci_enable_device(dev); 419 + if (st) 420 + goto failed_hw_setup; 421 + 422 + pci_set_master(dev); 423 + 424 + st = rsxx_eeh_fifo_flush_poll(card); 425 + if (st) 426 + goto failed_hw_setup; 427 + 428 + rsxx_dma_queue_reset(card); 429 + 430 + for (i = 0; i < card->n_targets; i++) { 431 + st = rsxx_hw_buffers_init(dev, &card->ctrl[i]); 432 + if (st) 433 + goto failed_hw_buffers_init; 434 + } 435 + 436 + if (card->config_valid) 437 + rsxx_dma_configure(card); 438 + 439 + /* Clears the ISR register from spurious interrupts */ 440 + st = ioread32(card->regmap + ISR); 441 + 442 + card->eeh_state = 0; 443 + 444 + st = rsxx_eeh_remap_dmas(card); 445 + if (st) 446 + goto failed_remap_dmas; 447 + 448 + spin_lock_irqsave(&card->irq_lock, flags); 449 + if (card->n_targets & RSXX_MAX_TARGETS) 450 + rsxx_enable_ier_and_isr(card, CR_INTR_ALL_G); 451 + else 452 + rsxx_enable_ier_and_isr(card, CR_INTR_ALL_C); 453 + spin_unlock_irqrestore(&card->irq_lock, flags); 454 + 455 + rsxx_kick_creg_queue(card); 456 + 457 + for (i = 0; i < card->n_targets; i++) { 458 + 
spin_lock(&card->ctrl[i].queue_lock); 459 + if (list_empty(&card->ctrl[i].queue)) { 460 + spin_unlock(&card->ctrl[i].queue_lock); 461 + continue; 462 + } 463 + spin_unlock(&card->ctrl[i].queue_lock); 464 + 465 + queue_work(card->ctrl[i].issue_wq, 466 + &card->ctrl[i].issue_dma_work); 467 + } 468 + 469 + dev_info(&dev->dev, "IBM FlashSystem PCI: recovery complete.\n"); 470 + 471 + return PCI_ERS_RESULT_RECOVERED; 472 + 473 + failed_hw_buffers_init: 474 + failed_remap_dmas: 475 + for (i = 0; i < card->n_targets; i++) { 476 + if (card->ctrl[i].status.buf) 477 + pci_free_consistent(card->dev, 478 + STATUS_BUFFER_SIZE8, 479 + card->ctrl[i].status.buf, 480 + card->ctrl[i].status.dma_addr); 481 + if (card->ctrl[i].cmd.buf) 482 + pci_free_consistent(card->dev, 483 + COMMAND_BUFFER_SIZE8, 484 + card->ctrl[i].cmd.buf, 485 + card->ctrl[i].cmd.dma_addr); 486 + } 487 + failed_hw_setup: 488 + rsxx_eeh_failure(dev); 489 + return PCI_ERS_RESULT_DISCONNECT; 490 + 491 + } 492 + 326 493 /*----------------- Driver Initialization & Setup -------------------*/ 327 494 /* Returns: 0 if the driver is compatible with the device 328 495 -1 if the driver is NOT compatible with the device */ ··· 588 383 589 384 spin_lock_init(&card->irq_lock); 590 385 card->halt = 0; 386 + card->eeh_state = 0; 591 387 592 388 spin_lock_irq(&card->irq_lock); 593 389 rsxx_disable_ier_and_isr(card, CR_INTR_ALL); ··· 744 538 rsxx_disable_ier_and_isr(card, CR_INTR_EVENT); 745 539 spin_unlock_irqrestore(&card->irq_lock, flags); 746 540 747 - /* Prevent work_structs from re-queuing themselves. */ 748 - card->halt = 1; 749 - 750 541 cancel_work_sync(&card->event_work); 751 542 752 543 rsxx_destroy_dev(card); ··· 752 549 spin_lock_irqsave(&card->irq_lock, flags); 753 550 rsxx_disable_ier_and_isr(card, CR_INTR_ALL); 754 551 spin_unlock_irqrestore(&card->irq_lock, flags); 552 + 553 + /* Prevent work_structs from re-queuing themselves. */ 554 + card->halt = 1; 555 + 755 556 free_irq(dev->irq, card); 756 557 757 558 if (!force_legacy) ··· 799 592 card_shutdown(card); 800 593 } 801 594 595 + static const struct pci_error_handlers rsxx_err_handler = { 596 + .error_detected = rsxx_error_detected, 597 + .slot_reset = rsxx_slot_reset, 598 + }; 599 + 802 600 static DEFINE_PCI_DEVICE_TABLE(rsxx_pci_ids) = { 803 - {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70_FLASH)}, 804 - {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70D_FLASH)}, 805 - {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS80_FLASH)}, 806 - {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS81_FLASH)}, 601 + {PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS70_FLASH)}, 602 + {PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS80_FLASH)}, 807 603 {0,}, 808 604 }; 809 605 ··· 819 609 .remove = rsxx_pci_remove, 820 610 .suspend = rsxx_pci_suspend, 821 611 .shutdown = rsxx_pci_shutdown, 612 + .err_handler = &rsxx_err_handler, 822 613 }; 823 614 824 615 static int __init rsxx_core_init(void)
+72 -40
drivers/block/rsxx/cregs.c
··· 58 58 #error Unknown endianess!!! Aborting... 59 59 #endif 60 60 61 - static void copy_to_creg_data(struct rsxx_cardinfo *card, 61 + static int copy_to_creg_data(struct rsxx_cardinfo *card, 62 62 int cnt8, 63 63 void *buf, 64 64 unsigned int stream) 65 65 { 66 66 int i = 0; 67 67 u32 *data = buf; 68 + 69 + if (unlikely(card->eeh_state)) 70 + return -EIO; 68 71 69 72 for (i = 0; cnt8 > 0; i++, cnt8 -= 4) { 70 73 /* ··· 79 76 else 80 77 iowrite32(data[i], card->regmap + CREG_DATA(i)); 81 78 } 79 + 80 + return 0; 82 81 } 83 82 84 83 85 - static void copy_from_creg_data(struct rsxx_cardinfo *card, 84 + static int copy_from_creg_data(struct rsxx_cardinfo *card, 86 85 int cnt8, 87 86 void *buf, 88 87 unsigned int stream) 89 88 { 90 89 int i = 0; 91 90 u32 *data = buf; 91 + 92 + if (unlikely(card->eeh_state)) 93 + return -EIO; 92 94 93 95 for (i = 0; cnt8 > 0; i++, cnt8 -= 4) { 94 96 /* ··· 105 97 else 106 98 data[i] = ioread32(card->regmap + CREG_DATA(i)); 107 99 } 108 - } 109 100 110 - static struct creg_cmd *pop_active_cmd(struct rsxx_cardinfo *card) 111 - { 112 - struct creg_cmd *cmd; 113 - 114 - /* 115 - * Spin lock is needed because this can be called in atomic/interrupt 116 - * context. 117 - */ 118 - spin_lock_bh(&card->creg_ctrl.lock); 119 - cmd = card->creg_ctrl.active_cmd; 120 - card->creg_ctrl.active_cmd = NULL; 121 - spin_unlock_bh(&card->creg_ctrl.lock); 122 - 123 - return cmd; 101 + return 0; 124 102 } 125 103 126 104 static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd) 127 105 { 106 + int st; 107 + 108 + if (unlikely(card->eeh_state)) 109 + return; 110 + 128 111 iowrite32(cmd->addr, card->regmap + CREG_ADD); 129 112 iowrite32(cmd->cnt8, card->regmap + CREG_CNT); 130 113 131 114 if (cmd->op == CREG_OP_WRITE) { 132 - if (cmd->buf) 133 - copy_to_creg_data(card, cmd->cnt8, 134 - cmd->buf, cmd->stream); 115 + if (cmd->buf) { 116 + st = copy_to_creg_data(card, cmd->cnt8, 117 + cmd->buf, cmd->stream); 118 + if (st) 119 + return; 120 + } 135 121 } 136 122 137 - /* 138 - * Data copy must complete before initiating the command. This is 139 - * needed for weakly ordered processors (i.e. PowerPC), so that all 140 - * neccessary registers are written before we kick the hardware. 141 - */ 142 - wmb(); 123 + if (unlikely(card->eeh_state)) 124 + return; 143 125 144 126 /* Setting the valid bit will kick off the command. 
*/ 145 127 iowrite32(cmd->op, card->regmap + CREG_CMD); ··· 194 196 cmd->cb_private = cb_private; 195 197 cmd->status = 0; 196 198 197 - spin_lock(&card->creg_ctrl.lock); 199 + spin_lock_bh(&card->creg_ctrl.lock); 198 200 list_add_tail(&cmd->list, &card->creg_ctrl.queue); 199 201 card->creg_ctrl.q_depth++; 200 202 creg_kick_queue(card); 201 - spin_unlock(&card->creg_ctrl.lock); 203 + spin_unlock_bh(&card->creg_ctrl.lock); 202 204 203 205 return 0; 204 206 } ··· 208 210 struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) data; 209 211 struct creg_cmd *cmd; 210 212 211 - cmd = pop_active_cmd(card); 213 + spin_lock(&card->creg_ctrl.lock); 214 + cmd = card->creg_ctrl.active_cmd; 215 + card->creg_ctrl.active_cmd = NULL; 216 + spin_unlock(&card->creg_ctrl.lock); 217 + 212 218 if (cmd == NULL) { 213 219 card->creg_ctrl.creg_stats.creg_timeout++; 214 220 dev_warn(CARD_TO_DEV(card), ··· 249 247 if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0) 250 248 card->creg_ctrl.creg_stats.failed_cancel_timer++; 251 249 252 - cmd = pop_active_cmd(card); 250 + spin_lock_bh(&card->creg_ctrl.lock); 251 + cmd = card->creg_ctrl.active_cmd; 252 + card->creg_ctrl.active_cmd = NULL; 253 + spin_unlock_bh(&card->creg_ctrl.lock); 254 + 253 255 if (cmd == NULL) { 254 256 dev_err(CARD_TO_DEV(card), 255 257 "Spurious creg interrupt!\n"); ··· 293 287 goto creg_done; 294 288 } 295 289 296 - copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream); 290 + st = copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream); 297 291 } 298 292 299 293 creg_done: ··· 302 296 303 297 kmem_cache_free(creg_cmd_pool, cmd); 304 298 305 - spin_lock(&card->creg_ctrl.lock); 299 + spin_lock_bh(&card->creg_ctrl.lock); 306 300 card->creg_ctrl.active = 0; 307 301 creg_kick_queue(card); 308 - spin_unlock(&card->creg_ctrl.lock); 302 + spin_unlock_bh(&card->creg_ctrl.lock); 309 303 } 310 304 311 305 static void creg_reset(struct rsxx_cardinfo *card) ··· 330 324 "Resetting creg interface for recovery\n"); 331 325 332 326 /* Cancel outstanding commands */ 333 - spin_lock(&card->creg_ctrl.lock); 327 + spin_lock_bh(&card->creg_ctrl.lock); 334 328 list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) { 335 329 list_del(&cmd->list); 336 330 card->creg_ctrl.q_depth--; ··· 351 345 352 346 card->creg_ctrl.active = 0; 353 347 } 354 - spin_unlock(&card->creg_ctrl.lock); 348 + spin_unlock_bh(&card->creg_ctrl.lock); 355 349 356 350 card->creg_ctrl.reset = 0; 357 351 spin_lock_irqsave(&card->irq_lock, flags); ··· 405 399 return st; 406 400 407 401 /* 408 - * This timeout is neccessary for unresponsive hardware. The additional 402 + * This timeout is necessary for unresponsive hardware. The additional 409 403 * 20 seconds to used to guarantee that each cregs requests has time to 410 404 * complete. 411 405 */ 412 - timeout = msecs_to_jiffies((CREG_TIMEOUT_MSEC * 413 - card->creg_ctrl.q_depth) + 20000); 406 + timeout = msecs_to_jiffies(CREG_TIMEOUT_MSEC * 407 + card->creg_ctrl.q_depth + 20000); 414 408 415 409 /* 416 410 * The creg interface is guaranteed to complete. 
It has a timeout ··· 696 690 return 0; 697 691 } 698 692 693 + void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card) 694 + { 695 + struct creg_cmd *cmd = NULL; 696 + 697 + cmd = card->creg_ctrl.active_cmd; 698 + card->creg_ctrl.active_cmd = NULL; 699 + 700 + if (cmd) { 701 + del_timer_sync(&card->creg_ctrl.cmd_timer); 702 + 703 + spin_lock_bh(&card->creg_ctrl.lock); 704 + list_add(&cmd->list, &card->creg_ctrl.queue); 705 + card->creg_ctrl.q_depth++; 706 + card->creg_ctrl.active = 0; 707 + spin_unlock_bh(&card->creg_ctrl.lock); 708 + } 709 + } 710 + 711 + void rsxx_kick_creg_queue(struct rsxx_cardinfo *card) 712 + { 713 + spin_lock_bh(&card->creg_ctrl.lock); 714 + if (!list_empty(&card->creg_ctrl.queue)) 715 + creg_kick_queue(card); 716 + spin_unlock_bh(&card->creg_ctrl.lock); 717 + } 718 + 699 719 /*------------ Initialization & Setup --------------*/ 700 720 int rsxx_creg_setup(struct rsxx_cardinfo *card) 701 721 { ··· 744 712 int cnt = 0; 745 713 746 714 /* Cancel outstanding commands */ 747 - spin_lock(&card->creg_ctrl.lock); 715 + spin_lock_bh(&card->creg_ctrl.lock); 748 716 list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) { 749 717 list_del(&cmd->list); 750 718 if (cmd->cb) ··· 769 737 "Canceled active creg command\n"); 770 738 kmem_cache_free(creg_cmd_pool, cmd); 771 739 } 772 - spin_unlock(&card->creg_ctrl.lock); 740 + spin_unlock_bh(&card->creg_ctrl.lock); 773 741 774 742 cancel_work_sync(&card->creg_ctrl.done_work); 775 743 }
+168 -71
drivers/block/rsxx/dma.c
··· 28 28 struct rsxx_dma { 29 29 struct list_head list; 30 30 u8 cmd; 31 - unsigned int laddr; /* Logical address on the ramsan */ 31 + unsigned int laddr; /* Logical address */ 32 32 struct { 33 33 u32 off; 34 34 u32 cnt; ··· 81 81 HW_STATUS_FAULT = 0x08, 82 82 }; 83 83 84 - #define STATUS_BUFFER_SIZE8 4096 85 - #define COMMAND_BUFFER_SIZE8 4096 86 - 87 84 static struct kmem_cache *rsxx_dma_pool; 88 85 89 86 struct dma_tracker { ··· 119 122 return tgt; 120 123 } 121 124 122 - static void rsxx_dma_queue_reset(struct rsxx_cardinfo *card) 125 + void rsxx_dma_queue_reset(struct rsxx_cardinfo *card) 123 126 { 124 127 /* Reset all DMA Command/Status Queues */ 125 128 iowrite32(DMA_QUEUE_RESET, card->regmap + RESET); ··· 207 210 u32 q_depth = 0; 208 211 u32 intr_coal; 209 212 210 - if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE) 213 + if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE || 214 + unlikely(card->eeh_state)) 211 215 return; 212 216 213 217 for (i = 0; i < card->n_targets; i++) ··· 221 223 } 222 224 223 225 /*----------------- RSXX DMA Handling -------------------*/ 224 - static void rsxx_complete_dma(struct rsxx_cardinfo *card, 226 + static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl, 225 227 struct rsxx_dma *dma, 226 228 unsigned int status) 227 229 { 228 230 if (status & DMA_SW_ERR) 229 - printk_ratelimited(KERN_ERR 230 - "SW Error in DMA(cmd x%02x, laddr x%08x)\n", 231 - dma->cmd, dma->laddr); 231 + ctrl->stats.dma_sw_err++; 232 232 if (status & DMA_HW_FAULT) 233 - printk_ratelimited(KERN_ERR 234 - "HW Fault in DMA(cmd x%02x, laddr x%08x)\n", 235 - dma->cmd, dma->laddr); 233 + ctrl->stats.dma_hw_fault++; 236 234 if (status & DMA_CANCELLED) 237 - printk_ratelimited(KERN_ERR 238 - "DMA Cancelled(cmd x%02x, laddr x%08x)\n", 239 - dma->cmd, dma->laddr); 235 + ctrl->stats.dma_cancelled++; 240 236 241 237 if (dma->dma_addr) 242 - pci_unmap_page(card->dev, dma->dma_addr, get_dma_size(dma), 238 + pci_unmap_page(ctrl->card->dev, dma->dma_addr, 239 + get_dma_size(dma), 243 240 dma->cmd == HW_CMD_BLK_WRITE ? 244 241 PCI_DMA_TODEVICE : 245 242 PCI_DMA_FROMDEVICE); 246 243 247 244 if (dma->cb) 248 - dma->cb(card, dma->cb_data, status ? 1 : 0); 245 + dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0); 249 246 250 247 kmem_cache_free(rsxx_dma_pool, dma); 251 248 } ··· 323 330 if (requeue_cmd) 324 331 rsxx_requeue_dma(ctrl, dma); 325 332 else 326 - rsxx_complete_dma(ctrl->card, dma, status); 333 + rsxx_complete_dma(ctrl, dma, status); 327 334 } 328 335 329 336 static void dma_engine_stalled(unsigned long data) 330 337 { 331 338 struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data; 332 339 333 - if (atomic_read(&ctrl->stats.hw_q_depth) == 0) 340 + if (atomic_read(&ctrl->stats.hw_q_depth) == 0 || 341 + unlikely(ctrl->card->eeh_state)) 334 342 return; 335 343 336 344 if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) { ··· 363 369 ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work); 364 370 hw_cmd_buf = ctrl->cmd.buf; 365 371 366 - if (unlikely(ctrl->card->halt)) 372 + if (unlikely(ctrl->card->halt) || 373 + unlikely(ctrl->card->eeh_state)) 367 374 return; 368 375 369 376 while (1) { ··· 392 397 */ 393 398 if (unlikely(ctrl->card->dma_fault)) { 394 399 push_tracker(ctrl->trackers, tag); 395 - rsxx_complete_dma(ctrl->card, dma, DMA_CANCELLED); 400 + rsxx_complete_dma(ctrl, dma, DMA_CANCELLED); 396 401 continue; 397 402 } 398 403 ··· 427 432 428 433 /* Let HW know we've queued commands. 
*/ 429 434 if (cmds_pending) { 430 - /* 431 - * We must guarantee that the CPU writes to 'ctrl->cmd.buf' 432 - * (which is in PCI-consistent system-memory) from the loop 433 - * above make it into the coherency domain before the 434 - * following PIO "trigger" updating the cmd.idx. A WMB is 435 - * sufficient. We need not explicitly CPU cache-flush since 436 - * the memory is a PCI-consistent (ie; coherent) mapping. 437 - */ 438 - wmb(); 439 - 440 435 atomic_add(cmds_pending, &ctrl->stats.hw_q_depth); 441 436 mod_timer(&ctrl->activity_timer, 442 437 jiffies + DMA_ACTIVITY_TIMEOUT); 438 + 439 + if (unlikely(ctrl->card->eeh_state)) { 440 + del_timer_sync(&ctrl->activity_timer); 441 + return; 442 + } 443 + 443 444 iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX); 444 445 } 445 446 } ··· 454 463 hw_st_buf = ctrl->status.buf; 455 464 456 465 if (unlikely(ctrl->card->halt) || 457 - unlikely(ctrl->card->dma_fault)) 466 + unlikely(ctrl->card->dma_fault) || 467 + unlikely(ctrl->card->eeh_state)) 458 468 return; 459 469 460 470 count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count); ··· 500 508 if (status) 501 509 rsxx_handle_dma_error(ctrl, dma, status); 502 510 else 503 - rsxx_complete_dma(ctrl->card, dma, 0); 511 + rsxx_complete_dma(ctrl, dma, 0); 504 512 505 513 push_tracker(ctrl->trackers, tag); 506 514 ··· 719 727 720 728 721 729 /*----------------- DMA Engine Initialization & Setup -------------------*/ 730 + int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl) 731 + { 732 + ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8, 733 + &ctrl->status.dma_addr); 734 + ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8, 735 + &ctrl->cmd.dma_addr); 736 + if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL) 737 + return -ENOMEM; 738 + 739 + memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8); 740 + iowrite32(lower_32_bits(ctrl->status.dma_addr), 741 + ctrl->regmap + SB_ADD_LO); 742 + iowrite32(upper_32_bits(ctrl->status.dma_addr), 743 + ctrl->regmap + SB_ADD_HI); 744 + 745 + memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8); 746 + iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO); 747 + iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI); 748 + 749 + ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT); 750 + if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) { 751 + dev_crit(&dev->dev, "Failed reading status cnt x%x\n", 752 + ctrl->status.idx); 753 + return -EINVAL; 754 + } 755 + iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT); 756 + iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT); 757 + 758 + ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX); 759 + if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) { 760 + dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n", 761 + ctrl->status.idx); 762 + return -EINVAL; 763 + } 764 + iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX); 765 + iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX); 766 + 767 + return 0; 768 + } 769 + 722 770 static int rsxx_dma_ctrl_init(struct pci_dev *dev, 723 771 struct rsxx_dma_ctrl *ctrl) 724 772 { 725 773 int i; 774 + int st; 726 775 727 776 memset(&ctrl->stats, 0, sizeof(ctrl->stats)); 728 - 729 - ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8, 730 - &ctrl->status.dma_addr); 731 - ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8, 732 - &ctrl->cmd.dma_addr); 733 - if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL) 734 - return -ENOMEM; 735 777 736 778 ctrl->trackers = 
vmalloc(DMA_TRACKER_LIST_SIZE8); 737 779 if (!ctrl->trackers) ··· 796 770 INIT_WORK(&ctrl->issue_dma_work, rsxx_issue_dmas); 797 771 INIT_WORK(&ctrl->dma_done_work, rsxx_dma_done); 798 772 799 - memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8); 800 - iowrite32(lower_32_bits(ctrl->status.dma_addr), 801 - ctrl->regmap + SB_ADD_LO); 802 - iowrite32(upper_32_bits(ctrl->status.dma_addr), 803 - ctrl->regmap + SB_ADD_HI); 804 - 805 - memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8); 806 - iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO); 807 - iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI); 808 - 809 - ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT); 810 - if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) { 811 - dev_crit(&dev->dev, "Failed reading status cnt x%x\n", 812 - ctrl->status.idx); 813 - return -EINVAL; 814 - } 815 - iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT); 816 - iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT); 817 - 818 - ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX); 819 - if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) { 820 - dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n", 821 - ctrl->status.idx); 822 - return -EINVAL; 823 - } 824 - iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX); 825 - iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX); 826 - 827 - wmb(); 773 + st = rsxx_hw_buffers_init(dev, ctrl); 774 + if (st) 775 + return st; 828 776 829 777 return 0; 830 778 } ··· 834 834 return 0; 835 835 } 836 836 837 - static int rsxx_dma_configure(struct rsxx_cardinfo *card) 837 + int rsxx_dma_configure(struct rsxx_cardinfo *card) 838 838 { 839 839 u32 intr_coal; 840 840 ··· 980 980 } 981 981 } 982 982 983 + int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card) 984 + { 985 + int i; 986 + int j; 987 + int cnt; 988 + struct rsxx_dma *dma; 989 + struct list_head *issued_dmas; 990 + 991 + issued_dmas = kzalloc(sizeof(*issued_dmas) * card->n_targets, 992 + GFP_KERNEL); 993 + if (!issued_dmas) 994 + return -ENOMEM; 995 + 996 + for (i = 0; i < card->n_targets; i++) { 997 + INIT_LIST_HEAD(&issued_dmas[i]); 998 + cnt = 0; 999 + for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) { 1000 + dma = get_tracker_dma(card->ctrl[i].trackers, j); 1001 + if (dma == NULL) 1002 + continue; 1003 + 1004 + if (dma->cmd == HW_CMD_BLK_WRITE) 1005 + card->ctrl[i].stats.writes_issued--; 1006 + else if (dma->cmd == HW_CMD_BLK_DISCARD) 1007 + card->ctrl[i].stats.discards_issued--; 1008 + else 1009 + card->ctrl[i].stats.reads_issued--; 1010 + 1011 + list_add_tail(&dma->list, &issued_dmas[i]); 1012 + push_tracker(card->ctrl[i].trackers, j); 1013 + cnt++; 1014 + } 1015 + 1016 + spin_lock(&card->ctrl[i].queue_lock); 1017 + list_splice(&issued_dmas[i], &card->ctrl[i].queue); 1018 + 1019 + atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth); 1020 + card->ctrl[i].stats.sw_q_depth += cnt; 1021 + card->ctrl[i].e_cnt = 0; 1022 + 1023 + list_for_each_entry(dma, &card->ctrl[i].queue, list) { 1024 + if (dma->dma_addr) 1025 + pci_unmap_page(card->dev, dma->dma_addr, 1026 + get_dma_size(dma), 1027 + dma->cmd == HW_CMD_BLK_WRITE ? 
1028 + PCI_DMA_TODEVICE : 1029 + PCI_DMA_FROMDEVICE); 1030 + } 1031 + spin_unlock(&card->ctrl[i].queue_lock); 1032 + } 1033 + 1034 + kfree(issued_dmas); 1035 + 1036 + return 0; 1037 + } 1038 + 1039 + void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card) 1040 + { 1041 + struct rsxx_dma *dma; 1042 + struct rsxx_dma *tmp; 1043 + int i; 1044 + 1045 + for (i = 0; i < card->n_targets; i++) { 1046 + spin_lock(&card->ctrl[i].queue_lock); 1047 + list_for_each_entry_safe(dma, tmp, &card->ctrl[i].queue, list) { 1048 + list_del(&dma->list); 1049 + 1050 + rsxx_complete_dma(&card->ctrl[i], dma, DMA_CANCELLED); 1051 + } 1052 + spin_unlock(&card->ctrl[i].queue_lock); 1053 + } 1054 + } 1055 + 1056 + int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card) 1057 + { 1058 + struct rsxx_dma *dma; 1059 + int i; 1060 + 1061 + for (i = 0; i < card->n_targets; i++) { 1062 + spin_lock(&card->ctrl[i].queue_lock); 1063 + list_for_each_entry(dma, &card->ctrl[i].queue, list) { 1064 + dma->dma_addr = pci_map_page(card->dev, dma->page, 1065 + dma->pg_off, get_dma_size(dma), 1066 + dma->cmd == HW_CMD_BLK_WRITE ? 1067 + PCI_DMA_TODEVICE : 1068 + PCI_DMA_FROMDEVICE); 1069 + if (!dma->dma_addr) { 1070 + spin_unlock(&card->ctrl[i].queue_lock); 1071 + kmem_cache_free(rsxx_dma_pool, dma); 1072 + return -ENOMEM; 1073 + } 1074 + } 1075 + spin_unlock(&card->ctrl[i].queue_lock); 1076 + } 1077 + 1078 + return 0; 1079 + } 983 1080 984 1081 int rsxx_dma_init(void) 985 1082 {
+4 -2
drivers/block/rsxx/rsxx.h
··· 27 27 28 28 /*----------------- IOCTL Definitions -------------------*/ 29 29 30 + #define RSXX_MAX_DATA 8 31 + 30 32 struct rsxx_reg_access { 31 33 __u32 addr; 32 34 __u32 cnt; 33 35 __u32 stat; 34 36 __u32 stream; 35 - __u32 data[8]; 37 + __u32 data[RSXX_MAX_DATA]; 36 38 }; 37 39 38 - #define RSXX_MAX_REG_CNT (8 * (sizeof(__u32))) 40 + #define RSXX_MAX_REG_CNT (RSXX_MAX_DATA * (sizeof(__u32))) 39 41 40 42 #define RSXX_IOC_MAGIC 'r' 41 43
+1 -1
drivers/block/rsxx/rsxx_cfg.h
··· 58 58 }; 59 59 60 60 /* Vendor ID Values */ 61 - #define RSXX_VENDOR_ID_TMS_IBM 0 61 + #define RSXX_VENDOR_ID_IBM 0 62 62 #define RSXX_VENDOR_ID_DSI 1 63 63 #define RSXX_VENDOR_COUNT 2 64 64
+27 -7
drivers/block/rsxx/rsxx_priv.h
··· 45 45 46 46 struct proc_cmd; 47 47 48 - #define PCI_VENDOR_ID_TMS_IBM 0x15B6 49 - #define PCI_DEVICE_ID_RS70_FLASH 0x0019 50 - #define PCI_DEVICE_ID_RS70D_FLASH 0x001A 51 - #define PCI_DEVICE_ID_RS80_FLASH 0x001C 52 - #define PCI_DEVICE_ID_RS81_FLASH 0x001E 48 + #define PCI_DEVICE_ID_FS70_FLASH 0x04A9 49 + #define PCI_DEVICE_ID_FS80_FLASH 0x04AA 53 50 54 51 #define RS70_PCI_REV_SUPPORTED 4 55 52 56 53 #define DRIVER_NAME "rsxx" 57 - #define DRIVER_VERSION "3.7" 54 + #define DRIVER_VERSION "4.0" 58 55 59 56 /* Block size is 4096 */ 60 57 #define RSXX_HW_BLK_SHIFT 12 ··· 63 66 64 67 #define RSXX_MAX_OUTSTANDING_CMDS 255 65 68 #define RSXX_CS_IDX_MASK 0xff 69 + 70 + #define STATUS_BUFFER_SIZE8 4096 71 + #define COMMAND_BUFFER_SIZE8 4096 66 72 67 73 #define RSXX_MAX_TARGETS 8 68 74 ··· 91 91 u32 discards_failed; 92 92 u32 done_rescheduled; 93 93 u32 issue_rescheduled; 94 + u32 dma_sw_err; 95 + u32 dma_hw_fault; 96 + u32 dma_cancelled; 94 97 u32 sw_q_depth; /* Number of DMAs on the SW queue. */ 95 98 atomic_t hw_q_depth; /* Number of DMAs queued to HW. */ 96 99 }; ··· 119 116 struct rsxx_cardinfo { 120 117 struct pci_dev *dev; 121 118 unsigned int halt; 119 + unsigned int eeh_state; 122 120 123 121 void __iomem *regmap; 124 122 spinlock_t irq_lock; ··· 228 224 PERF_RD512_HI = 0xac, 229 225 PERF_WR512_LO = 0xb0, 230 226 PERF_WR512_HI = 0xb4, 227 + PCI_RECONFIG = 0xb8, 231 228 }; 232 229 233 230 enum rsxx_intr { ··· 242 237 CR_INTR_DMA5 = 0x00000080, 243 238 CR_INTR_DMA6 = 0x00000100, 244 239 CR_INTR_DMA7 = 0x00000200, 240 + CR_INTR_ALL_C = 0x0000003f, 241 + CR_INTR_ALL_G = 0x000003ff, 245 242 CR_INTR_DMA_ALL = 0x000003f5, 246 243 CR_INTR_ALL = 0xffffffff, 247 244 }; ··· 260 253 DMA_QUEUE_RESET = 0x00000001, 261 254 }; 262 255 256 + enum rsxx_hw_fifo_flush { 257 + RSXX_FLUSH_BUSY = 0x00000002, 258 + RSXX_FLUSH_TIMEOUT = 0x00000004, 259 + }; 260 + 263 261 enum rsxx_pci_revision { 264 262 RSXX_DISCARD_SUPPORT = 2, 263 + RSXX_EEH_SUPPORT = 3, 265 264 }; 266 265 267 266 enum rsxx_creg_cmd { ··· 373 360 void rsxx_dma_destroy(struct rsxx_cardinfo *card); 374 361 int rsxx_dma_init(void); 375 362 void rsxx_dma_cleanup(void); 363 + void rsxx_dma_queue_reset(struct rsxx_cardinfo *card); 364 + int rsxx_dma_configure(struct rsxx_cardinfo *card); 376 365 int rsxx_dma_queue_bio(struct rsxx_cardinfo *card, 377 366 struct bio *bio, 378 367 atomic_t *n_dmas, 379 368 rsxx_dma_cb cb, 380 369 void *cb_data); 370 + int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl); 371 + int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card); 372 + void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card); 373 + int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card); 381 374 382 375 /***** cregs.c *****/ 383 376 int rsxx_creg_write(struct rsxx_cardinfo *card, u32 addr, ··· 408 389 void rsxx_creg_destroy(struct rsxx_cardinfo *card); 409 390 int rsxx_creg_init(void); 410 391 void rsxx_creg_cleanup(void); 411 - 412 392 int rsxx_reg_access(struct rsxx_cardinfo *card, 413 393 struct rsxx_reg_access __user *ucmd, 414 394 int read); 395 + void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card); 396 + void rsxx_kick_creg_queue(struct rsxx_cardinfo *card); 415 397 416 398 417 399
+39 -29
drivers/block/xen-blkback/blkback.c
··· 164 164 165 165 #define foreach_grant_safe(pos, n, rbtree, node) \ 166 166 for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \ 167 - (n) = rb_next(&(pos)->node); \ 167 + (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \ 168 168 &(pos)->node != NULL; \ 169 169 (pos) = container_of(n, typeof(*(pos)), node), \ 170 170 (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL) ··· 381 381 382 382 static void print_stats(struct xen_blkif *blkif) 383 383 { 384 - pr_info("xen-blkback (%s): oo %3d | rd %4d | wr %4d | f %4d" 385 - " | ds %4d\n", 384 + pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu" 385 + " | ds %4llu\n", 386 386 current->comm, blkif->st_oo_req, 387 387 blkif->st_rd_req, blkif->st_wr_req, 388 388 blkif->st_f_req, blkif->st_ds_req); ··· 442 442 } 443 443 444 444 struct seg_buf { 445 - unsigned long buf; 445 + unsigned int offset; 446 446 unsigned int nsec; 447 447 }; 448 448 /* ··· 621 621 * If this is a new persistent grant 622 622 * save the handler 623 623 */ 624 - persistent_gnts[i]->handle = map[j].handle; 625 - persistent_gnts[i]->dev_bus_addr = 626 - map[j++].dev_bus_addr; 624 + persistent_gnts[i]->handle = map[j++].handle; 627 625 } 628 626 pending_handle(pending_req, i) = 629 627 persistent_gnts[i]->handle; 630 628 631 629 if (ret) 632 630 continue; 633 - 634 - seg[i].buf = persistent_gnts[i]->dev_bus_addr | 635 - (req->u.rw.seg[i].first_sect << 9); 636 631 } else { 637 - pending_handle(pending_req, i) = map[j].handle; 632 + pending_handle(pending_req, i) = map[j++].handle; 638 633 bitmap_set(pending_req->unmap_seg, i, 1); 639 634 640 - if (ret) { 641 - j++; 635 + if (ret) 642 636 continue; 643 - } 644 - 645 - seg[i].buf = map[j++].dev_bus_addr | 646 - (req->u.rw.seg[i].first_sect << 9); 647 637 } 638 + seg[i].offset = (req->u.rw.seg[i].first_sect << 9); 648 639 } 649 640 return ret; 650 641 } ··· 668 677 make_response(blkif, req->u.discard.id, req->operation, status); 669 678 xen_blkif_put(blkif); 670 679 return err; 680 + } 681 + 682 + static int dispatch_other_io(struct xen_blkif *blkif, 683 + struct blkif_request *req, 684 + struct pending_req *pending_req) 685 + { 686 + free_req(pending_req); 687 + make_response(blkif, req->u.other.id, req->operation, 688 + BLKIF_RSP_EOPNOTSUPP); 689 + return -EIO; 671 690 } 672 691 673 692 static void xen_blk_drain_io(struct xen_blkif *blkif) ··· 801 800 802 801 /* Apply all sanity checks to /private copy/ of request. */ 803 802 barrier(); 804 - if (unlikely(req.operation == BLKIF_OP_DISCARD)) { 803 + 804 + switch (req.operation) { 805 + case BLKIF_OP_READ: 806 + case BLKIF_OP_WRITE: 807 + case BLKIF_OP_WRITE_BARRIER: 808 + case BLKIF_OP_FLUSH_DISKCACHE: 809 + if (dispatch_rw_block_io(blkif, &req, pending_req)) 810 + goto done; 811 + break; 812 + case BLKIF_OP_DISCARD: 805 813 free_req(pending_req); 806 814 if (dispatch_discard_io(blkif, &req)) 807 - break; 808 - } else if (dispatch_rw_block_io(blkif, &req, pending_req)) 815 + goto done; 809 816 break; 817 + default: 818 + if (dispatch_other_io(blkif, &req, pending_req)) 819 + goto done; 820 + break; 821 + } 810 822 811 823 /* Yield point for this unbounded loop. */ 812 824 cond_resched(); 813 825 } 814 - 826 + done: 815 827 return more_to_do; 816 828 } 817 829 ··· 918 904 pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n", 919 905 operation == READ ? 
"read" : "write", 920 906 preq.sector_number, 921 - preq.sector_number + preq.nr_sects, preq.dev); 907 + preq.sector_number + preq.nr_sects, 908 + blkif->vbd.pdevice); 922 909 goto fail_response; 923 910 } 924 911 ··· 962 947 (bio_add_page(bio, 963 948 pages[i], 964 949 seg[i].nsec << 9, 965 - seg[i].buf & ~PAGE_MASK) == 0)) { 950 + seg[i].offset) == 0)) { 966 951 967 952 bio = bio_alloc(GFP_KERNEL, nseg-i); 968 953 if (unlikely(bio == NULL)) ··· 992 977 bio->bi_end_io = end_block_io_op; 993 978 } 994 979 995 - /* 996 - * We set it one so that the last submit_bio does not have to call 997 - * atomic_inc. 998 - */ 999 980 atomic_set(&pending_req->pendcnt, nbio); 1000 - 1001 - /* Get a reference count for the disk queue and start sending I/O */ 1002 981 blk_start_plug(&plug); 1003 982 1004 983 for (i = 0; i < nbio; i++) ··· 1020 1011 fail_put_bio: 1021 1012 for (i = 0; i < nbio; i++) 1022 1013 bio_put(biolist[i]); 1014 + atomic_set(&pending_req->pendcnt, 1); 1023 1015 __end_block_io_op(pending_req, -EINVAL); 1024 1016 msleep(1); /* back off a bit */ 1025 1017 return -EIO;
+32 -8
drivers/block/xen-blkback/common.h
··· 77 77 uint64_t nr_sectors; 78 78 } __attribute__((__packed__)); 79 79 80 + struct blkif_x86_32_request_other { 81 + uint8_t _pad1; 82 + blkif_vdev_t _pad2; 83 + uint64_t id; /* private guest value, echoed in resp */ 84 + } __attribute__((__packed__)); 85 + 80 86 struct blkif_x86_32_request { 81 87 uint8_t operation; /* BLKIF_OP_??? */ 82 88 union { 83 89 struct blkif_x86_32_request_rw rw; 84 90 struct blkif_x86_32_request_discard discard; 91 + struct blkif_x86_32_request_other other; 85 92 } u; 86 93 } __attribute__((__packed__)); 87 94 ··· 120 113 uint64_t nr_sectors; 121 114 } __attribute__((__packed__)); 122 115 116 + struct blkif_x86_64_request_other { 117 + uint8_t _pad1; 118 + blkif_vdev_t _pad2; 119 + uint32_t _pad3; /* offsetof(blkif_..,u.discard.id)==8 */ 120 + uint64_t id; /* private guest value, echoed in resp */ 121 + } __attribute__((__packed__)); 122 + 123 123 struct blkif_x86_64_request { 124 124 uint8_t operation; /* BLKIF_OP_??? */ 125 125 union { 126 126 struct blkif_x86_64_request_rw rw; 127 127 struct blkif_x86_64_request_discard discard; 128 + struct blkif_x86_64_request_other other; 128 129 } u; 129 130 } __attribute__((__packed__)); 130 131 ··· 187 172 struct page *page; 188 173 grant_ref_t gnt; 189 174 grant_handle_t handle; 190 - uint64_t dev_bus_addr; 191 175 struct rb_node node; 192 176 }; 193 177 ··· 222 208 223 209 /* statistics */ 224 210 unsigned long st_print; 225 - int st_rd_req; 226 - int st_wr_req; 227 - int st_oo_req; 228 - int st_f_req; 229 - int st_ds_req; 230 - int st_rd_sect; 231 - int st_wr_sect; 211 + unsigned long long st_rd_req; 212 + unsigned long long st_wr_req; 213 + unsigned long long st_oo_req; 214 + unsigned long long st_f_req; 215 + unsigned long long st_ds_req; 216 + unsigned long long st_rd_sect; 217 + unsigned long long st_wr_sect; 232 218 233 219 wait_queue_head_t waiting_to_free; 234 220 }; ··· 292 278 dst->u.discard.nr_sectors = src->u.discard.nr_sectors; 293 279 break; 294 280 default: 281 + /* 282 + * Don't know how to translate this op. Only get the 283 + * ID so failure can be reported to the frontend. 284 + */ 285 + dst->u.other.id = src->u.other.id; 295 286 break; 296 287 } 297 288 } ··· 328 309 dst->u.discard.nr_sectors = src->u.discard.nr_sectors; 329 310 break; 330 311 default: 312 + /* 313 + * Don't know how to translate this op. Only get the 314 + * ID so failure can be reported to the frontend. 315 + */ 316 + dst->u.other.id = src->u.other.id; 331 317 break; 332 318 } 333 319 }
+7 -7
drivers/block/xen-blkback/xenbus.c
··· 230 230 } \ 231 231 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL) 232 232 233 - VBD_SHOW(oo_req, "%d\n", be->blkif->st_oo_req); 234 - VBD_SHOW(rd_req, "%d\n", be->blkif->st_rd_req); 235 - VBD_SHOW(wr_req, "%d\n", be->blkif->st_wr_req); 236 - VBD_SHOW(f_req, "%d\n", be->blkif->st_f_req); 237 - VBD_SHOW(ds_req, "%d\n", be->blkif->st_ds_req); 238 - VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect); 239 - VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect); 233 + VBD_SHOW(oo_req, "%llu\n", be->blkif->st_oo_req); 234 + VBD_SHOW(rd_req, "%llu\n", be->blkif->st_rd_req); 235 + VBD_SHOW(wr_req, "%llu\n", be->blkif->st_wr_req); 236 + VBD_SHOW(f_req, "%llu\n", be->blkif->st_f_req); 237 + VBD_SHOW(ds_req, "%llu\n", be->blkif->st_ds_req); 238 + VBD_SHOW(rd_sect, "%llu\n", be->blkif->st_rd_sect); 239 + VBD_SHOW(wr_sect, "%llu\n", be->blkif->st_wr_sect); 240 240 241 241 static struct attribute *xen_vbdstat_attrs[] = { 242 242 &dev_attr_oo_req.attr,
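The common.h hunk earlier widens the per-device statistics from int to unsigned long long so the counters do not wrap on long-running backends, and the sysfs show routines above switch from %d to %llu to match the new type; passing a 64-bit value to a %d conversion is undefined behaviour. A small userspace sketch of the pairing (the counter value is made up):

#include <stdio.h>

int main(void)
{
	unsigned long long st_rd_req = 5000000000ULL;	/* more than 2^32 reads */

	/* Correct pairing: unsigned long long takes the %llu conversion. */
	printf("rd_req = %llu\n", st_rd_req);

	/* Roughly what a 32-bit int counter would show after wrapping. */
	printf("as int = %d\n", (int)st_rd_req);
	return 0;
}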
+91 -63
drivers/block/xen-blkfront.c
··· 44 44 #include <linux/mutex.h> 45 45 #include <linux/scatterlist.h> 46 46 #include <linux/bitmap.h> 47 - #include <linux/llist.h> 47 + #include <linux/list.h> 48 48 49 49 #include <xen/xen.h> 50 50 #include <xen/xenbus.h> ··· 68 68 struct grant { 69 69 grant_ref_t gref; 70 70 unsigned long pfn; 71 - struct llist_node node; 71 + struct list_head node; 72 72 }; 73 73 74 74 struct blk_shadow { 75 75 struct blkif_request req; 76 76 struct request *request; 77 - unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 78 77 struct grant *grants_used[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 79 78 }; 80 79 ··· 104 105 struct work_struct work; 105 106 struct gnttab_free_callback callback; 106 107 struct blk_shadow shadow[BLK_RING_SIZE]; 107 - struct llist_head persistent_gnts; 108 + struct list_head persistent_gnts; 108 109 unsigned int persistent_gnts_c; 109 110 unsigned long shadow_free; 110 111 unsigned int feature_flush; ··· 162 163 info->shadow[id].request = NULL; 163 164 info->shadow_free = id; 164 165 return 0; 166 + } 167 + 168 + static int fill_grant_buffer(struct blkfront_info *info, int num) 169 + { 170 + struct page *granted_page; 171 + struct grant *gnt_list_entry, *n; 172 + int i = 0; 173 + 174 + while(i < num) { 175 + gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO); 176 + if (!gnt_list_entry) 177 + goto out_of_memory; 178 + 179 + granted_page = alloc_page(GFP_NOIO); 180 + if (!granted_page) { 181 + kfree(gnt_list_entry); 182 + goto out_of_memory; 183 + } 184 + 185 + gnt_list_entry->pfn = page_to_pfn(granted_page); 186 + gnt_list_entry->gref = GRANT_INVALID_REF; 187 + list_add(&gnt_list_entry->node, &info->persistent_gnts); 188 + i++; 189 + } 190 + 191 + return 0; 192 + 193 + out_of_memory: 194 + list_for_each_entry_safe(gnt_list_entry, n, 195 + &info->persistent_gnts, node) { 196 + list_del(&gnt_list_entry->node); 197 + __free_page(pfn_to_page(gnt_list_entry->pfn)); 198 + kfree(gnt_list_entry); 199 + i--; 200 + } 201 + BUG_ON(i != 0); 202 + return -ENOMEM; 203 + } 204 + 205 + static struct grant *get_grant(grant_ref_t *gref_head, 206 + struct blkfront_info *info) 207 + { 208 + struct grant *gnt_list_entry; 209 + unsigned long buffer_mfn; 210 + 211 + BUG_ON(list_empty(&info->persistent_gnts)); 212 + gnt_list_entry = list_first_entry(&info->persistent_gnts, struct grant, 213 + node); 214 + list_del(&gnt_list_entry->node); 215 + 216 + if (gnt_list_entry->gref != GRANT_INVALID_REF) { 217 + info->persistent_gnts_c--; 218 + return gnt_list_entry; 219 + } 220 + 221 + /* Assign a gref to this page */ 222 + gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head); 223 + BUG_ON(gnt_list_entry->gref == -ENOSPC); 224 + buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn); 225 + gnttab_grant_foreign_access_ref(gnt_list_entry->gref, 226 + info->xbdev->otherend_id, 227 + buffer_mfn, 0); 228 + return gnt_list_entry; 165 229 } 166 230 167 231 static const char *op_name(int op) ··· 355 293 static int blkif_queue_request(struct request *req) 356 294 { 357 295 struct blkfront_info *info = req->rq_disk->private_data; 358 - unsigned long buffer_mfn; 359 296 struct blkif_request *ring_req; 360 297 unsigned long id; 361 298 unsigned int fsect, lsect; ··· 367 306 */ 368 307 bool new_persistent_gnts; 369 308 grant_ref_t gref_head; 370 - struct page *granted_page; 371 309 struct grant *gnt_list_entry = NULL; 372 310 struct scatterlist *sg; 373 311 ··· 430 370 fsect = sg->offset >> 9; 431 371 lsect = fsect + (sg->length >> 9) - 1; 432 372 433 - if (info->persistent_gnts_c) { 434 - 
BUG_ON(llist_empty(&info->persistent_gnts)); 435 - gnt_list_entry = llist_entry( 436 - llist_del_first(&info->persistent_gnts), 437 - struct grant, node); 438 - 439 - ref = gnt_list_entry->gref; 440 - buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn); 441 - info->persistent_gnts_c--; 442 - } else { 443 - ref = gnttab_claim_grant_reference(&gref_head); 444 - BUG_ON(ref == -ENOSPC); 445 - 446 - gnt_list_entry = 447 - kmalloc(sizeof(struct grant), 448 - GFP_ATOMIC); 449 - if (!gnt_list_entry) 450 - return -ENOMEM; 451 - 452 - granted_page = alloc_page(GFP_ATOMIC); 453 - if (!granted_page) { 454 - kfree(gnt_list_entry); 455 - return -ENOMEM; 456 - } 457 - 458 - gnt_list_entry->pfn = 459 - page_to_pfn(granted_page); 460 - gnt_list_entry->gref = ref; 461 - 462 - buffer_mfn = pfn_to_mfn(page_to_pfn( 463 - granted_page)); 464 - gnttab_grant_foreign_access_ref(ref, 465 - info->xbdev->otherend_id, 466 - buffer_mfn, 0); 467 - } 373 + gnt_list_entry = get_grant(&gref_head, info); 374 + ref = gnt_list_entry->gref; 468 375 469 376 info->shadow[id].grants_used[i] = gnt_list_entry; 470 377 ··· 462 435 kunmap_atomic(shared_data); 463 436 } 464 437 465 - info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn); 466 438 ring_req->u.rw.seg[i] = 467 439 (struct blkif_request_segment) { 468 440 .gref = ref, ··· 816 790 817 791 static void blkif_free(struct blkfront_info *info, int suspend) 818 792 { 819 - struct llist_node *all_gnts; 820 - struct grant *persistent_gnt, *tmp; 821 - struct llist_node *n; 793 + struct grant *persistent_gnt; 794 + struct grant *n; 822 795 823 796 /* Prevent new requests being issued until we fix things up. */ 824 797 spin_lock_irq(&info->io_lock); ··· 828 803 blk_stop_queue(info->rq); 829 804 830 805 /* Remove all persistent grants */ 831 - if (info->persistent_gnts_c) { 832 - all_gnts = llist_del_all(&info->persistent_gnts); 833 - persistent_gnt = llist_entry(all_gnts, typeof(*(persistent_gnt)), node); 834 - while (persistent_gnt) { 835 - gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); 806 + if (!list_empty(&info->persistent_gnts)) { 807 + list_for_each_entry_safe(persistent_gnt, n, 808 + &info->persistent_gnts, node) { 809 + list_del(&persistent_gnt->node); 810 + if (persistent_gnt->gref != GRANT_INVALID_REF) { 811 + gnttab_end_foreign_access(persistent_gnt->gref, 812 + 0, 0UL); 813 + info->persistent_gnts_c--; 814 + } 836 815 __free_page(pfn_to_page(persistent_gnt->pfn)); 837 - tmp = persistent_gnt; 838 - n = persistent_gnt->node.next; 839 - if (n) 840 - persistent_gnt = llist_entry(n, typeof(*(persistent_gnt)), node); 841 - else 842 - persistent_gnt = NULL; 843 - kfree(tmp); 816 + kfree(persistent_gnt); 844 817 } 845 - info->persistent_gnts_c = 0; 846 818 } 819 + BUG_ON(info->persistent_gnts_c != 0); 847 820 848 821 /* No more gnttab callback work. 
*/ 849 822 gnttab_cancel_free_callback(&info->callback); ··· 898 875 } 899 876 /* Add the persistent grant into the list of free grants */ 900 877 for (i = 0; i < s->req.u.rw.nr_segments; i++) { 901 - llist_add(&s->grants_used[i]->node, &info->persistent_gnts); 878 + list_add(&s->grants_used[i]->node, &info->persistent_gnts); 902 879 info->persistent_gnts_c++; 903 880 } 904 881 } ··· 1035 1012 FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); 1036 1013 1037 1014 sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST); 1015 + 1016 + /* Allocate memory for grants */ 1017 + err = fill_grant_buffer(info, BLK_RING_SIZE * 1018 + BLKIF_MAX_SEGMENTS_PER_REQUEST); 1019 + if (err) 1020 + goto fail; 1038 1021 1039 1022 err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); 1040 1023 if (err < 0) { ··· 1200 1171 spin_lock_init(&info->io_lock); 1201 1172 info->xbdev = dev; 1202 1173 info->vdevice = vdevice; 1203 - init_llist_head(&info->persistent_gnts); 1174 + INIT_LIST_HEAD(&info->persistent_gnts); 1204 1175 info->persistent_gnts_c = 0; 1205 1176 info->connected = BLKIF_STATE_DISCONNECTED; 1206 1177 INIT_WORK(&info->work, blkif_restart_queue); ··· 1232 1203 int j; 1233 1204 1234 1205 /* Stage 1: Make a safe copy of the shadow state. */ 1235 - copy = kmalloc(sizeof(info->shadow), 1206 + copy = kmemdup(info->shadow, sizeof(info->shadow), 1236 1207 GFP_NOIO | __GFP_REPEAT | __GFP_HIGH); 1237 1208 if (!copy) 1238 1209 return -ENOMEM; 1239 - memcpy(copy, info->shadow, sizeof(info->shadow)); 1240 1210 1241 1211 /* Stage 2: Set up free list. */ 1242 1212 memset(&info->shadow, 0, sizeof(info->shadow)); ··· 1264 1236 gnttab_grant_foreign_access_ref( 1265 1237 req->u.rw.seg[j].gref, 1266 1238 info->xbdev->otherend_id, 1267 - pfn_to_mfn(info->shadow[req->u.rw.id].frame[j]), 1239 + pfn_to_mfn(copy[i].grants_used[j]->pfn), 1268 1240 0); 1269 1241 } 1270 1242 info->shadow[req->u.rw.id].req = *req;
+10
include/xen/interface/io/blkif.h
··· 138 138 uint8_t _pad3; 139 139 } __attribute__((__packed__)); 140 140 141 + struct blkif_request_other { 142 + uint8_t _pad1; 143 + blkif_vdev_t _pad2; /* only for read/write requests */ 144 + #ifdef CONFIG_X86_64 145 + uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/ 146 + #endif 147 + uint64_t id; /* private guest value, echoed in resp */ 148 + } __attribute__((__packed__)); 149 + 141 150 struct blkif_request { 142 151 uint8_t operation; /* BLKIF_OP_??? */ 143 152 union { 144 153 struct blkif_request_rw rw; 145 154 struct blkif_request_discard discard; 155 + struct blkif_request_other other; 146 156 } u; 147 157 } __attribute__((__packed__)); 148 158