Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'for-4.1/drivers' of git://git.kernel.dk/linux-block

Pull block driver updates from Jens Axboe:
"This is the block driver pull request for 4.1. As with the core bits,
this is a relatively slow round. This pull request contains:

- Various fixes and cleanups for NVMe, from Alexey Khoroshilov, Chong
Yuan, myself, Keith Busch, and Murali Iyer.

- Documentation and code cleanups for nbd from Markus Pargmann.

- Change of brd maintainer to me, from Ross Zwisler. At least the
email doesn't bounce anymore that way.

- Two xen-blkback fixes from Tao Chen"

* 'for-4.1/drivers' of git://git.kernel.dk/linux-block: (23 commits)
NVMe: Meta data handling through submit io ioctl
NVMe: Add translation for block limits
NVMe: Remove check for null
NVMe: Fix error handling of class_create("nvme")
xen-blkback: define pr_fmt macro to avoid the duplication of DRV_PFX
xen-blkback: enlarge the array size of blkback name
nbd: Return error pointer directly
nbd: Return error code directly
nbd: Remove fixme that was already fixed
nbd: Restructure debugging prints
nbd: Fix device bytesize type
nbd: Replace kthread_create with kthread_run
nbd: Remove kernel internal header
Documentation: nbd: Add list of module parameters
Documentation: nbd: Reformat to allow more documentation
NVMe: increase depth of admin queue
nvme: Fix PRP list calculation for non-4k system page size
NVMe: Fix blk-mq hot cpu notification
NVMe: embedded iod mask cleanup
NVMe: Freeze admin queue on device failure
...

12 files changed, 240 insertions(+), 300 deletions(-)
+29 -15
Documentation/blockdev/nbd.txt
···
-Network Block Device (TCP version)
-
-What is it: With this compiled in the kernel (or as a module), Linux
-can use a remote server as one of its block devices. So every time
-the client computer wants to read, e.g., /dev/nb0, it sends a
-request over TCP to the server, which will reply with the data read.
-This can be used for stations with low disk space (or even diskless)
-to borrow disk space from another computer.
-Unlike NFS, it is possible to put any filesystem on it, etc.
+Network Block Device (TCP version)
+==================================
 
-For more information, or to download the nbd-client and nbd-server
-tools, go to http://nbd.sf.net/.
+1) Overview
+-----------
 
-The nbd kernel module need only be installed on the client
-system, as the nbd-server is completely in userspace. In fact,
-the nbd-server has been successfully ported to other operating
-systems, including Windows.
+What is it: With this compiled in the kernel (or as a module), Linux
+can use a remote server as one of its block devices. So every time
+the client computer wants to read, e.g., /dev/nb0, it sends a
+request over TCP to the server, which will reply with the data read.
+This can be used for stations with low disk space (or even diskless)
+to borrow disk space from another computer.
+Unlike NFS, it is possible to put any filesystem on it, etc.
+
+For more information, or to download the nbd-client and nbd-server
+tools, go to http://nbd.sf.net/.
+
+The nbd kernel module need only be installed on the client
+system, as the nbd-server is completely in userspace. In fact,
+the nbd-server has been successfully ported to other operating
+systems, including Windows.
+
+A) NBD parameters
+-----------------
+
+max_part
+	Number of partitions per device (default: 0).
+
+nbds_max
+	Number of block devices that should be initialized (default: 16).
+
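The two parameters documented above are ordinary module parameters. For reference, they are declared in drivers/block/nbd.c (whose diff appears further down) roughly like this, read-only after load (permission mode 0444):

    /* Sketch matching the documented defaults; see drivers/block/nbd.c. */
    static unsigned int nbds_max = 16;
    module_param(nbds_max, int, 0444);
    MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");

    static int max_part;
    module_param(max_part, int, 0444);
    MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");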
+1 -1
MAINTAINERS
···
 F:	drivers/net/wireless/rt2x00/
 
 RAMDISK RAM BLOCK DEVICE DRIVER
-M:	Nick Piggin <npiggin@kernel.dk>
+M:	Jens Axboe <axboe@kernel.dk>
 S:	Maintained
 F:	Documentation/blockdev/ramdisk.txt
 F:	drivers/block/brd.c
+3 -4
drivers/block/drbd/drbd_main.c
···
 	if (drbd_md_io_page_pool == NULL)
 		goto Enomem;
 
-	drbd_request_mempool = mempool_create(number,
-		mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
+	drbd_request_mempool = mempool_create_slab_pool(number,
+		drbd_request_cache);
 	if (drbd_request_mempool == NULL)
 		goto Enomem;
 
-	drbd_ee_mempool = mempool_create(number,
-		mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
+	drbd_ee_mempool = mempool_create_slab_pool(number, drbd_ee_cache);
 	if (drbd_ee_mempool == NULL)
 		goto Enomem;
 
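mempool_create_slab_pool() is a thin convenience wrapper, so the drbd change above is a pure simplification with no behavioral difference; the helper in include/linux/mempool.h is essentially:

    /* Essentially the helper's definition: a mempool backed by an
     * existing kmem_cache, with the slab alloc/free callbacks filled in. */
    static inline mempool_t *
    mempool_create_slab_pool(int min_nr, struct kmem_cache *kc)
    {
    	return mempool_create(min_nr, mempool_alloc_slab,
    			      mempool_free_slab, (void *) kc);
    }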
+2 -1
drivers/block/drbd/drbd_req.c
···
 {
 	struct drbd_request *req;
 
-	req = mempool_alloc(drbd_request_mempool, GFP_NOIO | __GFP_ZERO);
+	req = mempool_alloc(drbd_request_mempool, GFP_NOIO);
 	if (!req)
 		return NULL;
+	memset(req, 0, sizeof(*req));
 
 	drbd_req_make_private_bio(req, bio_src);
 	req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
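The drbd_req.c change is subtler than it looks: mempool_alloc() only passes the gfp mask to the underlying allocator on the fast path. An object recycled from the pool's pre-allocated reserve is handed back as-is, so __GFP_ZERO is not reliably honored and the explicit memset() is required. A minimal sketch of the safe pattern:

    /* Zero explicitly after mempool_alloc(): objects taken from the
     * pool's reserve bypass the slab allocator, so __GFP_ZERO in the
     * gfp mask cannot be relied upon to clear them. */
    struct drbd_request *req = mempool_alloc(drbd_request_mempool, GFP_NOIO);
    if (!req)
    	return NULL;
    memset(req, 0, sizeof(*req));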
+55 -85
drivers/block/nbd.c
···
 #include <net/sock.h>
 #include <linux/net.h>
 #include <linux/kthread.h>
+#include <linux/types.h>
 
 #include <asm/uaccess.h>
 #include <asm/types.h>
 
 #include <linux/nbd.h>
 
-#define NBD_MAGIC 0x68797548
+struct nbd_device {
+	int flags;
+	int harderror;		/* Code of hard error */
+	struct socket * sock;	/* If == NULL, device is not ready, yet */
+	int magic;
 
-#ifdef NDEBUG
-#define dprintk(flags, fmt...)
-#else /* NDEBUG */
-#define dprintk(flags, fmt...) do { \
-	if (debugflags & (flags)) printk(KERN_DEBUG fmt); \
-} while (0)
-#define DBG_IOCTL       0x0004
-#define DBG_INIT        0x0010
-#define DBG_EXIT        0x0020
-#define DBG_BLKDEV      0x0100
-#define DBG_RX          0x0200
-#define DBG_TX          0x0400
-static unsigned int debugflags;
-#endif /* NDEBUG */
+	spinlock_t queue_lock;
+	struct list_head queue_head;	/* Requests waiting result */
+	struct request *active_req;
+	wait_queue_head_t active_wq;
+	struct list_head waiting_queue;	/* Requests to be sent */
+	wait_queue_head_t waiting_wq;
+
+	struct mutex tx_lock;
+	struct gendisk *disk;
+	int blksize;
+	loff_t bytesize;
+	pid_t pid; /* pid of nbd-client, if attached */
+	int xmit_timeout;
+	int disconnect; /* a disconnect has been requested by user */
+};
+
+#define NBD_MAGIC 0x68797548
 
 static unsigned int nbds_max = 16;
 static struct nbd_device *nbd_dev;
···
  */
 static DEFINE_SPINLOCK(nbd_lock);
 
-#ifndef NDEBUG
-static const char *ioctl_cmd_to_ascii(int cmd)
+static inline struct device *nbd_to_dev(struct nbd_device *nbd)
 {
-	switch (cmd) {
-	case NBD_SET_SOCK: return "set-sock";
-	case NBD_SET_BLKSIZE: return "set-blksize";
-	case NBD_SET_SIZE: return "set-size";
-	case NBD_SET_TIMEOUT: return "set-timeout";
-	case NBD_SET_FLAGS: return "set-flags";
-	case NBD_DO_IT: return "do-it";
-	case NBD_CLEAR_SOCK: return "clear-sock";
-	case NBD_CLEAR_QUE: return "clear-que";
-	case NBD_PRINT_DEBUG: return "print-debug";
-	case NBD_SET_SIZE_BLOCKS: return "set-size-blocks";
-	case NBD_DISCONNECT: return "disconnect";
-	case BLKROSET: return "set-read-only";
-	case BLKFLSBUF: return "flush-buffer-cache";
-	}
-	return "unknown";
+	return disk_to_dev(nbd->disk);
 }
 
 static const char *nbdcmd_to_ascii(int cmd)
···
 	}
 	return "invalid";
 }
-#endif /* NDEBUG */
 
-static void nbd_end_request(struct request *req)
+static void nbd_end_request(struct nbd_device *nbd, struct request *req)
 {
 	int error = req->errors ? -EIO : 0;
 	struct request_queue *q = req->q;
 	unsigned long flags;
 
-	dprintk(DBG_BLKDEV, "%s: request %p: %s\n", req->rq_disk->disk_name,
-			req, error ? "failed" : "done");
+	dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", req,
+		error ? "failed" : "done");
 
 	spin_lock_irqsave(q->queue_lock, flags);
 	__blk_end_request_all(req, error);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
+/*
+ * Forcibly shutdown the socket causing all listeners to error
+ */
 static void sock_shutdown(struct nbd_device *nbd, int lock)
 {
-	/* Forcibly shutdown the socket causing all listeners
-	 * to error
-	 *
-	 * FIXME: This code is duplicated from sys_shutdown, but
-	 * there should be a more generic interface rather than
-	 * calling socket ops directly here */
 	if (lock)
 		mutex_lock(&nbd->tx_lock);
 	if (nbd->sock) {
···
 	}
 	memcpy(request.handle, &req, sizeof(req));
 
-	dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n",
-			nbd->disk->disk_name, req,
-			nbdcmd_to_ascii(nbd_cmd(req)),
-			(unsigned long long)blk_rq_pos(req) << 9,
-			blk_rq_bytes(req));
+	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
+		req, nbdcmd_to_ascii(nbd_cmd(req)),
+		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
 	result = sock_xmit(nbd, 1, &request, sizeof(request),
 			(nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
 	if (result <= 0) {
 		dev_err(disk_to_dev(nbd->disk),
 			"Send control failed (result %d)\n", result);
-		goto error_out;
+		return -EIO;
 	}
 
 	if (nbd_cmd(req) == NBD_CMD_WRITE) {
···
 			flags = 0;
 			if (!rq_iter_last(bvec, iter))
 				flags = MSG_MORE;
-			dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
-					nbd->disk->disk_name, req, bvec.bv_len);
+			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
+				req, bvec.bv_len);
 			result = sock_send_bvec(nbd, &bvec, flags);
 			if (result <= 0) {
 				dev_err(disk_to_dev(nbd->disk),
 					"Send data failed (result %d)\n",
 					result);
-				goto error_out;
+				return -EIO;
 			}
 		}
 	}
 	return 0;
-
-error_out:
-	return -EIO;
 }
 
 static struct request *nbd_find_request(struct nbd_device *nbd,
···
 
 	err = wait_event_interruptible(nbd->active_wq, nbd->active_req != xreq);
 	if (unlikely(err))
-		goto out;
+		return ERR_PTR(err);
 
 	spin_lock(&nbd->queue_lock);
 	list_for_each_entry_safe(req, tmp, &nbd->queue_head, queuelist) {
···
 	}
 	spin_unlock(&nbd->queue_lock);
 
-	err = -ENOENT;
-
-out:
-	return ERR_PTR(err);
+	return ERR_PTR(-ENOENT);
 }
 
 static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec)
···
 		return req;
 	}
 
-	dprintk(DBG_RX, "%s: request %p: got reply\n",
-			nbd->disk->disk_name, req);
+	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
 	if (nbd_cmd(req) == NBD_CMD_READ) {
 		struct req_iterator iter;
 		struct bio_vec bvec;
···
 				req->errors++;
 				return req;
 			}
-			dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
-				nbd->disk->disk_name, req, bvec.bv_len);
+			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
+				req, bvec.bv_len);
 		}
 	}
 	return req;
···
 	}
 
 	while ((req = nbd_read_stat(nbd)) != NULL)
-		nbd_end_request(req);
+		nbd_end_request(nbd, req);
 
 	device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
 	nbd->pid = 0;
···
 						 queuelist);
 		list_del_init(&req->queuelist);
 		req->errors++;
-		nbd_end_request(req);
+		nbd_end_request(nbd, req);
 	}
 
 	while (!list_empty(&nbd->waiting_queue)) {
···
 						 queuelist);
 		list_del_init(&req->queuelist);
 		req->errors++;
-		nbd_end_request(req);
+		nbd_end_request(nbd, req);
 	}
 }
···
 	if (nbd_send_req(nbd, req) != 0) {
 		dev_err(disk_to_dev(nbd->disk), "Request send failed\n");
 		req->errors++;
-		nbd_end_request(req);
+		nbd_end_request(nbd, req);
 	} else {
 		spin_lock(&nbd->queue_lock);
 		list_add_tail(&req->queuelist, &nbd->queue_head);
···
 
 error_out:
 	req->errors++;
-	nbd_end_request(req);
+	nbd_end_request(nbd, req);
 }
 
 static int nbd_thread(void *data)
···
 
 		spin_unlock_irq(q->queue_lock);
 
-		dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
-				req->rq_disk->disk_name, req, req->cmd_type);
-
 		nbd = req->rq_disk->private_data;
 
 		BUG_ON(nbd->magic != NBD_MAGIC);
+
+		dev_dbg(nbd_to_dev(nbd), "request %p: dequeued (flags=%x)\n",
+			req, req->cmd_type);
 
 		if (unlikely(!nbd->sock)) {
 			dev_err(disk_to_dev(nbd->disk),
 				"Attempted send on closed socket\n");
 			req->errors++;
-			nbd_end_request(req);
+			nbd_end_request(nbd, req);
 			spin_lock_irq(q->queue_lock);
 			continue;
 		}
···
 		else
 			blk_queue_flush(nbd->disk->queue, 0);
 
-		thread = kthread_create(nbd_thread, nbd, "%s",
-					nbd->disk->disk_name);
+		thread = kthread_run(nbd_thread, nbd, "%s",
+				     nbd->disk->disk_name);
 		if (IS_ERR(thread)) {
 			mutex_lock(&nbd->tx_lock);
 			return PTR_ERR(thread);
 		}
-		wake_up_process(thread);
+
 		error = nbd_do_it(nbd);
 		kthread_stop(thread);
 
···
 		return -EPERM;
 
 	BUG_ON(nbd->magic != NBD_MAGIC);
-
-	/* Anyone capable of this syscall can do *real bad* things */
-	dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
-			nbd->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);
 
 	mutex_lock(&nbd->tx_lock);
 	error = __nbd_ioctl(bdev, nbd, cmd, arg);
···
 	}
 
 	printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);
-	dprintk(DBG_INIT, "nbd: debugflags=0x%x\n", debugflags);
 
 	for (i = 0; i < nbds_max; i++) {
 		struct gendisk *disk = nbd_dev[i].disk;
···
 MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
 module_param(max_part, int, 0444);
 MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");
-#ifndef NDEBUG
-module_param(debugflags, int, 0644);
-MODULE_PARM_DESC(debugflags, "flags for controlling debug output");
-#endif
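The kthread change above folds the kthread_create() + wake_up_process() pair into kthread_run(), which in include/linux/kthread.h is approximately this macro:

    /* Approximate definition from include/linux/kthread.h: create the
     * thread and wake it immediately, propagating any ERR_PTR value. */
    #define kthread_run(threadfn, data, namefmt, ...)			   \
    ({									   \
    	struct task_struct *__k						   \
    		= kthread_create(threadfn, data, namefmt, ## __VA_ARGS__); \
    	if (!IS_ERR(__k))						   \
    		wake_up_process(__k);					   \
    	__k;								   \
    })

The dprintk()-to-dev_dbg() conversion in the same diff also means the messages are now controlled by the kernel's dynamic debug facility instead of a driver-private debugflags parameter.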
+66 -93
drivers/block/nvme-core.c
···
 
 #define NVME_MINORS		(1U << MINORBITS)
 #define NVME_Q_DEPTH		1024
-#define NVME_AQ_DEPTH		64
+#define NVME_AQ_DEPTH		256
 #define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
 #define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))
 #define ADMIN_TIMEOUT		(admin_timeout * HZ)
···
  */
 #define NVME_INT_PAGES		2
 #define NVME_INT_BYTES(dev)	(NVME_INT_PAGES * (dev)->page_size)
+#define NVME_INT_MASK		0x01
 
 /*
  * Will slightly overestimate the number of pages needed.  This is OK
···
  */
 static bool iod_should_kfree(struct nvme_iod *iod)
 {
-	return (iod->private & 0x01) == 0;
+	return (iod->private & NVME_INT_MASK) == 0;
 }
 
 /* Special values must be less than 0x1000 */
···
 static void async_req_completion(struct nvme_queue *nvmeq, void *ctx,
 						struct nvme_completion *cqe)
 {
-	struct request *req = ctx;
-
 	u32 result = le32_to_cpup(&cqe->result);
 	u16 status = le16_to_cpup(&cqe->status) >> 1;
 
···
 	if (status == NVME_SC_SUCCESS)
 		dev_warn(nvmeq->q_dmadev,
 			"async event result %08x\n", result);
-
-	blk_mq_free_hctx_request(nvmeq->hctx, req);
 }
 
 static void abort_completion(struct nvme_queue *nvmeq, void *ctx,
···
 {
 	unsigned size = !(rq->cmd_flags & REQ_DISCARD) ? blk_rq_bytes(rq) :
						sizeof(struct nvme_dsm_range);
-	unsigned long mask = 0;
 	struct nvme_iod *iod;
 
 	if (rq->nr_phys_segments <= NVME_INT_PAGES &&
···
 		struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(rq);
 
 		iod = cmd->iod;
-		mask = 0x01;
 		iod_init(iod, size, rq->nr_phys_segments,
-				(unsigned long) rq | 0x01);
+				(unsigned long) rq | NVME_INT_MASK);
 		return iod;
 	}
 
···
 		return;
 
 	pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset;
-	if (!pmap)
-		return;
 
 	p = pmap;
 	virt = bip_get_seed(bip);
···
 	struct scatterlist *sg = iod->sg;
 	int dma_len = sg_dma_len(sg);
 	u64 dma_addr = sg_dma_address(sg);
-	int offset = offset_in_page(dma_addr);
+	u32 page_size = dev->page_size;
+	int offset = dma_addr & (page_size - 1);
 	__le64 *prp_list;
 	__le64 **list = iod_list(iod);
 	dma_addr_t prp_dma;
 	int nprps, i;
-	u32 page_size = dev->page_size;
 
 	length -= (page_size - offset);
 	if (length <= 0)
···
 	struct nvme_cmd_info *cmd_info;
 	struct request *req;
 
-	req = blk_mq_alloc_request(dev->admin_q, WRITE, GFP_ATOMIC, false);
+	req = blk_mq_alloc_request(dev->admin_q, WRITE, GFP_ATOMIC, true);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
 	req->cmd_flags |= REQ_NO_TIMEOUT;
 	cmd_info = blk_mq_rq_to_pdu(req);
-	nvme_set_info(cmd_info, req, async_req_completion);
+	nvme_set_info(cmd_info, NULL, async_req_completion);
 
 	memset(&c, 0, sizeof(c));
 	c.common.opcode = nvme_admin_async_event;
 	c.common.command_id = req->tag;
 
+	blk_mq_free_hctx_request(nvmeq->hctx, req);
 	return __nvme_submit_cmd(nvmeq, &c);
 }
 
···
 	nvmeq->cq_vector = -1;
 	spin_unlock_irq(&nvmeq->q_lock);
 
+	if (!nvmeq->qid && nvmeq->dev->admin_q)
+		blk_mq_freeze_queue_start(nvmeq->dev->admin_q);
+
 	irq_set_affinity_hint(vector, NULL);
 	free_irq(vector, nvmeq);
 
···
 		adapter_delete_sq(dev, qid);
 		adapter_delete_cq(dev, qid);
 	}
-	if (!qid && dev->admin_q)
-		blk_mq_freeze_queue_start(dev->admin_q);
 
 	spin_lock_irq(&nvmeq->q_lock);
 	nvme_process_cq(nvmeq);
···
 	dev->admin_tagset.ops = &nvme_mq_admin_ops;
 	dev->admin_tagset.nr_hw_queues = 1;
 	dev->admin_tagset.queue_depth = NVME_AQ_DEPTH - 1;
+	dev->admin_tagset.reserved_tags = 1;
 	dev->admin_tagset.timeout = ADMIN_TIMEOUT;
 	dev->admin_tagset.numa_node = dev_to_node(&dev->pci_dev->dev);
 	dev->admin_tagset.cmd_size = nvme_cmd_size(dev);
···
 	struct nvme_dev *dev = ns->dev;
 	struct nvme_user_io io;
 	struct nvme_command c;
-	unsigned length, meta_len;
-	int status, i;
-	struct nvme_iod *iod, *meta_iod = NULL;
-	dma_addr_t meta_dma_addr;
-	void *meta, *uninitialized_var(meta_mem);
+	unsigned length, meta_len, prp_len;
+	int status, write;
+	struct nvme_iod *iod;
+	dma_addr_t meta_dma = 0;
+	void *meta = NULL;
 
 	if (copy_from_user(&io, uio, sizeof(io)))
 		return -EFAULT;
 	length = (io.nblocks + 1) << ns->lba_shift;
 	meta_len = (io.nblocks + 1) * ns->ms;
 
-	if (meta_len && ((io.metadata & 3) || !io.metadata))
+	if (meta_len && ((io.metadata & 3) || !io.metadata) && !ns->ext)
 		return -EINVAL;
+	else if (meta_len && ns->ext) {
+		length += meta_len;
+		meta_len = 0;
+	}
+
+	write = io.opcode & 1;
 
 	switch (io.opcode) {
 	case nvme_cmd_write:
 	case nvme_cmd_read:
 	case nvme_cmd_compare:
-		iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length);
+		iod = nvme_map_user_pages(dev, write, io.addr, length);
 		break;
 	default:
 		return -EINVAL;
···
 
 	if (IS_ERR(iod))
 		return PTR_ERR(iod);
+
+	prp_len = nvme_setup_prps(dev, iod, length, GFP_KERNEL);
+	if (length != prp_len) {
+		status = -ENOMEM;
+		goto unmap;
+	}
+	if (meta_len) {
+		meta = dma_alloc_coherent(&dev->pci_dev->dev, meta_len,
+						&meta_dma, GFP_KERNEL);
+		if (!meta) {
+			status = -ENOMEM;
+			goto unmap;
+		}
+		if (write) {
+			if (copy_from_user(meta, (void __user *)io.metadata,
+								meta_len)) {
+				status = -EFAULT;
+				goto unmap;
+			}
+		}
+	}
 
 	memset(&c, 0, sizeof(c));
 	c.rw.opcode = io.opcode;
···
 	c.rw.reftag = cpu_to_le32(io.reftag);
 	c.rw.apptag = cpu_to_le16(io.apptag);
 	c.rw.appmask = cpu_to_le16(io.appmask);
-
-	if (meta_len) {
-		meta_iod = nvme_map_user_pages(dev, io.opcode & 1, io.metadata,
-								meta_len);
-		if (IS_ERR(meta_iod)) {
-			status = PTR_ERR(meta_iod);
-			meta_iod = NULL;
-			goto unmap;
-		}
-
-		meta_mem = dma_alloc_coherent(&dev->pci_dev->dev, meta_len,
-						&meta_dma_addr, GFP_KERNEL);
-		if (!meta_mem) {
-			status = -ENOMEM;
-			goto unmap;
-		}
-
-		if (io.opcode & 1) {
-			int meta_offset = 0;
-
-			for (i = 0; i < meta_iod->nents; i++) {
-				meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
-						meta_iod->sg[i].offset;
-				memcpy(meta_mem + meta_offset, meta,
-						meta_iod->sg[i].length);
-				kunmap_atomic(meta);
-				meta_offset += meta_iod->sg[i].length;
-			}
-		}
-
-		c.rw.metadata = cpu_to_le64(meta_dma_addr);
-	}
-
-	length = nvme_setup_prps(dev, iod, length, GFP_KERNEL);
 	c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
 	c.rw.prp2 = cpu_to_le64(iod->first_dma);
-
-	if (length != (io.nblocks + 1) << ns->lba_shift)
-		status = -ENOMEM;
-	else
-		status = nvme_submit_io_cmd(dev, ns, &c, NULL);
-
-	if (meta_len) {
-		if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) {
-			int meta_offset = 0;
-
-			for (i = 0; i < meta_iod->nents; i++) {
-				meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
-						meta_iod->sg[i].offset;
-				memcpy(meta, meta_mem + meta_offset,
-						meta_iod->sg[i].length);
-				kunmap_atomic(meta);
-				meta_offset += meta_iod->sg[i].length;
-			}
-		}
-
-		dma_free_coherent(&dev->pci_dev->dev, meta_len, meta_mem,
-								meta_dma_addr);
-	}
-
+	c.rw.metadata = cpu_to_le64(meta_dma);
+	status = nvme_submit_io_cmd(dev, ns, &c, NULL);
 unmap:
-	nvme_unmap_user_pages(dev, io.opcode & 1, iod);
+	nvme_unmap_user_pages(dev, write, iod);
 	nvme_free_iod(dev, iod);
-
-	if (meta_iod) {
-		nvme_unmap_user_pages(dev, io.opcode & 1, meta_iod);
-		nvme_free_iod(dev, meta_iod);
+	if (meta) {
+		if (status == NVME_SC_SUCCESS && !write) {
+			if (copy_to_user((void __user *)io.metadata, meta,
+								meta_len))
+				status = -EFAULT;
+		}
+		dma_free_coherent(&dev->pci_dev->dev, meta_len, meta, meta_dma);
 	}
-
 	return status;
 }
 
···
 	struct nvme_dev *dev = ns->dev;
 	struct nvme_id_ns *id;
 	dma_addr_t dma_addr;
-	int lbaf, pi_type, old_ms;
+	u8 lbaf, pi_type;
+	u16 old_ms;
 	unsigned short bs;
 
 	id = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr,
···
 	lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
 	ns->lba_shift = id->lbaf[lbaf].ds;
 	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
+	ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
 
 	/*
 	 * If identify namespace failed, use default 512 byte block size so
···
 	if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
 					ns->ms != old_ms ||
 					bs != queue_logical_block_size(disk->queue) ||
-					(ns->ms && id->flbas & NVME_NS_FLBAS_META_EXT)))
+					(ns->ms && ns->ext)))
 		blk_integrity_unregister(disk);
 
 	ns->pi_type = pi_type;
 	blk_queue_logical_block_size(ns->queue, bs);
 
 	if (ns->ms && !blk_get_integrity(disk) && (disk->flags & GENHD_FL_UP) &&
-								!(id->flbas & NVME_NS_FLBAS_META_EXT))
+								!ns->ext)
 		nvme_init_integrity(ns);
 
 	if (id->ncap == 0 || (ns->ms && !blk_get_integrity(disk)))
···
 	dev->oncs = le16_to_cpup(&ctrl->oncs);
 	dev->abort_limit = ctrl->acl + 1;
 	dev->vwc = ctrl->vwc;
-	dev->event_limit = min(ctrl->aerl + 1, 8);
 	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
 	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
 	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
···
 
 	nvme_set_irq_hints(dev);
 
+	dev->event_limit = 1;
 	return result;
 
 free_tags:
···
 	nvme_char_major = result;
 
 	nvme_class = class_create(THIS_MODULE, "nvme");
-	if (!nvme_class)
+	if (IS_ERR(nvme_class)) {
+		result = PTR_ERR(nvme_class);
 		goto unregister_chrdev;
+	}
 
 	result = pci_register_driver(&nvme_driver);
 	if (result)
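Two of the NVMe changes above deserve a note. The async-event fix allocates the event request from a reserved tag (reserved_tags = 1 in the admin tag set, plus the true final argument to blk_mq_alloc_request()) so it cannot starve on a busy admin queue. And the PRP fix matters wherever the CPU page size exceeds the controller's: offset_in_page() masks with the CPU's PAGE_SIZE, while PRP entries are laid out in units of the device page size. A hedged sketch of the distinction (the helper name is ours, not the driver's):

    /* Hypothetical helper illustrating the fix: the PRP offset must be
     * computed against the controller page size (dev->page_size), not
     * the CPU page size that offset_in_page() assumes.  With 64K CPU
     * pages and a 4K NVMe page, the old code under-counted the PRP
     * entries needed. */
    static inline u32 nvme_prp_offset(u64 dma_addr, u32 dev_page_size)
    {
    	return dma_addr & (dev_page_size - 1);	/* power-of-two mask */
    }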
+27 -1
drivers/block/nvme-scsi.c
···
 #define VPD_SERIAL_NUMBER				0x80
 #define VPD_DEVICE_IDENTIFIERS				0x83
 #define VPD_EXTENDED_INQUIRY				0x86
+#define VPD_BLOCK_LIMITS				0xB0
 #define VPD_BLOCK_DEV_CHARACTERISTICS			0xB1
 
 /* CDB offsets */
···
 #define INQ_UNIT_SERIAL_NUMBER_PAGE			0x80
 #define INQ_DEVICE_IDENTIFICATION_PAGE			0x83
 #define INQ_EXTENDED_INQUIRY_DATA_PAGE			0x86
+#define INQ_BDEV_LIMITS_PAGE				0xB0
 #define INQ_BDEV_CHARACTERISTICS_PAGE			0xB1
 #define INQ_SERIAL_NUMBER_LENGTH			0x14
-#define INQ_NUM_SUPPORTED_VPD_PAGES			5
+#define INQ_NUM_SUPPORTED_VPD_PAGES			6
 #define VERSION_SPC_4					0x06
 #define ACA_UNSUPPORTED					0
 #define STANDARD_INQUIRY_LENGTH				36
···
 	inq_response[6] = INQ_DEVICE_IDENTIFICATION_PAGE;
 	inq_response[7] = INQ_EXTENDED_INQUIRY_DATA_PAGE;
 	inq_response[8] = INQ_BDEV_CHARACTERISTICS_PAGE;
+	inq_response[9] = INQ_BDEV_LIMITS_PAGE;
 
 	xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
 	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
···
 	kfree(inq_response);
  out_mem:
 	return res;
+}
+
+static int nvme_trans_bdev_limits_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+					u8 *inq_response, int alloc_len)
+{
+	__be32 max_sectors = cpu_to_be32(queue_max_hw_sectors(ns->queue));
+	__be32 max_discard = cpu_to_be32(ns->queue->limits.max_discard_sectors);
+	__be32 discard_desc_count = cpu_to_be32(0x100);
+
+	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
+	inq_response[1] = VPD_BLOCK_LIMITS;
+	inq_response[3] = 0x3c; /* Page Length */
+	memcpy(&inq_response[8], &max_sectors, sizeof(u32));
+	memcpy(&inq_response[20], &max_discard, sizeof(u32));
+
+	if (max_discard)
+		memcpy(&inq_response[24], &discard_desc_count, sizeof(u32));
+
+	return nvme_trans_copy_to_user(hdr, inq_response, 0x3c);
 }
 
 static int nvme_trans_bdev_char_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
···
 		break;
 	case VPD_EXTENDED_INQUIRY:
 		res = nvme_trans_ext_inq_page(ns, hdr, alloc_len);
+		break;
+	case VPD_BLOCK_LIMITS:
+		res = nvme_trans_bdev_limits_page(ns, hdr, inq_response,
+						  alloc_len);
 		break;
 	case VPD_BLOCK_DEV_CHARACTERISTICS:
 		res = nvme_trans_bdev_char_page(ns, hdr, alloc_len);
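The new Block Limits translation fills the SCSI VPD page 0xB0 fields at their SBC-defined byte offsets (8: MAXIMUM TRANSFER LENGTH, 20: MAXIMUM UNMAP LBA COUNT, 24: MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT), serializing each 32-bit value big-endian with a memcpy of a __be32 so the stores remain alignment-safe. A small sketch of that idiom (the helper name is ours):

    /* Hypothetical helper for the pattern above: store a CPU-order u32
     * big-endian at an arbitrary (possibly unaligned) byte offset. */
    static void put_be32_field(u8 *page, int offset, u32 val)
    {
    	__be32 v = cpu_to_be32(val);
    	memcpy(&page[offset], &v, sizeof(v));
    }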
+31 -31
drivers/block/xen-blkback/blkback.c
···
  * IN THE SOFTWARE.
  */
 
+#define pr_fmt(fmt) "xen-blkback: " fmt
+
 #include <linux/spinlock.h>
 #include <linux/kthread.h>
 #include <linux/list.h>
···
 	else if (persistent_gnt->gnt > this->gnt)
 		new = &((*new)->rb_right);
 	else {
-		pr_alert_ratelimited(DRV_PFX " trying to add a gref that's already in the tree\n");
+		pr_alert_ratelimited("trying to add a gref that's already in the tree\n");
 		return -EINVAL;
 	}
···
 		node = node->rb_right;
 	else {
 		if(test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
-			pr_alert_ratelimited(DRV_PFX " requesting a grant already in use\n");
+			pr_alert_ratelimited("requesting a grant already in use\n");
 			return NULL;
 		}
 		set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
···
 			  struct persistent_gnt *persistent_gnt)
 {
 	if(!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
-		pr_alert_ratelimited(DRV_PFX " freeing a grant already unused");
+		pr_alert_ratelimited("freeing a grant already unused\n");
 	set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
 	clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
 	atomic_dec(&blkif->persistent_gnt_in_use);
···
 	}
 
 	if (work_pending(&blkif->persistent_purge_work)) {
-		pr_alert_ratelimited(DRV_PFX "Scheduled work from previous purge is still pending, cannot purge list\n");
+		pr_alert_ratelimited("Scheduled work from previous purge is still pending, cannot purge list\n");
 		return;
 	}
···
 
 	total = num_clean;
 
-	pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);
+	pr_debug("Going to purge %u persistent grants\n", num_clean);
 
 	BUG_ON(!list_empty(&blkif->persistent_purge_list));
 	root = &blkif->persistent_gnts;
···
 	 * with the requested num
 	 */
 	if (!scan_used && !clean_used) {
-		pr_debug(DRV_PFX "Still missing %u purged frames\n", num_clean);
+		pr_debug("Still missing %u purged frames\n", num_clean);
 		scan_used = true;
 		goto purge_list;
 	}
 finished:
 	if (!clean_used) {
-		pr_debug(DRV_PFX "Finished scanning for grants to clean, removing used flag\n");
+		pr_debug("Finished scanning for grants to clean, removing used flag\n");
 		clean_used = true;
 		goto purge_list;
 	}
···
 
 	/* We can defer this work */
 	schedule_work(&blkif->persistent_purge_work);
-	pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total);
+	pr_debug("Purged %u/%u\n", (total - num_clean), total);
 	return;
 }
···
 	struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
 	unsigned long long new_size = vbd_sz(vbd);
 
-	pr_info(DRV_PFX "VBD Resize: Domid: %d, Device: (%d, %d)\n",
+	pr_info("VBD Resize: Domid: %d, Device: (%d, %d)\n",
 		blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
-	pr_info(DRV_PFX "VBD Resize: new size %llu\n", new_size);
+	pr_info("VBD Resize: new size %llu\n", new_size);
 	vbd->size = new_size;
 again:
 	err = xenbus_transaction_start(&xbt);
 	if (err) {
-		pr_warn(DRV_PFX "Error starting transaction");
+		pr_warn("Error starting transaction\n");
 		return;
 	}
 	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
 			    (unsigned long long)vbd_sz(vbd));
 	if (err) {
-		pr_warn(DRV_PFX "Error writing new size");
+		pr_warn("Error writing new size\n");
 		goto abort;
 	}
 	/*
···
 	 */
 	err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
 	if (err) {
-		pr_warn(DRV_PFX "Error writing the state");
+		pr_warn("Error writing the state\n");
 		goto abort;
 	}
···
 	if (err == -EAGAIN)
 		goto again;
 	if (err)
-		pr_warn(DRV_PFX "Error ending transaction");
+		pr_warn("Error ending transaction\n");
 	return;
 abort:
 	xenbus_transaction_end(xbt, 1);
···
 
 static void print_stats(struct xen_blkif *blkif)
 {
-	pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
+	pr_info("(%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
 		 " | ds %4llu | pg: %4u/%4d\n",
 		 current->comm, blkif->st_oo_req,
 		 blkif->st_rd_req, blkif->st_wr_req,
···
 			/* This is a newly mapped grant */
 			BUG_ON(new_map_idx >= segs_to_map);
 			if (unlikely(map[new_map_idx].status != 0)) {
-				pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
+				pr_debug("invalid buffer -- could not remap it\n");
 				put_free_pages(blkif, &pages[seg_idx]->page, 1);
 				pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
 				ret |= 1;
···
 				goto next;
 			}
 			pages[seg_idx]->persistent_gnt = persistent_gnt;
-			pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n",
+			pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n",
 				 persistent_gnt->gnt, blkif->persistent_gnt_c,
 				 xen_blkif_max_pgrants);
 			goto next;
 		}
 		if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
 			blkif->vbd.overflow_max_grants = 1;
-			pr_debug(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n",
+			pr_debug("domain %u, device %#x is using maximum number of persistent grants\n",
 				 blkif->domid, blkif->vbd.handle);
 		}
 		/*
···
 	return ret;
 
 out_of_memory:
-	pr_alert(DRV_PFX "%s: out of memory\n", __func__);
+	pr_alert("%s: out of memory\n", __func__);
 	put_free_pages(blkif, pages_to_gnt, segs_to_map);
 	return -ENOMEM;
 }
···
 
 	err = xen_vbd_translate(&preq, blkif, WRITE);
 	if (err) {
-		pr_warn(DRV_PFX "access denied: DISCARD [%llu->%llu] on dev=%04x\n",
+		pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
 			preq.sector_number,
 			preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
 		goto fail_response;
···
 			GFP_KERNEL, secure);
 fail_response:
 	if (err == -EOPNOTSUPP) {
-		pr_debug(DRV_PFX "discard op failed, not supported\n");
+		pr_debug("discard op failed, not supported\n");
 		status = BLKIF_RSP_EOPNOTSUPP;
 	} else if (err)
 		status = BLKIF_RSP_ERROR;
···
 	/* An error fails the entire request. */
 	if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
 	    (error == -EOPNOTSUPP)) {
-		pr_debug(DRV_PFX "flush diskcache op failed, not supported\n");
+		pr_debug("flush diskcache op failed, not supported\n");
 		xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
 		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
 	} else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
 		    (error == -EOPNOTSUPP)) {
-		pr_debug(DRV_PFX "write barrier op failed, not supported\n");
+		pr_debug("write barrier op failed, not supported\n");
 		xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
 		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
 	} else if (error) {
-		pr_debug(DRV_PFX "Buffer not up-to-date at end of operation,"
+		pr_debug("Buffer not up-to-date at end of operation,"
 			 " error=%d\n", error);
 		pending_req->status = BLKIF_RSP_ERROR;
 	}
···
 
 	if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
 		rc = blk_rings->common.rsp_prod_pvt;
-		pr_warn(DRV_PFX "Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
+		pr_warn("Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
 			rp, rc, rp - rc, blkif->vbd.pdevice);
 		return -EACCES;
 	}
···
 	if ((req->operation == BLKIF_OP_INDIRECT) &&
 	    (req_operation != BLKIF_OP_READ) &&
 	    (req_operation != BLKIF_OP_WRITE)) {
-		pr_debug(DRV_PFX "Invalid indirect operation (%u)\n",
-			 req_operation);
+		pr_debug("Invalid indirect operation (%u)\n", req_operation);
 		goto fail_response;
 	}
···
 		     (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
 	    unlikely((req->operation == BLKIF_OP_INDIRECT) &&
 		     (nseg > MAX_INDIRECT_SEGMENTS))) {
-		pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
-			 nseg);
+		pr_debug("Bad number of segments in request (%d)\n", nseg);
 		/* Haven't submitted any bio's yet. */
 		goto fail_response;
 	}
···
 	}
 
 	if (xen_vbd_translate(&preq, blkif, operation) != 0) {
-		pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n",
+		pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
 			 operation == READ ? "read" : "write",
 			 preq.sector_number,
 			 preq.sector_number + preq.nr_sects,
···
 	for (i = 0; i < nseg; i++) {
 		if (((int)preq.sector_number|(int)seg[i].nsec) &
 		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
-			pr_debug(DRV_PFX "Misaligned I/O request from domain %d",
+			pr_debug("Misaligned I/O request from domain %d\n",
 				 blkif->domid);
 			goto fail_response;
 		}
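The mechanism behind the DRV_PFX removal: pr_fmt() must be defined before the first #include so the pr_*() wrappers in include/linux/printk.h pick it up, after which every call site gets the prefix prepended at compile time. Approximately:

    /* Approximate shape of include/linux/printk.h: a subsystem that
     * defines pr_fmt() before its includes gets the prefix applied to
     * every pr_*() format string automatically. */
    #ifndef pr_fmt
    #define pr_fmt(fmt) fmt
    #endif

    #define pr_warn(fmt, ...) \
    	printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
    #define pr_info(fmt, ...) \
    	printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)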
-6
drivers/block/xen-blkback/common.h
···
 #include <xen/interface/io/blkif.h>
 #include <xen/interface/io/protocols.h>
 
-#define DRV_PFX "xen-blkback:"
-#define DPRINTK(fmt, args...)				\
-	pr_debug(DRV_PFX "(%s:%d) " fmt ".\n",		\
-		 __func__, __LINE__, ##args)
-
-
 /*
  * This is the maximum number of segments that would be allowed in indirect
  * requests. This value will also be passed to the frontend.
+23 -15
drivers/block/xen-blkback/xenbus.c
···
 
  */
 
+#define pr_fmt(fmt) "xen-blkback: " fmt
+
 #include <stdarg.h>
 #include <linux/module.h>
 #include <linux/kthread.h>
 #include <xen/events.h>
 #include <xen/grant_table.h>
 #include "common.h"
+
+/* Enlarge the array size in order to fully show blkback name. */
+#define BLKBACK_NAME_LEN (20)
 
 struct backend_info {
 	struct xenbus_device	*dev;
···
 	else
 		devname  = devpath;
 
-	snprintf(buf, TASK_COMM_LEN, "blkback.%d.%s", blkif->domid, devname);
+	snprintf(buf, BLKBACK_NAME_LEN, "blkback.%d.%s", blkif->domid, devname);
 	kfree(devpath);
 
 	return 0;
···
 static void xen_update_blkif_status(struct xen_blkif *blkif)
 {
 	int err;
-	char name[TASK_COMM_LEN];
+	char name[BLKBACK_NAME_LEN];
 
 	/* Not ready to connect? */
 	if (!blkif->irq || !blkif->vbd.bdev)
···
 				 FMODE_READ : FMODE_WRITE, NULL);
 
 	if (IS_ERR(bdev)) {
-		DPRINTK("xen_vbd_create: device %08x could not be opened.\n",
+		pr_warn("xen_vbd_create: device %08x could not be opened\n",
 			vbd->pdevice);
 		return -ENOENT;
 	}
 
 	vbd->bdev = bdev;
 	if (vbd->bdev->bd_disk == NULL) {
-		DPRINTK("xen_vbd_create: device %08x doesn't exist.\n",
+		pr_warn("xen_vbd_create: device %08x doesn't exist\n",
 			vbd->pdevice);
 		xen_vbd_free(vbd);
 		return -ENOENT;
···
 	if (q && blk_queue_secdiscard(q))
 		vbd->discard_secure = true;
 
-	DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
+	pr_debug("Successful creation of handle=%04x (dom=%u)\n",
 		handle, blkif->domid);
 	return 0;
 }
···
 {
 	struct backend_info *be = dev_get_drvdata(&dev->dev);
 
-	DPRINTK("");
+	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);
 
 	if (be->major || be->minor)
 		xenvbd_sysfs_delif(dev);
···
 	int err;
 	struct backend_info *be = kzalloc(sizeof(struct backend_info),
 					  GFP_KERNEL);
+
+	/* match the pr_debug in xen_blkbk_remove */
+	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);
+
 	if (!be) {
 		xenbus_dev_fatal(dev, -ENOMEM,
 				 "allocating backend structure");
···
 	return 0;
 
 fail:
-	DPRINTK("failed");
+	pr_warn("%s failed\n", __func__);
 	xen_blkbk_remove(dev);
 	return err;
 }
···
 	unsigned long handle;
 	char *device_type;
 
-	DPRINTK("");
+	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);
 
 	err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x",
 			   &major, &minor);
···
 
 	if (be->major | be->minor) {
 		if (be->major != major || be->minor != minor)
-			pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n",
+			pr_warn("changing physical device (from %x:%x to %x:%x) not supported.\n",
 				be->major, be->minor, major, minor);
 		return;
 	}
···
 	struct backend_info *be = dev_get_drvdata(&dev->dev);
 	int err;
 
-	DPRINTK("%s", xenbus_strstate(frontend_state));
+	pr_debug("%s %p %s\n", __func__, dev, xenbus_strstate(frontend_state));
 
 	switch (frontend_state) {
 	case XenbusStateInitialising:
 		if (dev->state == XenbusStateClosed) {
-			pr_info(DRV_PFX "%s: prepare for reconnect\n",
-				dev->nodename);
+			pr_info("%s: prepare for reconnect\n", dev->nodename);
 			xenbus_switch_state(dev, XenbusStateInitWait);
 		}
 		break;
···
 	int err;
 	struct xenbus_device *dev = be->dev;
 
-	DPRINTK("%s", dev->otherend);
+	pr_debug("%s %s\n", __func__, dev->otherend);
 
 	/* Supply the information about the device the frontend needs */
 again:
···
 	char protocol[64] = "";
 	int err;
 
-	DPRINTK("%s", dev->otherend);
+	pr_debug("%s %s\n", __func__, dev->otherend);
 
 	err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu",
 			    &ring_ref, "event-channel", "%u", &evtchn, NULL);
···
 	be->blkif->vbd.feature_gnt_persistent = pers_grants;
 	be->blkif->vbd.overflow_max_grants = 0;
 
-	pr_info(DRV_PFX "ring-ref %ld, event-channel %d, protocol %d (%s) %s\n",
+	pr_info("ring-ref %ld, event-channel %d, protocol %d (%s) %s\n",
 		ring_ref, evtchn, be->blkif->blk_protocol, protocol,
 		pers_grants ? "persistent grants" : "");
 
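The BLKBACK_NAME_LEN change fixes silent truncation: TASK_COMM_LEN is 16 bytes, which is not enough for "blkback.<domid>.<device>" once the domain id has a few digits. An illustration with made-up values:

    /* Illustration only (values are made up): snprintf() into the old
     * 16-byte TASK_COMM_LEN buffer silently cut off the device name. */
    char name[16];	/* TASK_COMM_LEN */
    snprintf(name, sizeof(name), "blkback.%d.%s", 123, "xvda1");
    /* name is now "blkback.123.xvd" -- the trailing "a1" is lost;
     * the 20-byte BLKBACK_NAME_LEN buffer holds the full string. */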
-46
include/linux/nbd.h
···
-/*
- * 1999 Copyright (C) Pavel Machek, pavel@ucw.cz. This code is GPL.
- * 1999/11/04 Copyright (C) 1999 VMware, Inc. (Regis "HPReg" Duchesne)
- *            Made nbd_end_request() use the io_request_lock
- * 2001 Copyright (C) Steven Whitehouse
- *            New nbd_end_request() for compatibility with new linux block
- *            layer code.
- * 2003/06/24 Louis D. Langholtz <ldl@aros.net>
- *            Removed unneeded blksize_bits field from nbd_device struct.
- *            Cleanup PARANOIA usage & code.
- * 2004/02/19 Paul Clements
- *            Removed PARANOIA, plus various cleanup and comments
- */
-#ifndef LINUX_NBD_H
-#define LINUX_NBD_H
-
-
-#include <linux/wait.h>
-#include <linux/mutex.h>
-#include <uapi/linux/nbd.h>
-
-struct request;
-
-struct nbd_device {
-	int flags;
-	int harderror;		/* Code of hard error */
-	struct socket * sock;	/* If == NULL, device is not ready, yet */
-	int magic;
-
-	spinlock_t queue_lock;
-	struct list_head queue_head;	/* Requests waiting result */
-	struct request *active_req;
-	wait_queue_head_t active_wq;
-	struct list_head waiting_queue;	/* Requests to be sent */
-	wait_queue_head_t waiting_wq;
-
-	struct mutex tx_lock;
-	struct gendisk *disk;
-	int blksize;
-	u64 bytesize;
-	pid_t pid; /* pid of nbd-client, if attached */
-	int xmit_timeout;
-	int disconnect; /* a disconnect has been requested by user */
-};
-
-#endif
+3 -2
include/linux/nvme.h
···
 
 	unsigned ns_id;
 	int lba_shift;
-	int ms;
-	int pi_type;
+	u16 ms;
+	bool ext;
+	u8 pi_type;
 	u64 mode_select_num_blocks;
 	u32 mode_select_block_len;
 };