Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, at v3.17-rc5 (2977 lines, 76 kB)
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/nvme.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/hdreg.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/percpu.h>
#include <linux/poison.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <scsi/sg.h>
#include <asm-generic/io-64-nonatomic-lo-hi.h>

#include <trace/events/block.h>

#define NVME_Q_DEPTH		1024
#define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))
#define ADMIN_TIMEOUT		(admin_timeout * HZ)
#define IOD_TIMEOUT		(retry_time * HZ)

static unsigned char admin_timeout = 60;
module_param(admin_timeout, byte, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");

unsigned char nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");

static unsigned char retry_time = 30;
module_param(retry_time, byte, 0644);
MODULE_PARM_DESC(retry_time, "time in seconds to retry failed I/O");

static int nvme_major;
module_param(nvme_major, int, 0);

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);
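/*
 * Example: the parameters above can be overridden when loading the
 * module, e.g. "modprobe nvme io_timeout=60 retry_time=10". The
 * timeouts are given in seconds and converted to jiffies by the
 * *_TIMEOUT macros above.
 */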

static DEFINE_SPINLOCK(dev_list_lock);
static LIST_HEAD(dev_list);
static struct task_struct *nvme_thread;
static struct workqueue_struct *nvme_workq;
static wait_queue_head_t nvme_kthread_wait;
static struct notifier_block nvme_nb;

static void nvme_reset_failed_dev(struct work_struct *ws);

struct async_cmd_info {
	struct kthread_work work;
	struct kthread_worker *worker;
	u32 result;
	int status;
	void *ctx;
};

/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct rcu_head r_head;
	struct device *q_dmadev;
	struct nvme_dev *dev;
	char irqname[24];	/* nvme4294967295-65535\0 */
	spinlock_t q_lock;
	struct nvme_command *sq_cmds;
	volatile struct nvme_completion *cqes;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	wait_queue_head_t sq_full;
	wait_queue_t sq_cong_wait;
	struct bio_list sq_cong;
	struct list_head iod_bio;
	u32 __iomem *q_db;
	u16 q_depth;
	u16 cq_vector;
	u16 sq_head;
	u16 sq_tail;
	u16 cq_head;
	u16 qid;
	u8 cq_phase;
	u8 cqe_seen;
	u8 q_suspended;
	cpumask_var_t cpu_mask;
	struct async_cmd_info cmdinfo;
	unsigned long cmdid_data[];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
}

typedef void (*nvme_completion_fn)(struct nvme_queue *, void *,
						struct nvme_completion *);

struct nvme_cmd_info {
	nvme_completion_fn fn;
	void *ctx;
	unsigned long timeout;
	int aborted;
};

static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
{
	return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
}

static unsigned nvme_queue_extra(int depth)
{
	return DIV_ROUND_UP(depth, 8) + (depth * sizeof(struct nvme_cmd_info));
}
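/*
 * Layout sketch: cmdid_data[] is a flexible tail holding a command-id
 * bitmap followed by one struct nvme_cmd_info per slot. For a depth of
 * 1024 that is 1024/8 = 128 bytes of bitmap plus 1024 nvme_cmd_info
 * entries; nvme_queue_extra() sizes this region and nvme_cmd_info()
 * indexes past the BITS_TO_LONGS()-rounded bitmap to reach the array.
 */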

/**
 * alloc_cmdid() - Allocate a Command ID
 * @nvmeq: The queue that will be used for this command
 * @ctx: A pointer that will be passed to the handler
 * @handler: The function to call on completion
 *
 * Allocate a Command ID for a queue.  The data passed in will
 * be passed to the completion handler.  This is implemented by using
 * the bottom two bits of the ctx pointer to store the handler ID.
 * Passing in a pointer that's not 4-byte aligned will cause a BUG.
 * We can change this if it becomes a problem.
 *
 * May be called with local interrupts disabled and the q_lock held,
 * or with interrupts enabled and no locks held.
 */
static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx,
				nvme_completion_fn handler, unsigned timeout)
{
	int depth = nvmeq->q_depth - 1;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	int cmdid;

	do {
		cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
		if (cmdid >= depth)
			return -EBUSY;
	} while (test_and_set_bit(cmdid, nvmeq->cmdid_data));

	info[cmdid].fn = handler;
	info[cmdid].ctx = ctx;
	info[cmdid].timeout = jiffies + timeout;
	info[cmdid].aborted = 0;
	return cmdid;
}

static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
				nvme_completion_fn handler, unsigned timeout)
{
	int cmdid;
	wait_event_killable(nvmeq->sq_full,
		(cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
	return (cmdid < 0) ? -EINTR : cmdid;
}

/* Special values must be less than 0x1000 */
#define CMD_CTX_BASE		((void *)POISON_POINTER_DELTA)
#define CMD_CTX_CANCELLED	(0x30C + CMD_CTX_BASE)
#define CMD_CTX_COMPLETED	(0x310 + CMD_CTX_BASE)
#define CMD_CTX_INVALID		(0x314 + CMD_CTX_BASE)
#define CMD_CTX_ABORT		(0x318 + CMD_CTX_BASE)

static void special_completion(struct nvme_queue *nvmeq, void *ctx,
						struct nvme_completion *cqe)
{
	if (ctx == CMD_CTX_CANCELLED)
		return;
	if (ctx == CMD_CTX_ABORT) {
		++nvmeq->dev->abort_limit;
		return;
	}
	if (ctx == CMD_CTX_COMPLETED) {
		dev_warn(nvmeq->q_dmadev,
				"completed id %d twice on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}
	if (ctx == CMD_CTX_INVALID) {
		dev_warn(nvmeq->q_dmadev,
				"invalid id %d completed on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}

	dev_warn(nvmeq->q_dmadev, "Unknown special completion %p\n", ctx);
}

static void async_completion(struct nvme_queue *nvmeq, void *ctx,
						struct nvme_completion *cqe)
{
	struct async_cmd_info *cmdinfo = ctx;
	cmdinfo->result = le32_to_cpup(&cqe->result);
	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
	queue_kthread_work(cmdinfo->worker, &cmdinfo->work);
}

/*
 * Called with local interrupts disabled and the q_lock held.  May not sleep.
 */
static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
						nvme_completion_fn *fn)
{
	void *ctx;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);

	if (cmdid >= nvmeq->q_depth || !info[cmdid].fn) {
		if (fn)
			*fn = special_completion;
		return CMD_CTX_INVALID;
	}
	if (fn)
		*fn = info[cmdid].fn;
	ctx = info[cmdid].ctx;
	info[cmdid].fn = special_completion;
	info[cmdid].ctx = CMD_CTX_COMPLETED;
	clear_bit(cmdid, nvmeq->cmdid_data);
	wake_up(&nvmeq->sq_full);
	return ctx;
}

static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
						nvme_completion_fn *fn)
{
	void *ctx;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	if (fn)
		*fn = info[cmdid].fn;
	ctx = info[cmdid].ctx;
	info[cmdid].fn = special_completion;
	info[cmdid].ctx = CMD_CTX_CANCELLED;
	return ctx;
}

static struct nvme_queue *raw_nvmeq(struct nvme_dev *dev, int qid)
{
	return rcu_dereference_raw(dev->queues[qid]);
}

static struct nvme_queue *get_nvmeq(struct nvme_dev *dev) __acquires(RCU)
{
	struct nvme_queue *nvmeq;
	unsigned queue_id = get_cpu_var(*dev->io_queue);

	rcu_read_lock();
	nvmeq = rcu_dereference(dev->queues[queue_id]);
	if (nvmeq)
		return nvmeq;

	rcu_read_unlock();
	put_cpu_var(*dev->io_queue);
	return NULL;
}

static void put_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
{
	rcu_read_unlock();
	put_cpu_var(nvmeq->dev->io_queue);
}

static struct nvme_queue *lock_nvmeq(struct nvme_dev *dev, int q_idx)
							__acquires(RCU)
{
	struct nvme_queue *nvmeq;

	rcu_read_lock();
	nvmeq = rcu_dereference(dev->queues[q_idx]);
	if (nvmeq)
		return nvmeq;

	rcu_read_unlock();
	return NULL;
}

static void unlock_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
{
	rcu_read_unlock();
}
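/*
 * Usage sketch: get_nvmeq()/put_nvmeq() bracket submission on the
 * current CPU's queue (see nvme_make_request() below):
 *
 *	struct nvme_queue *nvmeq = get_nvmeq(dev);
 *	if (nvmeq) {
 *		... submit under nvmeq->q_lock ...
 *		put_nvmeq(nvmeq);
 *	}
 *
 * get_cpu_var() disables preemption so the per-cpu queue id stays
 * stable, and rcu_read_lock() keeps the queue from being freed.
 */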

/**
 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 *
 * Safe to use from interrupt context
 */
static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	unsigned long flags;
	u16 tail;
	spin_lock_irqsave(&nvmeq->q_lock, flags);
	if (nvmeq->q_suspended) {
		spin_unlock_irqrestore(&nvmeq->q_lock, flags);
		return -EBUSY;
	}
	tail = nvmeq->sq_tail;
	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	if (++tail == nvmeq->q_depth)
		tail = 0;
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
	spin_unlock_irqrestore(&nvmeq->q_lock, flags);

	return 0;
}

static __le64 **iod_list(struct nvme_iod *iod)
{
	return ((void *)iod) + iod->offset;
}

/*
 * Will slightly overestimate the number of pages needed.  This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
static int nvme_npages(unsigned size)
{
	unsigned nprps = DIV_ROUND_UP(size + PAGE_SIZE, PAGE_SIZE);
	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}
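/*
 * Worked example, assuming 4 KiB pages: a 128 KiB transfer needs at
 * most nprps = (128 KiB + 4 KiB) / 4 KiB = 33 PRP entries. Each PRP
 * list page holds PAGE_SIZE/8 - 1 = 511 entries plus one chain
 * pointer, so a single list page suffices: nvme_npages(131072) == 1.
 */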

static struct nvme_iod *
nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
{
	struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) +
				sizeof(__le64 *) * nvme_npages(nbytes) +
				sizeof(struct scatterlist) * nseg, gfp);

	if (iod) {
		iod->offset = offsetof(struct nvme_iod, sg[nseg]);
		iod->npages = -1;
		iod->length = nbytes;
		iod->nents = 0;
		iod->first_dma = 0ULL;
		iod->start_time = jiffies;
	}

	return iod;
}

void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
{
	const int last_prp = PAGE_SIZE / 8 - 1;
	int i;
	__le64 **list = iod_list(iod);
	dma_addr_t prp_dma = iod->first_dma;

	if (iod->npages == 0)
		dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
	for (i = 0; i < iod->npages; i++) {
		__le64 *prp_list = list[i];
		dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
		dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
		prp_dma = next_prp_dma;
	}
	kfree(iod);
}

static void nvme_start_io_acct(struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	if (blk_queue_io_stat(disk->queue)) {
		const int rw = bio_data_dir(bio);
		int cpu = part_stat_lock();
		part_round_stats(cpu, &disk->part0);
		part_stat_inc(cpu, &disk->part0, ios[rw]);
		part_stat_add(cpu, &disk->part0, sectors[rw],
							bio_sectors(bio));
		part_inc_in_flight(&disk->part0, rw);
		part_stat_unlock();
	}
}

static void nvme_end_io_acct(struct bio *bio, unsigned long start_time)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	if (blk_queue_io_stat(disk->queue)) {
		const int rw = bio_data_dir(bio);
		unsigned long duration = jiffies - start_time;
		int cpu = part_stat_lock();
		part_stat_add(cpu, &disk->part0, ticks[rw], duration);
		part_round_stats(cpu, &disk->part0);
		part_dec_in_flight(&disk->part0, rw);
		part_stat_unlock();
	}
}

static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
						struct nvme_completion *cqe)
{
	struct nvme_iod *iod = ctx;
	struct bio *bio = iod->private;
	u16 status = le16_to_cpup(&cqe->status) >> 1;
	int error = 0;

	if (unlikely(status)) {
		if (!(status & NVME_SC_DNR ||
				bio->bi_rw & REQ_FAILFAST_MASK) &&
				(jiffies - iod->start_time) < IOD_TIMEOUT) {
			if (!waitqueue_active(&nvmeq->sq_full))
				add_wait_queue(&nvmeq->sq_full,
							&nvmeq->sq_cong_wait);
			list_add_tail(&iod->node, &nvmeq->iod_bio);
			wake_up(&nvmeq->sq_full);
			return;
		}
		error = -EIO;
	}
	if (iod->nents) {
		dma_unmap_sg(nvmeq->q_dmadev, iod->sg, iod->nents,
			bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		nvme_end_io_acct(bio, iod->start_time);
	}
	nvme_free_iod(nvmeq->dev, iod);

	trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio, error);
	bio_endio(bio, error);
}

/* length is in bytes.  gfp flags indicate whether we may sleep. */
int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod, int total_len,
								gfp_t gfp)
{
	struct dma_pool *pool;
	int length = total_len;
	struct scatterlist *sg = iod->sg;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	int offset = offset_in_page(dma_addr);
	__le64 *prp_list;
	__le64 **list = iod_list(iod);
	dma_addr_t prp_dma;
	int nprps, i;

	length -= (PAGE_SIZE - offset);
	if (length <= 0)
		return total_len;

	dma_len -= (PAGE_SIZE - offset);
	if (dma_len) {
		dma_addr += (PAGE_SIZE - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= PAGE_SIZE) {
		iod->first_dma = dma_addr;
		return total_len;
	}

	nprps = DIV_ROUND_UP(length, PAGE_SIZE);
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}

	prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
	if (!prp_list) {
		iod->first_dma = dma_addr;
		iod->npages = -1;
		return (total_len - length) + PAGE_SIZE;
	}
	list[0] = prp_list;
	iod->first_dma = prp_dma;
	i = 0;
	for (;;) {
		if (i == PAGE_SIZE / 8) {
			__le64 *old_prp_list = prp_list;
			prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
			if (!prp_list)
				return total_len - length;
			list[iod->npages++] = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= PAGE_SIZE;
		dma_addr += PAGE_SIZE;
		length -= PAGE_SIZE;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		BUG_ON(dma_len < 0);
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	return total_len;
}
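/*
 * PRP sketch: for a transfer that fits in two pages, prp1 carries the
 * first page and iod->first_dma (used as prp2) points directly at the
 * second; anything larger makes first_dma the DMA address of a chained
 * PRP list, which is what nvme_submit_iod() and nvme_submit_io() load
 * into cmnd->rw.prp2.
 */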

static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
				 int len)
{
	struct bio *split = bio_split(bio, len >> 9, GFP_ATOMIC, NULL);
	if (!split)
		return -ENOMEM;

	trace_block_split(bdev_get_queue(bio->bi_bdev), bio,
					split->bi_iter.bi_sector);
	bio_chain(split, bio);

	if (!waitqueue_active(&nvmeq->sq_full))
		add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
	bio_list_add(&nvmeq->sq_cong, split);
	bio_list_add(&nvmeq->sq_cong, bio);
	wake_up(&nvmeq->sq_full);

	return 0;
}

/* NVMe scatterlists require no holes in the virtual address */
#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2)	((vec2)->bv_offset || \
			(((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE))

static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
		struct bio *bio, enum dma_data_direction dma_dir, int psegs)
{
	struct bio_vec bvec, bvprv;
	struct bvec_iter iter;
	struct scatterlist *sg = NULL;
	int length = 0, nsegs = 0, split_len = bio->bi_iter.bi_size;
	int first = 1;

	if (nvmeq->dev->stripe_size)
		split_len = nvmeq->dev->stripe_size -
			((bio->bi_iter.bi_sector << 9) &
			(nvmeq->dev->stripe_size - 1));

	sg_init_table(iod->sg, psegs);
	bio_for_each_segment(bvec, bio, iter) {
		if (!first && BIOVEC_PHYS_MERGEABLE(&bvprv, &bvec)) {
			sg->length += bvec.bv_len;
		} else {
			if (!first && BIOVEC_NOT_VIRT_MERGEABLE(&bvprv, &bvec))
				return nvme_split_and_submit(bio, nvmeq,
							     length);

			sg = sg ? sg + 1 : iod->sg;
			sg_set_page(sg, bvec.bv_page,
				    bvec.bv_len, bvec.bv_offset);
			nsegs++;
		}

		if (split_len - length < bvec.bv_len)
			return nvme_split_and_submit(bio, nvmeq, split_len);
		length += bvec.bv_len;
		bvprv = bvec;
		first = 0;
	}
	iod->nents = nsegs;
	sg_mark_end(sg);
	if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0)
		return -ENOMEM;

	BUG_ON(length != bio->bi_iter.bi_size);
	return length;
}

static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
		struct bio *bio, struct nvme_iod *iod, int cmdid)
{
	struct nvme_dsm_range *range =
				(struct nvme_dsm_range *)iod_list(iod)[0];
	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	range->cattr = cpu_to_le32(0);
	range->nlb = cpu_to_le32(bio->bi_iter.bi_size >> ns->lba_shift);
	range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.command_id = cmdid;
	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
	cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma);
	cmnd->dsm.nr = 0;
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;
}

static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								int cmdid)
{
	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.command_id = cmdid;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;
}
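/*
 * DSM example: a 1 MiB discard on a 512-byte-LBA namespace
 * (lba_shift == 9) is encoded as a single range with
 * nlb = 1048576 >> 9 = 2048 blocks; nr == 0 because the range count
 * field is zero-based, so one range is described.
 */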

static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod)
{
	struct bio *bio = iod->private;
	struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
	struct nvme_command *cmnd;
	int cmdid;
	u16 control;
	u32 dsmgmt;

	cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT);
	if (unlikely(cmdid < 0))
		return cmdid;

	if (bio->bi_rw & REQ_DISCARD)
		return nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
	if (bio->bi_rw & REQ_FLUSH)
		return nvme_submit_flush(nvmeq, ns, cmdid);

	control = 0;
	if (bio->bi_rw & REQ_FUA)
		control |= NVME_RW_FUA;
	if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	dsmgmt = 0;
	if (bio->bi_rw & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
	memset(cmnd, 0, sizeof(*cmnd));

	cmnd->rw.opcode = bio_data_dir(bio) ? nvme_cmd_write : nvme_cmd_read;
	cmnd->rw.command_id = cmdid;
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
	cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
	cmnd->rw.length =
		cpu_to_le16((bio->bi_iter.bi_size >> ns->lba_shift) - 1);
	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;
}
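/*
 * Note that rw.length is a zero-based block count: a 4 KiB write on a
 * 512-byte-LBA namespace covers 8 blocks and is encoded above as
 * (4096 >> 9) - 1 = 7.
 */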

static int nvme_split_flush_data(struct nvme_queue *nvmeq, struct bio *bio)
{
	struct bio *split = bio_clone(bio, GFP_ATOMIC);
	if (!split)
		return -ENOMEM;

	split->bi_iter.bi_size = 0;
	split->bi_phys_segments = 0;
	bio->bi_rw &= ~REQ_FLUSH;
	bio_chain(split, bio);

	if (!waitqueue_active(&nvmeq->sq_full))
		add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
	bio_list_add(&nvmeq->sq_cong, split);
	bio_list_add(&nvmeq->sq_cong, bio);
	wake_up_process(nvme_thread);

	return 0;
}

/*
 * Called with local interrupts disabled and the q_lock held.  May not sleep.
 */
static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								struct bio *bio)
{
	struct nvme_iod *iod;
	int psegs = bio_phys_segments(ns->queue, bio);
	int result;

	if ((bio->bi_rw & REQ_FLUSH) && psegs)
		return nvme_split_flush_data(nvmeq, bio);

	iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
	if (!iod)
		return -ENOMEM;

	iod->private = bio;
	if (bio->bi_rw & REQ_DISCARD) {
		void *range;
		/*
		 * We reuse the small pool to allocate the 16-byte range here
		 * as it is not worth having a special pool for these or
		 * additional cases to handle freeing the iod.
		 */
		range = dma_pool_alloc(nvmeq->dev->prp_small_pool,
							GFP_ATOMIC,
							&iod->first_dma);
		if (!range) {
			result = -ENOMEM;
			goto free_iod;
		}
		iod_list(iod)[0] = (__le64 *)range;
		iod->npages = 0;
	} else if (psegs) {
		result = nvme_map_bio(nvmeq, iod, bio,
			bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
			psegs);
		if (result <= 0)
			goto free_iod;
		if (nvme_setup_prps(nvmeq->dev, iod, result, GFP_ATOMIC) !=
								result) {
			result = -ENOMEM;
			goto free_iod;
		}
		nvme_start_io_acct(bio);
	}
	if (unlikely(nvme_submit_iod(nvmeq, iod))) {
		if (!waitqueue_active(&nvmeq->sq_full))
			add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
		list_add_tail(&iod->node, &nvmeq->iod_bio);
	}
	return 0;

 free_iod:
	nvme_free_iod(nvmeq->dev, iod);
	return result;
}

static int nvme_process_cq(struct nvme_queue *nvmeq)
{
	u16 head, phase;

	head = nvmeq->cq_head;
	phase = nvmeq->cq_phase;

	for (;;) {
		void *ctx;
		nvme_completion_fn fn;
		struct nvme_completion cqe = nvmeq->cqes[head];
		if ((le16_to_cpu(cqe.status) & 1) != phase)
			break;
		nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}

		ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
		fn(nvmeq, ctx, &cqe);
	}

	/* If the controller ignores the cq head doorbell and continuously
	 * writes to the queue, it is theoretically possible to wrap around
	 * the queue twice and mistakenly return IRQ_NONE.  Linux only
	 * requires that 0.1% of your interrupts are handled, so this isn't
	 * a big problem.
	 */
	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
		return 0;

	writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	nvmeq->cqe_seen = 1;
	return 1;
}
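/*
 * Phase-bit walkthrough: cq_phase starts at 1, so a CQE whose status
 * bit 0 still reads 0 has not been posted on this pass and the loop
 * above stops. Each time the head wraps, phase flips, so stale entries
 * from the previous lap are never mistaken for new completions.
 */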

static void nvme_make_request(struct request_queue *q, struct bio *bio)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
	int result = -EBUSY;

	if (!nvmeq) {
		bio_endio(bio, -EIO);
		return;
	}

	spin_lock_irq(&nvmeq->q_lock);
	if (!nvmeq->q_suspended && bio_list_empty(&nvmeq->sq_cong))
		result = nvme_submit_bio_queue(nvmeq, ns, bio);
	if (unlikely(result)) {
		if (!waitqueue_active(&nvmeq->sq_full))
			add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
		bio_list_add(&nvmeq->sq_cong, bio);
	}

	nvme_process_cq(nvmeq);
	spin_unlock_irq(&nvmeq->q_lock);
	put_nvmeq(nvmeq);
}

static irqreturn_t nvme_irq(int irq, void *data)
{
	irqreturn_t result;
	struct nvme_queue *nvmeq = data;
	spin_lock(&nvmeq->q_lock);
	nvme_process_cq(nvmeq);
	result = nvmeq->cqe_seen ? IRQ_HANDLED : IRQ_NONE;
	nvmeq->cqe_seen = 0;
	spin_unlock(&nvmeq->q_lock);
	return result;
}

static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
	if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
		return IRQ_NONE;
	return IRQ_WAKE_THREAD;
}

static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
{
	spin_lock_irq(&nvmeq->q_lock);
	cancel_cmdid(nvmeq, cmdid, NULL);
	spin_unlock_irq(&nvmeq->q_lock);
}

struct sync_cmd_info {
	struct task_struct *task;
	u32 result;
	int status;
};

static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
						struct nvme_completion *cqe)
{
	struct sync_cmd_info *cmdinfo = ctx;
	cmdinfo->result = le32_to_cpup(&cqe->result);
	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
	wake_up_process(cmdinfo->task);
}

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
static int nvme_submit_sync_cmd(struct nvme_dev *dev, int q_idx,
						struct nvme_command *cmd,
						u32 *result, unsigned timeout)
{
	int cmdid, ret;
	struct sync_cmd_info cmdinfo;
	struct nvme_queue *nvmeq;

	nvmeq = lock_nvmeq(dev, q_idx);
	if (!nvmeq)
		return -ENODEV;

	cmdinfo.task = current;
	cmdinfo.status = -EINTR;

	cmdid = alloc_cmdid(nvmeq, &cmdinfo, sync_completion, timeout);
	if (cmdid < 0) {
		unlock_nvmeq(nvmeq);
		return cmdid;
	}
	cmd->common.command_id = cmdid;

	set_current_state(TASK_KILLABLE);
	ret = nvme_submit_cmd(nvmeq, cmd);
	if (ret) {
		free_cmdid(nvmeq, cmdid, NULL);
		unlock_nvmeq(nvmeq);
		set_current_state(TASK_RUNNING);
		return ret;
	}
	unlock_nvmeq(nvmeq);
	schedule_timeout(timeout);

	if (cmdinfo.status == -EINTR) {
		nvmeq = lock_nvmeq(dev, q_idx);
		if (nvmeq) {
			nvme_abort_command(nvmeq, cmdid);
			unlock_nvmeq(nvmeq);
		}
		return -EINTR;
	}

	if (result)
		*result = cmdinfo.result;

	return cmdinfo.status;
}
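/*
 * The pre-set cmdinfo.status == -EINTR is how a timeout or fatal
 * signal is detected above: if schedule_timeout() returns before
 * sync_completion() ran, the status is still -EINTR and the command
 * id is cancelled, routing any late completion to special_completion()
 * rather than to the stack-allocated cmdinfo.
 */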

static int nvme_submit_async_cmd(struct nvme_queue *nvmeq,
					struct nvme_command *cmd,
			struct async_cmd_info *cmdinfo, unsigned timeout)
{
	int cmdid;

	cmdid = alloc_cmdid_killable(nvmeq, cmdinfo, async_completion, timeout);
	if (cmdid < 0)
		return cmdid;
	cmdinfo->status = -EINTR;
	cmd->common.command_id = cmdid;
	return nvme_submit_cmd(nvmeq, cmd);
}

int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
								u32 *result)
{
	return nvme_submit_sync_cmd(dev, 0, cmd, result, ADMIN_TIMEOUT);
}

int nvme_submit_io_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
								u32 *result)
{
	return nvme_submit_sync_cmd(dev, smp_processor_id() + 1, cmd, result,
							NVME_IO_TIMEOUT);
}

static int nvme_submit_admin_cmd_async(struct nvme_dev *dev,
		struct nvme_command *cmd, struct async_cmd_info *cmdinfo)
{
	return nvme_submit_async_cmd(raw_nvmeq(dev, 0), cmd, cmdinfo,
								ADMIN_TIMEOUT);
}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	int status;
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
							dma_addr_t dma_addr)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.prp1 = cpu_to_le64(dma_addr);
	c.identify.cns = cpu_to_le32(cns);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
					dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);

	return nvme_submit_admin_cmd(dev, &c, result);
}

int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
					dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	return nvme_submit_admin_cmd(dev, &c, result);
}
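/*
 * Example: set_queue_count() below uses nvme_set_features() with
 * NVME_FEAT_NUM_QUEUES, packing the zero-based submission and
 * completion queue counts into dword11:
 *
 *	u32 q_count = (count - 1) | ((count - 1) << 16);
 *	nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0, &result);
 *
 * and the controller reports the counts it actually allocated in
 * *result using the same encoding.
 */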

/**
 * nvme_abort_cmd - Attempt aborting a command
 * @cmdid: Command id of a timed out IO
 * @queue: The queue with timed out IO
 *
 * Schedule controller reset if the command was already aborted once before and
 * still hasn't been returned to the driver, or if this is the admin queue.
 */
static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
{
	int a_cmdid;
	struct nvme_command cmd;
	struct nvme_dev *dev = nvmeq->dev;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	struct nvme_queue *adminq;

	if (!nvmeq->qid || info[cmdid].aborted) {
		if (work_busy(&dev->reset_work))
			return;
		list_del_init(&dev->node);
		dev_warn(&dev->pci_dev->dev,
			"I/O %d QID %d timeout, reset controller\n", cmdid,
								nvmeq->qid);
		dev->reset_workfn = nvme_reset_failed_dev;
		queue_work(nvme_workq, &dev->reset_work);
		return;
	}

	if (!dev->abort_limit)
		return;

	adminq = rcu_dereference(dev->queues[0]);
	a_cmdid = alloc_cmdid(adminq, CMD_CTX_ABORT, special_completion,
								ADMIN_TIMEOUT);
	if (a_cmdid < 0)
		return;

	memset(&cmd, 0, sizeof(cmd));
	cmd.abort.opcode = nvme_admin_abort_cmd;
	cmd.abort.cid = cmdid;
	cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
	cmd.abort.command_id = a_cmdid;

	--dev->abort_limit;
	info[cmdid].aborted = 1;
	info[cmdid].timeout = jiffies + ADMIN_TIMEOUT;

	dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", cmdid,
							nvmeq->qid);
	nvme_submit_cmd(adminq, &cmd);
}

/**
 * nvme_cancel_ios - Cancel outstanding I/Os
 * @queue: The queue to cancel I/Os on
 * @timeout: True to only cancel I/Os which have timed out
 */
static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
{
	int depth = nvmeq->q_depth - 1;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	unsigned long now = jiffies;
	int cmdid;

	for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
		void *ctx;
		nvme_completion_fn fn;
		static struct nvme_completion cqe = {
			.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1),
		};

		if (timeout && !time_after(now, info[cmdid].timeout))
			continue;
		if (info[cmdid].ctx == CMD_CTX_CANCELLED)
			continue;
		if (timeout && nvmeq->dev->initialized) {
			nvme_abort_cmd(cmdid, nvmeq);
			continue;
		}
		dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", cmdid,
								nvmeq->qid);
		ctx = cancel_cmdid(nvmeq, cmdid, &fn);
		fn(nvmeq, ctx, &cqe);
	}
}

static void nvme_free_queue(struct rcu_head *r)
{
	struct nvme_queue *nvmeq = container_of(r, struct nvme_queue, r_head);

	spin_lock_irq(&nvmeq->q_lock);
	while (bio_list_peek(&nvmeq->sq_cong)) {
		struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
		bio_endio(bio, -EIO);
	}
	while (!list_empty(&nvmeq->iod_bio)) {
		static struct nvme_completion cqe = {
			.status = cpu_to_le16(
				(NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1),
		};
		struct nvme_iod *iod = list_first_entry(&nvmeq->iod_bio,
							struct nvme_iod,
							node);
		list_del(&iod->node);
		bio_completion(nvmeq, iod, &cqe);
	}
	spin_unlock_irq(&nvmeq->q_lock);

	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	if (nvmeq->qid)
		free_cpumask_var(nvmeq->cpu_mask);
	kfree(nvmeq);
}
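/*
 * nvme_free_queue() runs as an RCU callback (see call_rcu() in
 * nvme_free_queues() below), so any reader that found the queue via
 * get_nvmeq()/lock_nvmeq() inside rcu_read_lock() has drained before
 * the DMA memory and the nvme_queue itself are released.
 */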

static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
	int i;

	for (i = dev->queue_count - 1; i >= lowest; i--) {
		struct nvme_queue *nvmeq = raw_nvmeq(dev, i);
		rcu_assign_pointer(dev->queues[i], NULL);
		call_rcu(&nvmeq->r_head, nvme_free_queue);
		dev->queue_count--;
	}
}

/**
 * nvme_suspend_queue - put queue into suspended state
 * @nvmeq - queue to suspend
 *
 * Returns 1 if already suspended, 0 otherwise.
 */
static int nvme_suspend_queue(struct nvme_queue *nvmeq)
{
	int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;

	spin_lock_irq(&nvmeq->q_lock);
	if (nvmeq->q_suspended) {
		spin_unlock_irq(&nvmeq->q_lock);
		return 1;
	}
	nvmeq->q_suspended = 1;
	nvmeq->dev->online_queues--;
	spin_unlock_irq(&nvmeq->q_lock);

	irq_set_affinity_hint(vector, NULL);
	free_irq(vector, nvmeq);

	return 0;
}

static void nvme_clear_queue(struct nvme_queue *nvmeq)
{
	spin_lock_irq(&nvmeq->q_lock);
	nvme_process_cq(nvmeq);
	nvme_cancel_ios(nvmeq, false);
	spin_unlock_irq(&nvmeq->q_lock);
}

static void nvme_disable_queue(struct nvme_dev *dev, int qid)
{
	struct nvme_queue *nvmeq = raw_nvmeq(dev, qid);

	if (!nvmeq)
		return;
	if (nvme_suspend_queue(nvmeq))
		return;

	/* Don't tell the adapter to delete the admin queue.
	 * Don't tell a removed adapter to delete IO queues. */
	if (qid && readl(&dev->bar->csts) != -1) {
		adapter_delete_sq(dev, qid);
		adapter_delete_cq(dev, qid);
	}
	nvme_clear_queue(nvmeq);
}

static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
							int depth, int vector)
{
	struct device *dmadev = &dev->pci_dev->dev;
	unsigned extra = nvme_queue_extra(depth);
	struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
	if (!nvmeq)
		return NULL;

	nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
					&nvmeq->cq_dma_addr, GFP_KERNEL);
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));

	nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
					&nvmeq->sq_dma_addr, GFP_KERNEL);
	if (!nvmeq->sq_cmds)
		goto free_cqdma;

	if (qid && !zalloc_cpumask_var(&nvmeq->cpu_mask, GFP_KERNEL))
		goto free_sqdma;

	nvmeq->q_dmadev = dmadev;
	nvmeq->dev = dev;
	snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
			dev->instance, qid);
	spin_lock_init(&nvmeq->q_lock);
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	init_waitqueue_head(&nvmeq->sq_full);
	init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
	bio_list_init(&nvmeq->sq_cong);
	INIT_LIST_HEAD(&nvmeq->iod_bio);
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	nvmeq->q_depth = depth;
	nvmeq->cq_vector = vector;
	nvmeq->qid = qid;
	nvmeq->q_suspended = 1;
	dev->queue_count++;
	rcu_assign_pointer(dev->queues[qid], nvmeq);

	return nvmeq;

 free_sqdma:
	dma_free_coherent(dmadev, SQ_SIZE(depth), (void *)nvmeq->sq_cmds,
							nvmeq->sq_dma_addr);
 free_cqdma:
	dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes,
							nvmeq->cq_dma_addr);
 free_nvmeq:
	kfree(nvmeq);
	return NULL;
}
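/*
 * Doorbell layout example: dev->dbs points at BAR offset 0x1000 and
 * q_db = &dev->dbs[qid * 2 * db_stride]. With a doorbell stride of one
 * u32 (db_stride == 1), queue 3's SQ tail doorbell sits at
 * 0x1000 + 3 * 8 = 0x1018, and its CQ head doorbell one stride later,
 * which is what nvme_process_cq() writes via q_db + db_stride.
 */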

static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
							const char *name)
{
	if (use_threaded_interrupts)
		return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
					nvme_irq_check, nvme_irq, IRQF_SHARED,
					name, nvmeq);
	return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
				IRQF_SHARED, name, nvmeq);
}

static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
{
	struct nvme_dev *dev = nvmeq->dev;
	unsigned extra = nvme_queue_extra(nvmeq->q_depth);

	nvmeq->sq_tail = 0;
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	memset(nvmeq->cmdid_data, 0, extra);
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
	nvme_cancel_ios(nvmeq, false);
	nvmeq->q_suspended = 0;
	dev->online_queues++;
}

static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
{
	struct nvme_dev *dev = nvmeq->dev;
	int result;

	result = adapter_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		return result;

	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
	if (result < 0)
		goto release_sq;

	spin_lock_irq(&nvmeq->q_lock);
	nvme_init_queue(nvmeq, qid);
	spin_unlock_irq(&nvmeq->q_lock);

	return result;

 release_sq:
	adapter_delete_sq(dev, qid);
 release_cq:
	adapter_delete_cq(dev, qid);
	return result;
}

static int nvme_wait_ready(struct nvme_dev *dev, u64 cap, bool enabled)
{
	unsigned long timeout;
	u32 bit = enabled ? NVME_CSTS_RDY : 0;

	timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;

	while ((readl(&dev->bar->csts) & NVME_CSTS_RDY) != bit) {
		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(&dev->pci_dev->dev,
				"Device not ready; aborting %s\n", enabled ?
						"initialisation" : "reset");
			return -ENODEV;
		}
	}

	return 0;
}

/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit.  The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... and we don't know what memory that might be pointing at!
 */
static int nvme_disable_ctrl(struct nvme_dev *dev, u64 cap)
{
	u32 cc = readl(&dev->bar->cc);

	if (cc & NVME_CC_ENABLE)
		writel(cc & ~NVME_CC_ENABLE, &dev->bar->cc);
	return nvme_wait_ready(dev, cap, false);
}

static int nvme_enable_ctrl(struct nvme_dev *dev, u64 cap)
{
	return nvme_wait_ready(dev, cap, true);
}

static int nvme_shutdown_ctrl(struct nvme_dev *dev)
{
	unsigned long timeout;
	u32 cc;

	cc = (readl(&dev->bar->cc) & ~NVME_CC_SHN_MASK) | NVME_CC_SHN_NORMAL;
	writel(cc, &dev->bar->cc);

	timeout = 2 * HZ + jiffies;
	while ((readl(&dev->bar->csts) & NVME_CSTS_SHST_MASK) !=
							NVME_CSTS_SHST_CMPLT) {
		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(&dev->pci_dev->dev,
				"Device shutdown incomplete; abort shutdown\n");
			return -ENODEV;
		}
	}

	return 0;
}
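/*
 * Timing note: CAP.TO is expressed in 500 ms units, so the
 * (NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2 computation in nvme_wait_ready()
 * grants the controller its full advertised worst-case ready latency,
 * polled in 100 ms steps.
 */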

static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	u64 cap = readq(&dev->bar->cap);
	struct nvme_queue *nvmeq;

	result = nvme_disable_ctrl(dev, cap);
	if (result < 0)
		return result;

	nvmeq = raw_nvmeq(dev, 0);
	if (!nvmeq) {
		nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
		if (!nvmeq)
			return -ENOMEM;
	}

	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
	dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;

	writel(aqa, &dev->bar->aqa);
	writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
	writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
	writel(dev->ctrl_config, &dev->bar->cc);

	result = nvme_enable_ctrl(dev, cap);
	if (result)
		return result;

	result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
	if (result)
		return result;

	spin_lock_irq(&nvmeq->q_lock);
	nvme_init_queue(nvmeq, 0);
	spin_unlock_irq(&nvmeq->q_lock);
	return result;
}

struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
				unsigned long addr, unsigned length)
{
	int i, err, count, nents, offset;
	struct scatterlist *sg;
	struct page **pages;
	struct nvme_iod *iod;

	if (addr & 3)
		return ERR_PTR(-EINVAL);
	if (!length || length > INT_MAX - PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	offset = offset_in_page(addr);
	count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
	pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	err = get_user_pages_fast(addr, count, 1, pages);
	if (err < count) {
		count = err;
		err = -EFAULT;
		goto put_pages;
	}

	err = -ENOMEM;
	iod = nvme_alloc_iod(count, length, GFP_KERNEL);
	if (!iod)
		goto put_pages;

	sg = iod->sg;
	sg_init_table(sg, count);
	for (i = 0; i < count; i++) {
		sg_set_page(&sg[i], pages[i],
			    min_t(unsigned, length, PAGE_SIZE - offset),
			    offset);
		length -= (PAGE_SIZE - offset);
		offset = 0;
	}
	sg_mark_end(&sg[i - 1]);
	iod->nents = count;

	nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!nents)
		goto free_iod;

	kfree(pages);
	return iod;

 free_iod:
	kfree(iod);
 put_pages:
	for (i = 0; i < count; i++)
		put_page(pages[i]);
	kfree(pages);
	return ERR_PTR(err);
}

void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
			struct nvme_iod *iod)
{
	int i;

	dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

	for (i = 0; i < iod->nents; i++)
		put_page(sg_page(&iod->sg[i]));
}
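/*
 * Pairing sketch: callers map user memory, build PRPs, submit, then
 * unmap and free, as nvme_submit_io() below does:
 *
 *	iod = nvme_map_user_pages(dev, write, addr, length);
 *	nvme_setup_prps(dev, iod, length, GFP_KERNEL);
 *	... submit the command ...
 *	nvme_unmap_user_pages(dev, write, iod);
 *	nvme_free_iod(dev, iod);
 */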

static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_dev *dev = ns->dev;
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	int status, i;
	struct nvme_iod *iod, *meta_iod = NULL;
	dma_addr_t meta_dma_addr;
	void *meta, *uninitialized_var(meta_mem);

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	length = (io.nblocks + 1) << ns->lba_shift;
	meta_len = (io.nblocks + 1) * ns->ms;

	if (meta_len && ((io.metadata & 3) || !io.metadata))
		return -EINVAL;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length);
		break;
	default:
		return -EINVAL;
	}

	if (IS_ERR(iod))
		return PTR_ERR(iod);

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

	if (meta_len) {
		meta_iod = nvme_map_user_pages(dev, io.opcode & 1, io.metadata,
								meta_len);
		if (IS_ERR(meta_iod)) {
			status = PTR_ERR(meta_iod);
			meta_iod = NULL;
			goto unmap;
		}

		meta_mem = dma_alloc_coherent(&dev->pci_dev->dev, meta_len,
						&meta_dma_addr, GFP_KERNEL);
		if (!meta_mem) {
			status = -ENOMEM;
			goto unmap;
		}

		if (io.opcode & 1) {
			int meta_offset = 0;

			for (i = 0; i < meta_iod->nents; i++) {
				meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
						meta_iod->sg[i].offset;
				memcpy(meta_mem + meta_offset, meta,
						meta_iod->sg[i].length);
				kunmap_atomic(meta);
				meta_offset += meta_iod->sg[i].length;
			}
		}

		c.rw.metadata = cpu_to_le64(meta_dma_addr);
	}

	length = nvme_setup_prps(dev, iod, length, GFP_KERNEL);
	c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
	c.rw.prp2 = cpu_to_le64(iod->first_dma);

	if (length != (io.nblocks + 1) << ns->lba_shift)
		status = -ENOMEM;
	else
		status = nvme_submit_io_cmd(dev, &c, NULL);

	if (meta_len) {
		if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) {
			int meta_offset = 0;

			for (i = 0; i < meta_iod->nents; i++) {
				meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
						meta_iod->sg[i].offset;
				memcpy(meta, meta_mem + meta_offset,
						meta_iod->sg[i].length);
				kunmap_atomic(meta);
				meta_offset += meta_iod->sg[i].length;
			}
		}

		dma_free_coherent(&dev->pci_dev->dev, meta_len, meta_mem,
								meta_dma_addr);
	}

 unmap:
	nvme_unmap_user_pages(dev, io.opcode & 1, iod);
	nvme_free_iod(dev, iod);

	if (meta_iod) {
		nvme_unmap_user_pages(dev, io.opcode & 1, meta_iod);
		nvme_free_iod(dev, meta_iod);
	}

	return status;
}

static int nvme_user_admin_cmd(struct nvme_dev *dev,
					struct nvme_admin_cmd __user *ucmd)
{
	struct nvme_admin_cmd cmd;
	struct nvme_command c;
	int status, length;
	struct nvme_iod *uninitialized_var(iod);
	unsigned timeout;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
	c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
	c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
	c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);

	length = cmd.data_len;
	if (cmd.data_len) {
		iod = nvme_map_user_pages(dev, cmd.opcode & 1, cmd.addr,
								length);
		if (IS_ERR(iod))
			return PTR_ERR(iod);
		length = nvme_setup_prps(dev, iod, length, GFP_KERNEL);
		c.common.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
		c.common.prp2 = cpu_to_le64(iod->first_dma);
	}

	timeout = cmd.timeout_ms ? msecs_to_jiffies(cmd.timeout_ms) :
								ADMIN_TIMEOUT;
	if (length != cmd.data_len)
		status = -ENOMEM;
	else
		status = nvme_submit_sync_cmd(dev, 0, &c, &cmd.result, timeout);

	if (cmd.data_len) {
		nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
		nvme_free_iod(dev, iod);
	}

	if ((status >= 0) && copy_to_user(&ucmd->result, &cmd.result,
							sizeof(cmd.result)))
		status = -EFAULT;

	return status;
}

static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
							unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		return ns->ns_id;
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_admin_cmd(ns->dev, (void __user *)arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
	case SG_GET_VERSION_NUM:
		return nvme_sg_get_version_num((void __user *)arg);
	case SG_IO:
		return nvme_sg_io(ns, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}
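/*
 * Userspace sketch (illustrative; "fd" and "buf" are hypothetical):
 * issuing an Identify Controller admin command from a process with
 * CAP_SYS_ADMIN through the passthrough ioctl above:
 *
 *	struct nvme_admin_cmd cmd = {
 *		.opcode = 0x06,				// Identify
 *		.addr = (__u64)(uintptr_t)buf,		// 4096-byte buffer
 *		.data_len = 4096,
 *		.cdw10 = 1,				// CNS 1: controller
 *	};
 *	ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd);
 */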

#ifdef CONFIG_COMPAT
static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
					unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case SG_IO:
		return nvme_sg_io32(ns, arg);
	}
	return nvme_ioctl(bdev, mode, cmd, arg);
}
#else
#define nvme_compat_ioctl	NULL
#endif

static int nvme_open(struct block_device *bdev, fmode_t mode)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;
	struct nvme_dev *dev = ns->dev;

	kref_get(&dev->kref);
	return 0;
}

static void nvme_free_dev(struct kref *kref);

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
	struct nvme_ns *ns = disk->private_data;
	struct nvme_dev *dev = ns->dev;

	kref_put(&dev->kref, nvme_free_dev);
}

static int nvme_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	return 0;
}

static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_compat_ioctl,
	.open		= nvme_open,
	.release	= nvme_release,
	.getgeo		= nvme_getgeo,
};

static void nvme_resubmit_iods(struct nvme_queue *nvmeq)
{
	struct nvme_iod *iod, *next;

	list_for_each_entry_safe(iod, next, &nvmeq->iod_bio, node) {
		if (unlikely(nvme_submit_iod(nvmeq, iod)))
			break;
		list_del(&iod->node);
		if (bio_list_empty(&nvmeq->sq_cong) &&
						list_empty(&nvmeq->iod_bio))
			remove_wait_queue(&nvmeq->sq_full,
						&nvmeq->sq_cong_wait);
	}
}

static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
{
	while (bio_list_peek(&nvmeq->sq_cong)) {
		struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
		struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;

		if (bio_list_empty(&nvmeq->sq_cong) &&
						list_empty(&nvmeq->iod_bio))
			remove_wait_queue(&nvmeq->sq_full,
						&nvmeq->sq_cong_wait);
		if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
			if (!waitqueue_active(&nvmeq->sq_full))
				add_wait_queue(&nvmeq->sq_full,
						&nvmeq->sq_cong_wait);
			bio_list_add_head(&nvmeq->sq_cong, bio);
			break;
		}
	}
}

static int nvme_kthread(void *data)
{
	struct nvme_dev *dev, *next;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock(&dev_list_lock);
		list_for_each_entry_safe(dev, next, &dev_list, node) {
			int i;
			if (readl(&dev->bar->csts) & NVME_CSTS_CFS &&
							dev->initialized) {
				if (work_busy(&dev->reset_work))
					continue;
				list_del_init(&dev->node);
				dev_warn(&dev->pci_dev->dev,
					"Failed status, reset controller\n");
				dev->reset_workfn = nvme_reset_failed_dev;
				queue_work(nvme_workq, &dev->reset_work);
				continue;
			}
			rcu_read_lock();
			for (i = 0; i < dev->queue_count; i++) {
				struct nvme_queue *nvmeq =
						rcu_dereference(dev->queues[i]);
				if (!nvmeq)
					continue;
				spin_lock_irq(&nvmeq->q_lock);
				if (nvmeq->q_suspended)
					goto unlock;
				nvme_process_cq(nvmeq);
				nvme_cancel_ios(nvmeq, true);
				nvme_resubmit_bios(nvmeq);
				nvme_resubmit_iods(nvmeq);
 unlock:
				spin_unlock_irq(&nvmeq->q_lock);
			}
			rcu_read_unlock();
		}
		spin_unlock(&dev_list_lock);
		schedule_timeout(round_jiffies_relative(HZ));
	}
	return 0;
}

static void nvme_config_discard(struct nvme_ns *ns)
{
	u32 logical_block_size = queue_logical_block_size(ns->queue);
	ns->queue->limits.discard_zeroes_data = 0;
	ns->queue->limits.discard_alignment = logical_block_size;
	ns->queue->limits.discard_granularity = logical_block_size;
	ns->queue->limits.max_discard_sectors = 0xffffffff;
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
}
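/*
 * Once per second (round_jiffies_relative(HZ)) the kthread above sweeps
 * every registered device: it reaps completions, times out and aborts
 * stuck commands via nvme_cancel_ios(), and retries congested bios and
 * iods that earlier submission attempts parked on sq_cong/iod_bio.
 */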

static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
			struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	int lbaf;

	if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
		return NULL;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;
	ns->queue = blk_alloc_queue(GFP_KERNEL);
	if (!ns->queue)
		goto out_free_ns;
	ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
	blk_queue_make_request(ns->queue, nvme_make_request);
	ns->dev = dev;
	ns->queue->queuedata = ns;

	disk = alloc_disk(0);
	if (!disk)
		goto out_free_queue;
	ns->ns_id = nsid;
	ns->disk = disk;
	lbaf = id->flbas & 0xf;
	ns->lba_shift = id->lbaf[lbaf].ds;
	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
	if (dev->max_hw_sectors)
		blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
	if (dev->vwc & NVME_CTRL_VWC_PRESENT)
		blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA);

	disk->major = nvme_major;
	disk->first_minor = 0;
	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	disk->driverfs_dev = &dev->pci_dev->dev;
	disk->flags = GENHD_FL_EXT_DEVT;
	sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
	set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	if (dev->oncs & NVME_CTRL_ONCS_DSM)
		nvme_config_discard(ns);

	return ns;

 out_free_queue:
	blk_cleanup_queue(ns->queue);
 out_free_ns:
	kfree(ns);
	return NULL;
}

static int nvme_find_closest_node(int node)
{
	int n, val, min_val = INT_MAX, best_node = node;

	for_each_online_node(n) {
		if (n == node)
			continue;
		val = node_distance(node, n);
		if (val < min_val) {
			min_val = val;
			best_node = n;
		}
	}
	return best_node;
}

static void nvme_set_queue_cpus(cpumask_t *qmask, struct nvme_queue *nvmeq,
								int count)
{
	int cpu;
	for_each_cpu(cpu, qmask) {
		if (cpumask_weight(nvmeq->cpu_mask) >= count)
			break;
		if (!cpumask_test_and_set_cpu(cpu, nvmeq->cpu_mask))
			*per_cpu_ptr(nvmeq->dev->io_queue, cpu) = nvmeq->qid;
	}
}

static void nvme_add_cpus(cpumask_t *mask, const cpumask_t *unassigned_cpus,
	const cpumask_t *new_mask, struct nvme_queue *nvmeq, int cpus_per_queue)
{
	int next_cpu;
	for_each_cpu(next_cpu, new_mask) {
		cpumask_or(mask, mask, get_cpu_mask(next_cpu));
		cpumask_or(mask, mask, topology_thread_cpumask(next_cpu));
		cpumask_and(mask, mask, unassigned_cpus);
		nvme_set_queue_cpus(mask, nvmeq, cpus_per_queue);
	}
}

static void nvme_create_io_queues(struct nvme_dev *dev)
{
	unsigned i, max;

	max = min(dev->max_qid, num_online_cpus());
	for (i = dev->queue_count; i <= max; i++)
		if (!nvme_alloc_queue(dev, i, dev->q_depth, i - 1))
			break;

	max = min(dev->queue_count - 1, num_online_cpus());
	for (i = dev->online_queues; i <= max; i++)
		if (nvme_create_queue(raw_nvmeq(dev, i), i))
			break;
}
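/*
 * Assignment example: with 8 online CPUs and 3 I/O queues,
 * nvme_assign_io_queues() below starts with cpus_per_queue = 8/3 = 2
 * and remainder = 3 - (8 - 6) = 1, so queue 1 gets 2 CPUs and, once
 * the remainder is consumed, queues 2 and 3 get 3 CPUs each.
 */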
2019 */ 2020static void nvme_assign_io_queues(struct nvme_dev *dev) 2021{ 2022 unsigned cpu, cpus_per_queue, queues, remainder, i; 2023 cpumask_var_t unassigned_cpus; 2024 2025 nvme_create_io_queues(dev); 2026 2027 queues = min(dev->online_queues - 1, num_online_cpus()); 2028 if (!queues) 2029 return; 2030 2031 cpus_per_queue = num_online_cpus() / queues; 2032 remainder = queues - (num_online_cpus() - queues * cpus_per_queue); 2033 2034 if (!alloc_cpumask_var(&unassigned_cpus, GFP_KERNEL)) 2035 return; 2036 2037 cpumask_copy(unassigned_cpus, cpu_online_mask); 2038 cpu = cpumask_first(unassigned_cpus); 2039 for (i = 1; i <= queues; i++) { 2040 struct nvme_queue *nvmeq = lock_nvmeq(dev, i); 2041 cpumask_t mask; 2042 2043 cpumask_clear(nvmeq->cpu_mask); 2044 if (!cpumask_weight(unassigned_cpus)) { 2045 unlock_nvmeq(nvmeq); 2046 break; 2047 } 2048 2049 mask = *get_cpu_mask(cpu); 2050 nvme_set_queue_cpus(&mask, nvmeq, cpus_per_queue); 2051 if (cpus_weight(mask) < cpus_per_queue) 2052 nvme_add_cpus(&mask, unassigned_cpus, 2053 topology_thread_cpumask(cpu), 2054 nvmeq, cpus_per_queue); 2055 if (cpus_weight(mask) < cpus_per_queue) 2056 nvme_add_cpus(&mask, unassigned_cpus, 2057 topology_core_cpumask(cpu), 2058 nvmeq, cpus_per_queue); 2059 if (cpus_weight(mask) < cpus_per_queue) 2060 nvme_add_cpus(&mask, unassigned_cpus, 2061 cpumask_of_node(cpu_to_node(cpu)), 2062 nvmeq, cpus_per_queue); 2063 if (cpus_weight(mask) < cpus_per_queue) 2064 nvme_add_cpus(&mask, unassigned_cpus, 2065 cpumask_of_node( 2066 nvme_find_closest_node( 2067 cpu_to_node(cpu))), 2068 nvmeq, cpus_per_queue); 2069 if (cpus_weight(mask) < cpus_per_queue) 2070 nvme_add_cpus(&mask, unassigned_cpus, 2071 unassigned_cpus, 2072 nvmeq, cpus_per_queue); 2073 2074 WARN(cpumask_weight(nvmeq->cpu_mask) != cpus_per_queue, 2075 "nvme%d qid:%d mis-matched queue-to-cpu assignment\n", 2076 dev->instance, i); 2077 2078 irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector, 2079 nvmeq->cpu_mask); 2080 cpumask_andnot(unassigned_cpus, unassigned_cpus, 2081 nvmeq->cpu_mask); 2082 cpu = cpumask_next(cpu, unassigned_cpus); 2083 if (remainder && !--remainder) 2084 cpus_per_queue++; 2085 unlock_nvmeq(nvmeq); 2086 } 2087 WARN(cpumask_weight(unassigned_cpus), "nvme%d unassigned online cpus\n", 2088 dev->instance); 2089 i = 0; 2090 cpumask_andnot(unassigned_cpus, cpu_possible_mask, cpu_online_mask); 2091 for_each_cpu(cpu, unassigned_cpus) 2092 *per_cpu_ptr(dev->io_queue, cpu) = (i++ % queues) + 1; 2093 free_cpumask_var(unassigned_cpus); 2094} 2095 2096static int set_queue_count(struct nvme_dev *dev, int count) 2097{ 2098 int status; 2099 u32 result; 2100 u32 q_count = (count - 1) | ((count - 1) << 16); 2101 2102 status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0, 2103 &result); 2104 if (status < 0) 2105 return status; 2106 if (status > 0) { 2107 dev_err(&dev->pci_dev->dev, "Could not set queue count (%d)\n", 2108 status); 2109 return -EBUSY; 2110 } 2111 return min(result & 0xffff, result >> 16) + 1; 2112} 2113 2114static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues) 2115{ 2116 return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride); 2117} 2118 2119static void nvme_cpu_workfn(struct work_struct *work) 2120{ 2121 struct nvme_dev *dev = container_of(work, struct nvme_dev, cpu_work); 2122 if (dev->initialized) 2123 nvme_assign_io_queues(dev); 2124} 2125 2126static int nvme_cpu_notify(struct notifier_block *self, 2127 unsigned long action, void *hcpu) 2128{ 2129 struct nvme_dev *dev; 2130 2131 switch (action) { 2132 case 
CPU_ONLINE: 2133 case CPU_DEAD: 2134 spin_lock(&dev_list_lock); 2135 list_for_each_entry(dev, &dev_list, node) 2136 schedule_work(&dev->cpu_work); 2137 spin_unlock(&dev_list_lock); 2138 break; 2139 } 2140 return NOTIFY_OK; 2141} 2142 2143static int nvme_setup_io_queues(struct nvme_dev *dev) 2144{ 2145 struct nvme_queue *adminq = raw_nvmeq(dev, 0); 2146 struct pci_dev *pdev = dev->pci_dev; 2147 int result, i, vecs, nr_io_queues, size; 2148 2149 nr_io_queues = num_possible_cpus(); 2150 result = set_queue_count(dev, nr_io_queues); 2151 if (result < 0) 2152 return result; 2153 if (result < nr_io_queues) 2154 nr_io_queues = result; 2155 2156 size = db_bar_size(dev, nr_io_queues); 2157 if (size > 8192) { 2158 iounmap(dev->bar); 2159 do { 2160 dev->bar = ioremap(pci_resource_start(pdev, 0), size); 2161 if (dev->bar) 2162 break; 2163 if (!--nr_io_queues) 2164 return -ENOMEM; 2165 size = db_bar_size(dev, nr_io_queues); 2166 } while (1); 2167 dev->dbs = ((void __iomem *)dev->bar) + 4096; 2168 adminq->q_db = dev->dbs; 2169 } 2170 2171 /* Deregister the admin queue's interrupt */ 2172 free_irq(dev->entry[0].vector, adminq); 2173 2174 for (i = 0; i < nr_io_queues; i++) 2175 dev->entry[i].entry = i; 2176 vecs = pci_enable_msix_range(pdev, dev->entry, 1, nr_io_queues); 2177 if (vecs < 0) { 2178 vecs = pci_enable_msi_range(pdev, 1, min(nr_io_queues, 32)); 2179 if (vecs < 0) { 2180 vecs = 1; 2181 } else { 2182 for (i = 0; i < vecs; i++) 2183 dev->entry[i].vector = i + pdev->irq; 2184 } 2185 } 2186 2187 /* 2188 * Should investigate if there's a performance win from allocating 2189 * more queues than interrupt vectors; it might allow the submission 2190 * path to scale better, even if the receive path is limited by the 2191 * number of interrupts. 2192 */ 2193 nr_io_queues = vecs; 2194 dev->max_qid = nr_io_queues; 2195 2196 result = queue_request_irq(dev, adminq, adminq->irqname); 2197 if (result) { 2198 adminq->q_suspended = 1; 2199 goto free_queues; 2200 } 2201 2202 /* Free previously allocated queues that are no longer usable */ 2203 nvme_free_queues(dev, nr_io_queues + 1); 2204 nvme_assign_io_queues(dev); 2205 2206 return 0; 2207 2208 free_queues: 2209 nvme_free_queues(dev, 1); 2210 return result; 2211} 2212 2213/* 2214 * Return: error value if an error occurred setting up the queues or calling 2215 * Identify Device. 0 if these succeeded, even if adding some of the 2216 * namespaces failed. At the moment, these failures are silent. TBD which 2217 * failures should be reported. 
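 *
 * Note that a failed Get Features (LBA Range Type) for a namespace is
 * likewise tolerated: the range buffer is simply zeroed and the
 * namespace is still added.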
2218 */ 2219static int nvme_dev_add(struct nvme_dev *dev) 2220{ 2221 struct pci_dev *pdev = dev->pci_dev; 2222 int res; 2223 unsigned nn, i; 2224 struct nvme_ns *ns; 2225 struct nvme_id_ctrl *ctrl; 2226 struct nvme_id_ns *id_ns; 2227 void *mem; 2228 dma_addr_t dma_addr; 2229 int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12; 2230 2231 mem = dma_alloc_coherent(&pdev->dev, 8192, &dma_addr, GFP_KERNEL); 2232 if (!mem) 2233 return -ENOMEM; 2234 2235 res = nvme_identify(dev, 0, 1, dma_addr); 2236 if (res) { 2237 dev_err(&pdev->dev, "Identify Controller failed (%d)\n", res); 2238 res = -EIO; 2239 goto out; 2240 } 2241 2242 ctrl = mem; 2243 nn = le32_to_cpup(&ctrl->nn); 2244 dev->oncs = le16_to_cpup(&ctrl->oncs); 2245 dev->abort_limit = ctrl->acl + 1; 2246 dev->vwc = ctrl->vwc; 2247 memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn)); 2248 memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn)); 2249 memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr)); 2250 if (ctrl->mdts) 2251 dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9); 2252 if ((pdev->vendor == PCI_VENDOR_ID_INTEL) && 2253 (pdev->device == 0x0953) && ctrl->vs[3]) 2254 dev->stripe_size = 1 << (ctrl->vs[3] + shift); 2255 2256 id_ns = mem; 2257 for (i = 1; i <= nn; i++) { 2258 res = nvme_identify(dev, i, 0, dma_addr); 2259 if (res) 2260 continue; 2261 2262 if (id_ns->ncap == 0) 2263 continue; 2264 2265 res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i, 2266 dma_addr + 4096, NULL); 2267 if (res) 2268 memset(mem + 4096, 0, 4096); 2269 2270 ns = nvme_alloc_ns(dev, i, mem, mem + 4096); 2271 if (ns) 2272 list_add_tail(&ns->list, &dev->namespaces); 2273 } 2274 list_for_each_entry(ns, &dev->namespaces, list) 2275 add_disk(ns->disk); 2276 res = 0; 2277 2278 out: 2279 dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr); 2280 return res; 2281} 2282 2283static int nvme_dev_map(struct nvme_dev *dev) 2284{ 2285 u64 cap; 2286 int bars, result = -ENOMEM; 2287 struct pci_dev *pdev = dev->pci_dev; 2288 2289 if (pci_enable_device_mem(pdev)) 2290 return result; 2291 2292 dev->entry[0].vector = pdev->irq; 2293 pci_set_master(pdev); 2294 bars = pci_select_bars(pdev, IORESOURCE_MEM); 2295 if (pci_request_selected_regions(pdev, bars, "nvme")) 2296 goto disable_pci; 2297 2298 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) && 2299 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) 2300 goto disable; 2301 2302 dev->bar = ioremap(pci_resource_start(pdev, 0), 8192); 2303 if (!dev->bar) 2304 goto disable; 2305 if (readl(&dev->bar->csts) == -1) { 2306 result = -ENODEV; 2307 goto unmap; 2308 } 2309 cap = readq(&dev->bar->cap); 2310 dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH); 2311 dev->db_stride = 1 << NVME_CAP_STRIDE(cap); 2312 dev->dbs = ((void __iomem *)dev->bar) + 4096; 2313 2314 return 0; 2315 2316 unmap: 2317 iounmap(dev->bar); 2318 dev->bar = NULL; 2319 disable: 2320 pci_release_regions(pdev); 2321 disable_pci: 2322 pci_disable_device(pdev); 2323 return result; 2324} 2325 2326static void nvme_dev_unmap(struct nvme_dev *dev) 2327{ 2328 if (dev->pci_dev->msi_enabled) 2329 pci_disable_msi(dev->pci_dev); 2330 else if (dev->pci_dev->msix_enabled) 2331 pci_disable_msix(dev->pci_dev); 2332 2333 if (dev->bar) { 2334 iounmap(dev->bar); 2335 dev->bar = NULL; 2336 pci_release_regions(dev->pci_dev); 2337 } 2338 2339 if (pci_is_enabled(dev->pci_dev)) 2340 pci_disable_device(dev->pci_dev); 2341} 2342 2343struct nvme_delq_ctx { 2344 struct task_struct *waiter; 2345 struct kthread_worker *worker; 2346 atomic_t refcount; 2347}; 2348 
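/*
 * Block until every outstanding queue deletion has dropped the dq
 * refcount to zero.  On timeout or a fatal signal, give up: disable the
 * controller and its admin queue, then kill and flush the deletion
 * worker.
 */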
2349static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev) 2350{ 2351 dq->waiter = current; 2352 mb(); 2353 2354 for (;;) { 2355 set_current_state(TASK_KILLABLE); 2356 if (!atomic_read(&dq->refcount)) 2357 break; 2358 if (!schedule_timeout(ADMIN_TIMEOUT) || 2359 fatal_signal_pending(current)) { 2360 set_current_state(TASK_RUNNING); 2361 2362 nvme_disable_ctrl(dev, readq(&dev->bar->cap)); 2363 nvme_disable_queue(dev, 0); 2364 2365 send_sig(SIGKILL, dq->worker->task, 1); 2366 flush_kthread_worker(dq->worker); 2367 return; 2368 } 2369 } 2370 set_current_state(TASK_RUNNING); 2371} 2372 2373static void nvme_put_dq(struct nvme_delq_ctx *dq) 2374{ 2375 atomic_dec(&dq->refcount); 2376 if (dq->waiter) 2377 wake_up_process(dq->waiter); 2378} 2379 2380static struct nvme_delq_ctx *nvme_get_dq(struct nvme_delq_ctx *dq) 2381{ 2382 atomic_inc(&dq->refcount); 2383 return dq; 2384} 2385 2386static void nvme_del_queue_end(struct nvme_queue *nvmeq) 2387{ 2388 struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx; 2389 2390 nvme_clear_queue(nvmeq); 2391 nvme_put_dq(dq); 2392} 2393 2394static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode, 2395 kthread_work_func_t fn) 2396{ 2397 struct nvme_command c; 2398 2399 memset(&c, 0, sizeof(c)); 2400 c.delete_queue.opcode = opcode; 2401 c.delete_queue.qid = cpu_to_le16(nvmeq->qid); 2402 2403 init_kthread_work(&nvmeq->cmdinfo.work, fn); 2404 return nvme_submit_admin_cmd_async(nvmeq->dev, &c, &nvmeq->cmdinfo); 2405} 2406 2407static void nvme_del_cq_work_handler(struct kthread_work *work) 2408{ 2409 struct nvme_queue *nvmeq = container_of(work, struct nvme_queue, 2410 cmdinfo.work); 2411 nvme_del_queue_end(nvmeq); 2412} 2413 2414static int nvme_delete_cq(struct nvme_queue *nvmeq) 2415{ 2416 return adapter_async_del_queue(nvmeq, nvme_admin_delete_cq, 2417 nvme_del_cq_work_handler); 2418} 2419 2420static void nvme_del_sq_work_handler(struct kthread_work *work) 2421{ 2422 struct nvme_queue *nvmeq = container_of(work, struct nvme_queue, 2423 cmdinfo.work); 2424 int status = nvmeq->cmdinfo.status; 2425 2426 if (!status) 2427 status = nvme_delete_cq(nvmeq); 2428 if (status) 2429 nvme_del_queue_end(nvmeq); 2430} 2431 2432static int nvme_delete_sq(struct nvme_queue *nvmeq) 2433{ 2434 return adapter_async_del_queue(nvmeq, nvme_admin_delete_sq, 2435 nvme_del_sq_work_handler); 2436} 2437 2438static void nvme_del_queue_start(struct kthread_work *work) 2439{ 2440 struct nvme_queue *nvmeq = container_of(work, struct nvme_queue, 2441 cmdinfo.work); 2442 allow_signal(SIGKILL); 2443 if (nvme_delete_sq(nvmeq)) 2444 nvme_del_queue_end(nvmeq); 2445} 2446 2447static void nvme_disable_io_queues(struct nvme_dev *dev) 2448{ 2449 int i; 2450 DEFINE_KTHREAD_WORKER_ONSTACK(worker); 2451 struct nvme_delq_ctx dq; 2452 struct task_struct *kworker_task = kthread_run(kthread_worker_fn, 2453 &worker, "nvme%d", dev->instance); 2454 2455 if (IS_ERR(kworker_task)) { 2456 dev_err(&dev->pci_dev->dev, 2457 "Failed to create queue del task\n"); 2458 for (i = dev->queue_count - 1; i > 0; i--) 2459 nvme_disable_queue(dev, i); 2460 return; 2461 } 2462 2463 dq.waiter = NULL; 2464 atomic_set(&dq.refcount, 0); 2465 dq.worker = &worker; 2466 for (i = dev->queue_count - 1; i > 0; i--) { 2467 struct nvme_queue *nvmeq = raw_nvmeq(dev, i); 2468 2469 if (nvme_suspend_queue(nvmeq)) 2470 continue; 2471 nvmeq->cmdinfo.ctx = nvme_get_dq(&dq); 2472 nvmeq->cmdinfo.worker = dq.worker; 2473 init_kthread_work(&nvmeq->cmdinfo.work, nvme_del_queue_start); 2474 queue_kthread_work(dq.worker, 
&nvmeq->cmdinfo.work); 2475 } 2476 nvme_wait_dq(&dq, dev); 2477 kthread_stop(kworker_task); 2478} 2479 2480/* 2481 * Remove the node from the device list and check 2482 * whether we need to stop the nvme_thread. 2483 */ 2484static void nvme_dev_list_remove(struct nvme_dev *dev) 2485{ 2486 struct task_struct *tmp = NULL; 2487 2488 spin_lock(&dev_list_lock); 2489 list_del_init(&dev->node); 2490 if (list_empty(&dev_list) && !IS_ERR_OR_NULL(nvme_thread)) { 2491 tmp = nvme_thread; 2492 nvme_thread = NULL; 2493 } 2494 spin_unlock(&dev_list_lock); 2495 2496 if (tmp) 2497 kthread_stop(tmp); 2498} 2499 2500static void nvme_dev_shutdown(struct nvme_dev *dev) 2501{ 2502 int i; 2503 2504 dev->initialized = 0; 2505 nvme_dev_list_remove(dev); 2506 2507 if (!dev->bar || (dev->bar && readl(&dev->bar->csts) == -1)) { 2508 for (i = dev->queue_count - 1; i >= 0; i--) { 2509 struct nvme_queue *nvmeq = raw_nvmeq(dev, i); 2510 nvme_suspend_queue(nvmeq); 2511 nvme_clear_queue(nvmeq); 2512 } 2513 } else { 2514 nvme_disable_io_queues(dev); 2515 nvme_shutdown_ctrl(dev); 2516 nvme_disable_queue(dev, 0); 2517 } 2518 nvme_dev_unmap(dev); 2519} 2520 2521static void nvme_dev_remove(struct nvme_dev *dev) 2522{ 2523 struct nvme_ns *ns; 2524 2525 list_for_each_entry(ns, &dev->namespaces, list) { 2526 if (ns->disk->flags & GENHD_FL_UP) 2527 del_gendisk(ns->disk); 2528 if (!blk_queue_dying(ns->queue)) 2529 blk_cleanup_queue(ns->queue); 2530 } 2531} 2532 2533static int nvme_setup_prp_pools(struct nvme_dev *dev) 2534{ 2535 struct device *dmadev = &dev->pci_dev->dev; 2536 dev->prp_page_pool = dma_pool_create("prp list page", dmadev, 2537 PAGE_SIZE, PAGE_SIZE, 0); 2538 if (!dev->prp_page_pool) 2539 return -ENOMEM; 2540 2541 /* Optimisation for I/Os between 4k and 128k */ 2542 dev->prp_small_pool = dma_pool_create("prp list 256", dmadev, 2543 256, 256, 0); 2544 if (!dev->prp_small_pool) { 2545 dma_pool_destroy(dev->prp_page_pool); 2546 return -ENOMEM; 2547 } 2548 return 0; 2549} 2550 2551static void nvme_release_prp_pools(struct nvme_dev *dev) 2552{ 2553 dma_pool_destroy(dev->prp_page_pool); 2554 dma_pool_destroy(dev->prp_small_pool); 2555} 2556 2557static DEFINE_IDA(nvme_instance_ida); 2558 2559static int nvme_set_instance(struct nvme_dev *dev) 2560{ 2561 int instance, error; 2562 2563 do { 2564 if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL)) 2565 return -ENODEV; 2566 2567 spin_lock(&dev_list_lock); 2568 error = ida_get_new(&nvme_instance_ida, &instance); 2569 spin_unlock(&dev_list_lock); 2570 } while (error == -EAGAIN); 2571 2572 if (error) 2573 return -ENODEV; 2574 2575 dev->instance = instance; 2576 return 0; 2577} 2578 2579static void nvme_release_instance(struct nvme_dev *dev) 2580{ 2581 spin_lock(&dev_list_lock); 2582 ida_remove(&nvme_instance_ida, dev->instance); 2583 spin_unlock(&dev_list_lock); 2584} 2585 2586static void nvme_free_namespaces(struct nvme_dev *dev) 2587{ 2588 struct nvme_ns *ns, *next; 2589 2590 list_for_each_entry_safe(ns, next, &dev->namespaces, list) { 2591 list_del(&ns->list); 2592 put_disk(ns->disk); 2593 kfree(ns); 2594 } 2595} 2596 2597static void nvme_free_dev(struct kref *kref) 2598{ 2599 struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref); 2600 2601 nvme_free_namespaces(dev); 2602 free_percpu(dev->io_queue); 2603 kfree(dev->queues); 2604 kfree(dev->entry); 2605 kfree(dev); 2606} 2607 2608static int nvme_dev_open(struct inode *inode, struct file *f) 2609{ 2610 struct nvme_dev *dev = container_of(f->private_data, struct nvme_dev, 2611 miscdev); 2612 kref_get(&dev->kref); 
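 /* the reference taken above is dropped in nvme_dev_release() */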
2613 f->private_data = dev; 2614 return 0; 2615} 2616 2617static int nvme_dev_release(struct inode *inode, struct file *f) 2618{ 2619 struct nvme_dev *dev = f->private_data; 2620 kref_put(&dev->kref, nvme_free_dev); 2621 return 0; 2622} 2623 2624static long nvme_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg) 2625{ 2626 struct nvme_dev *dev = f->private_data; 2627 switch (cmd) { 2628 case NVME_IOCTL_ADMIN_CMD: 2629 return nvme_user_admin_cmd(dev, (void __user *)arg); 2630 default: 2631 return -ENOTTY; 2632 } 2633} 2634 2635static const struct file_operations nvme_dev_fops = { 2636 .owner = THIS_MODULE, 2637 .open = nvme_dev_open, 2638 .release = nvme_dev_release, 2639 .unlocked_ioctl = nvme_dev_ioctl, 2640 .compat_ioctl = nvme_dev_ioctl, 2641}; 2642 2643static int nvme_dev_start(struct nvme_dev *dev) 2644{ 2645 int result; 2646 bool start_thread = false; 2647 2648 result = nvme_dev_map(dev); 2649 if (result) 2650 return result; 2651 2652 result = nvme_configure_admin_queue(dev); 2653 if (result) 2654 goto unmap; 2655 2656 spin_lock(&dev_list_lock); 2657 if (list_empty(&dev_list) && IS_ERR_OR_NULL(nvme_thread)) { 2658 start_thread = true; 2659 nvme_thread = NULL; 2660 } 2661 list_add(&dev->node, &dev_list); 2662 spin_unlock(&dev_list_lock); 2663 2664 if (start_thread) { 2665 nvme_thread = kthread_run(nvme_kthread, NULL, "nvme"); 2666 wake_up(&nvme_kthread_wait); 2667 } else 2668 wait_event_killable(nvme_kthread_wait, nvme_thread); 2669 2670 if (IS_ERR_OR_NULL(nvme_thread)) { 2671 result = nvme_thread ? PTR_ERR(nvme_thread) : -EINTR; 2672 goto disable; 2673 } 2674 2675 result = nvme_setup_io_queues(dev); 2676 if (result && result != -EBUSY) 2677 goto disable; 2678 2679 return result; 2680 2681 disable: 2682 nvme_disable_queue(dev, 0); 2683 nvme_dev_list_remove(dev); 2684 unmap: 2685 nvme_dev_unmap(dev); 2686 return result; 2687} 2688 2689static int nvme_remove_dead_ctrl(void *arg) 2690{ 2691 struct nvme_dev *dev = (struct nvme_dev *)arg; 2692 struct pci_dev *pdev = dev->pci_dev; 2693 2694 if (pci_get_drvdata(pdev)) 2695 pci_stop_and_remove_bus_device(pdev); 2696 kref_put(&dev->kref, nvme_free_dev); 2697 return 0; 2698} 2699 2700static void nvme_remove_disks(struct work_struct *ws) 2701{ 2702 struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work); 2703 2704 nvme_dev_remove(dev); 2705 nvme_free_queues(dev, 1); 2706} 2707 2708static int nvme_dev_resume(struct nvme_dev *dev) 2709{ 2710 int ret; 2711 2712 ret = nvme_dev_start(dev); 2713 if (ret && ret != -EBUSY) 2714 return ret; 2715 if (ret == -EBUSY) { 2716 spin_lock(&dev_list_lock); 2717 dev->reset_workfn = nvme_remove_disks; 2718 queue_work(nvme_workq, &dev->reset_work); 2719 spin_unlock(&dev_list_lock); 2720 } 2721 dev->initialized = 1; 2722 return 0; 2723} 2724 2725static void nvme_dev_reset(struct nvme_dev *dev) 2726{ 2727 nvme_dev_shutdown(dev); 2728 if (nvme_dev_resume(dev)) { 2729 dev_err(&dev->pci_dev->dev, "Device failed to resume\n"); 2730 kref_get(&dev->kref); 2731 if (IS_ERR(kthread_run(nvme_remove_dead_ctrl, dev, "nvme%d", 2732 dev->instance))) { 2733 dev_err(&dev->pci_dev->dev, 2734 "Failed to start controller remove task\n"); 2735 kref_put(&dev->kref, nvme_free_dev); 2736 } 2737 } 2738} 2739 2740static void nvme_reset_failed_dev(struct work_struct *ws) 2741{ 2742 struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work); 2743 nvme_dev_reset(dev); 2744} 2745 2746static void nvme_reset_workfn(struct work_struct *work) 2747{ 2748 struct nvme_dev *dev = container_of(work, struct nvme_dev, 
reset_work); 2749 dev->reset_workfn(work); 2750} 2751 2752static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) 2753{ 2754 int result = -ENOMEM; 2755 struct nvme_dev *dev; 2756 2757 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 2758 if (!dev) 2759 return -ENOMEM; 2760 dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry), 2761 GFP_KERNEL); 2762 if (!dev->entry) 2763 goto free; 2764 dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *), 2765 GFP_KERNEL); 2766 if (!dev->queues) 2767 goto free; 2768 dev->io_queue = alloc_percpu(unsigned short); 2769 if (!dev->io_queue) 2770 goto free; 2771 2772 INIT_LIST_HEAD(&dev->namespaces); 2773 dev->reset_workfn = nvme_reset_failed_dev; 2774 INIT_WORK(&dev->reset_work, nvme_reset_workfn); 2775 INIT_WORK(&dev->cpu_work, nvme_cpu_workfn); 2776 dev->pci_dev = pdev; 2777 pci_set_drvdata(pdev, dev); 2778 result = nvme_set_instance(dev); 2779 if (result) 2780 goto free; 2781 2782 result = nvme_setup_prp_pools(dev); 2783 if (result) 2784 goto release; 2785 2786 kref_init(&dev->kref); 2787 result = nvme_dev_start(dev); 2788 if (result) { 2789 if (result == -EBUSY) 2790 goto create_cdev; 2791 goto release_pools; 2792 } 2793 2794 result = nvme_dev_add(dev); 2795 if (result) 2796 goto shutdown; 2797 2798 create_cdev: 2799 scnprintf(dev->name, sizeof(dev->name), "nvme%d", dev->instance); 2800 dev->miscdev.minor = MISC_DYNAMIC_MINOR; 2801 dev->miscdev.parent = &pdev->dev; 2802 dev->miscdev.name = dev->name; 2803 dev->miscdev.fops = &nvme_dev_fops; 2804 result = misc_register(&dev->miscdev); 2805 if (result) 2806 goto remove; 2807 2808 dev->initialized = 1; 2809 return 0; 2810 2811 remove: 2812 nvme_dev_remove(dev); 2813 nvme_free_namespaces(dev); 2814 shutdown: 2815 nvme_dev_shutdown(dev); 2816 release_pools: 2817 nvme_free_queues(dev, 0); 2818 nvme_release_prp_pools(dev); 2819 release: 2820 nvme_release_instance(dev); 2821 free: 2822 free_percpu(dev->io_queue); 2823 kfree(dev->queues); 2824 kfree(dev->entry); 2825 kfree(dev); 2826 return result; 2827} 2828 2829static void nvme_reset_notify(struct pci_dev *pdev, bool prepare) 2830{ 2831 struct nvme_dev *dev = pci_get_drvdata(pdev); 2832 2833 if (prepare) 2834 nvme_dev_shutdown(dev); 2835 else 2836 nvme_dev_resume(dev); 2837} 2838 2839static void nvme_shutdown(struct pci_dev *pdev) 2840{ 2841 struct nvme_dev *dev = pci_get_drvdata(pdev); 2842 nvme_dev_shutdown(dev); 2843} 2844 2845static void nvme_remove(struct pci_dev *pdev) 2846{ 2847 struct nvme_dev *dev = pci_get_drvdata(pdev); 2848 2849 spin_lock(&dev_list_lock); 2850 list_del_init(&dev->node); 2851 spin_unlock(&dev_list_lock); 2852 2853 pci_set_drvdata(pdev, NULL); 2854 flush_work(&dev->reset_work); 2855 flush_work(&dev->cpu_work); 2856 misc_deregister(&dev->miscdev); 2857 nvme_dev_remove(dev); 2858 nvme_dev_shutdown(dev); 2859 nvme_free_queues(dev, 0); 2860 rcu_barrier(); 2861 nvme_release_instance(dev); 2862 nvme_release_prp_pools(dev); 2863 kref_put(&dev->kref, nvme_free_dev); 2864} 2865 2866/* These functions are yet to be implemented */ 2867#define nvme_error_detected NULL 2868#define nvme_dump_registers NULL 2869#define nvme_link_reset NULL 2870#define nvme_slot_reset NULL 2871#define nvme_error_resume NULL 2872 2873#ifdef CONFIG_PM_SLEEP 2874static int nvme_suspend(struct device *dev) 2875{ 2876 struct pci_dev *pdev = to_pci_dev(dev); 2877 struct nvme_dev *ndev = pci_get_drvdata(pdev); 2878 2879 nvme_dev_shutdown(ndev); 2880 return 0; 2881} 2882 2883static int nvme_resume(struct device *dev) 2884{ 2885 struct pci_dev *pdev 
= to_pci_dev(dev); 2886 struct nvme_dev *ndev = pci_get_drvdata(pdev); 2887 2888 if (nvme_dev_resume(ndev) && !work_busy(&ndev->reset_work)) { 2889 ndev->reset_workfn = nvme_reset_failed_dev; 2890 queue_work(nvme_workq, &ndev->reset_work); 2891 } 2892 return 0; 2893} 2894#endif 2895 2896static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume); 2897 2898static const struct pci_error_handlers nvme_err_handler = { 2899 .error_detected = nvme_error_detected, 2900 .mmio_enabled = nvme_dump_registers, 2901 .link_reset = nvme_link_reset, 2902 .slot_reset = nvme_slot_reset, 2903 .resume = nvme_error_resume, 2904 .reset_notify = nvme_reset_notify, 2905}; 2906 2907/* Move to pci_ids.h later */ 2908#define PCI_CLASS_STORAGE_EXPRESS 0x010802 2909 2910static const struct pci_device_id nvme_id_table[] = { 2911 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, 2912 { 0, } 2913}; 2914MODULE_DEVICE_TABLE(pci, nvme_id_table); 2915 2916static struct pci_driver nvme_driver = { 2917 .name = "nvme", 2918 .id_table = nvme_id_table, 2919 .probe = nvme_probe, 2920 .remove = nvme_remove, 2921 .shutdown = nvme_shutdown, 2922 .driver = { 2923 .pm = &nvme_dev_pm_ops, 2924 }, 2925 .err_handler = &nvme_err_handler, 2926}; 2927 2928static int __init nvme_init(void) 2929{ 2930 int result; 2931 2932 init_waitqueue_head(&nvme_kthread_wait); 2933 2934 nvme_workq = create_singlethread_workqueue("nvme"); 2935 if (!nvme_workq) 2936 return -ENOMEM; 2937 2938 result = register_blkdev(nvme_major, "nvme"); 2939 if (result < 0) 2940 goto kill_workq; 2941 else if (result > 0) 2942 nvme_major = result; 2943 2944 nvme_nb.notifier_call = &nvme_cpu_notify; 2945 result = register_hotcpu_notifier(&nvme_nb); 2946 if (result) 2947 goto unregister_blkdev; 2948 2949 result = pci_register_driver(&nvme_driver); 2950 if (result) 2951 goto unregister_hotcpu; 2952 return 0; 2953 2954 unregister_hotcpu: 2955 unregister_hotcpu_notifier(&nvme_nb); 2956 unregister_blkdev: 2957 unregister_blkdev(nvme_major, "nvme"); 2958 kill_workq: 2959 destroy_workqueue(nvme_workq); 2960 return result; 2961} 2962 2963static void __exit nvme_exit(void) 2964{ 2965 pci_unregister_driver(&nvme_driver); 2966 unregister_hotcpu_notifier(&nvme_nb); 2967 unregister_blkdev(nvme_major, "nvme"); 2968 destroy_workqueue(nvme_workq); 2969 BUG_ON(nvme_thread && !IS_ERR(nvme_thread)); 2970 _nvme_check_size(); 2971} 2972 2973MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>"); 2974MODULE_LICENSE("GPL"); 2975MODULE_VERSION("0.9"); 2976module_init(nvme_init); 2977module_exit(nvme_exit);