// SPDX-License-Identifier: GPL-2.0-only
//#define DEBUG
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>
#include <uapi/linux/virtio_ring.h>

#define PART_BITS 4
#define VQ_NAME_LEN 16
#define MAX_DISCARD_SEGMENTS 256u

/* The maximum number of sg elements that fit into a virtqueue */
#define VIRTIO_BLK_MAX_SG_ELEMS 32768

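/*
 * Number of scatterlist entries preallocated inline in each request's
 * virtblk_req; longer scatterlists are chained on as needed.  Arches
 * without scatterlist chaining (CONFIG_ARCH_NO_SG_CHAIN) cannot extend an
 * inline chunk, so no inline entries are reserved there.
 */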
#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define VIRTIO_BLK_INLINE_SG_CNT        0
#else
#define VIRTIO_BLK_INLINE_SG_CNT        2
#endif

static unsigned int num_request_queues;
module_param(num_request_queues, uint, 0644);
MODULE_PARM_DESC(num_request_queues,
                 "Limit the number of request queues to use for blk device. "
                 "0 for no limit. "
                 "Values > nr_cpu_ids truncated to nr_cpu_ids.");

static int major;
static DEFINE_IDA(vd_index_ida);

static struct workqueue_struct *virtblk_wq;

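/*
 * Per-virtqueue state.  Each queue has its own lock and is padded to a
 * cacheline boundary so that queues serviced on different CPUs do not
 * falsely share cachelines.
 */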
struct virtio_blk_vq {
        struct virtqueue *vq;
        spinlock_t lock;
        char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

struct virtio_blk {
        /*
         * This mutex must be held by anything that may run after
         * virtblk_remove() sets vblk->vdev to NULL.
         *
         * blk-mq, virtqueue processing, and sysfs attribute code paths are
         * shut down before vblk->vdev is set to NULL and therefore do not
         * need to hold this mutex.
         */
        struct mutex vdev_mutex;
        struct virtio_device *vdev;

        /* The disk structure for the kernel. */
        struct gendisk *disk;

        /* Block layer tags. */
        struct blk_mq_tag_set tag_set;

        /* Process context for config space updates */
        struct work_struct config_work;

        /*
         * Tracks references from block_device_operations open/release and
         * virtio_driver probe/remove so this object can be freed once no
         * longer in use.
         */
        refcount_t refs;

        /* What the host tells us, plus 2 for the header and status trailer. */
        unsigned int sg_elems;

        /* Ida index - used to track minor number allocations. */
        int index;

        /* num of vqs */
        int num_vqs;
        struct virtio_blk_vq *vqs;
};

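/*
 * Per-request driver data, carved out of the blk-mq request PDU (see
 * tag_set.cmd_size in virtblk_probe()).  The sg[] flexible array holds the
 * VIRTIO_BLK_INLINE_SG_CNT inline entries backing sg_table for small
 * requests.
 */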
struct virtblk_req {
        struct virtio_blk_outhdr out_hdr;
        u8 status;
        struct sg_table sg_table;
        struct scatterlist sg[];
};

static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
{
        switch (vbr->status) {
        case VIRTIO_BLK_S_OK:
                return BLK_STS_OK;
        case VIRTIO_BLK_S_UNSUPP:
                return BLK_STS_NOTSUPP;
        default:
                return BLK_STS_IOERR;
        }
}

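/*
 * Queue a request as up to three descriptor groups, in the order the
 * virtio-blk spec requires: out header (driver->device), optional data
 * buffer (direction depends on read vs. write), one-byte status
 * (device->driver).
 */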
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
                           struct scatterlist *data_sg, bool have_data)
{
        struct scatterlist hdr, status, *sgs[3];
        unsigned int num_out = 0, num_in = 0;

        sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
        sgs[num_out++] = &hdr;

        if (have_data) {
                if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
                        sgs[num_out++] = data_sg;
                else
                        sgs[num_out + num_in++] = data_sg;
        }

        sg_init_one(&status, &vbr->status, sizeof(vbr->status));
        sgs[num_out + num_in++] = &status;

        return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}

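/*
 * Encode the request's discard/write-zeroes ranges as an array of
 * virtio_blk_discard_write_zeroes descriptors and attach it to the request
 * as a special payload, so it is transferred to the device like ordinary
 * data.
 */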
static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap)
{
        unsigned short segments = blk_rq_nr_discard_segments(req);
        unsigned short n = 0;
        struct virtio_blk_discard_write_zeroes *range;
        struct bio *bio;
        u32 flags = 0;

        if (unmap)
                flags |= VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP;

        range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
        if (!range)
                return -ENOMEM;

        /*
         * A single max discard segment means multi-range discard isn't
         * supported, and the block layer only merges contiguous ranges as
         * it does for normal RW requests.  So we can't rely on the bios
         * for per-range information.
         */
        if (queue_max_discard_segments(req->q) == 1) {
                range[0].flags = cpu_to_le32(flags);
                range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req));
                range[0].sector = cpu_to_le64(blk_rq_pos(req));
                n = 1;
        } else {
                __rq_for_each_bio(bio, req) {
                        u64 sector = bio->bi_iter.bi_sector;
                        u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;

                        range[n].flags = cpu_to_le32(flags);
                        range[n].num_sectors = cpu_to_le32(num_sectors);
                        range[n].sector = cpu_to_le64(sector);
                        n++;
                }
        }

        WARN_ON_ONCE(n != segments);

        req->special_vec.bv_page = virt_to_page(range);
        req->special_vec.bv_offset = offset_in_page(range);
        req->special_vec.bv_len = sizeof(*range) * segments;
        req->rq_flags |= RQF_SPECIAL_PAYLOAD;

        return 0;
}

static void virtblk_unmap_data(struct request *req, struct virtblk_req *vbr)
{
        if (blk_rq_nr_phys_segments(req))
                sg_free_table_chained(&vbr->sg_table,
                                      VIRTIO_BLK_INLINE_SG_CNT);
}

static int virtblk_map_data(struct blk_mq_hw_ctx *hctx, struct request *req,
                            struct virtblk_req *vbr)
{
        int err;

        if (!blk_rq_nr_phys_segments(req))
                return 0;

        vbr->sg_table.sgl = vbr->sg;
        err = sg_alloc_table_chained(&vbr->sg_table,
                                     blk_rq_nr_phys_segments(req),
                                     vbr->sg_table.sgl,
                                     VIRTIO_BLK_INLINE_SG_CNT);
        if (unlikely(err))
                return -ENOMEM;

        return blk_rq_map_sg(hctx->queue, req, vbr->sg_table.sgl);
}

static void virtblk_cleanup_cmd(struct request *req)
{
        if (req->rq_flags & RQF_SPECIAL_PAYLOAD)
                kfree(bvec_virt(&req->special_vec));
}

static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
                                      struct request *req,
                                      struct virtblk_req *vbr)
{
        bool unmap = false;
        u32 type;

        vbr->out_hdr.sector = 0;

        switch (req_op(req)) {
        case REQ_OP_READ:
                type = VIRTIO_BLK_T_IN;
                vbr->out_hdr.sector = cpu_to_virtio64(vdev,
                                                      blk_rq_pos(req));
                break;
        case REQ_OP_WRITE:
                type = VIRTIO_BLK_T_OUT;
                vbr->out_hdr.sector = cpu_to_virtio64(vdev,
                                                      blk_rq_pos(req));
                break;
        case REQ_OP_FLUSH:
                type = VIRTIO_BLK_T_FLUSH;
                break;
        case REQ_OP_DISCARD:
                type = VIRTIO_BLK_T_DISCARD;
                break;
        case REQ_OP_WRITE_ZEROES:
                type = VIRTIO_BLK_T_WRITE_ZEROES;
                unmap = !(req->cmd_flags & REQ_NOUNMAP);
                break;
        case REQ_OP_DRV_IN:
                type = VIRTIO_BLK_T_GET_ID;
                break;
        default:
                WARN_ON_ONCE(1);
                return BLK_STS_IOERR;
        }

        vbr->out_hdr.type = cpu_to_virtio32(vdev, type);
        vbr->out_hdr.ioprio = cpu_to_virtio32(vdev, req_get_ioprio(req));

        if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) {
                if (virtblk_setup_discard_write_zeroes(req, unmap))
                        return BLK_STS_RESOURCE;
        }

        return 0;
}

static inline void virtblk_request_done(struct request *req)
{
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

        virtblk_unmap_data(req, vbr);
        virtblk_cleanup_cmd(req);
        blk_mq_end_request(req, virtblk_result(vbr));
}

static void virtblk_done(struct virtqueue *vq)
{
        struct virtio_blk *vblk = vq->vdev->priv;
        bool req_done = false;
        int qid = vq->index;
        struct virtblk_req *vbr;
        unsigned long flags;
        unsigned int len;

        spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
        do {
                virtqueue_disable_cb(vq);
                while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
                        struct request *req = blk_mq_rq_from_pdu(vbr);

                        if (likely(!blk_should_fake_timeout(req->q)))
                                blk_mq_complete_request(req);
                        req_done = true;
                }
                if (unlikely(virtqueue_is_broken(vq)))
                        break;
        } while (!virtqueue_enable_cb(vq));

        /* In case queue is stopped waiting for more buffers. */
        if (req_done)
                blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}

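/*
 * Called by blk-mq after it has dispatched requests without setting
 * bd->last (e.g. when a queue became busy), so the doorbell kick that
 * virtio_queue_rq() deferred must be issued here.
 */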
static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
        struct virtio_blk *vblk = hctx->queue->queuedata;
        struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
        bool kick;

        spin_lock_irq(&vq->lock);
        kick = virtqueue_kick_prepare(vq->vq);
        spin_unlock_irq(&vq->lock);

        if (kick)
                virtqueue_notify(vq->vq);
}

static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
                                    const struct blk_mq_queue_data *bd)
{
        struct virtio_blk *vblk = hctx->queue->queuedata;
        struct request *req = bd->rq;
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
        unsigned long flags;
        int num;
        int qid = hctx->queue_num;
        bool notify = false;
        blk_status_t status;
        int err;

        BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

        status = virtblk_setup_cmd(vblk->vdev, req, vbr);
        if (unlikely(status))
                return status;

        blk_mq_start_request(req);

        num = virtblk_map_data(hctx, req, vbr);
        if (unlikely(num < 0)) {
                virtblk_cleanup_cmd(req);
                return BLK_STS_RESOURCE;
        }

        spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
        err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg_table.sgl, num);
        if (err) {
                virtqueue_kick(vblk->vqs[qid].vq);
                /* Don't stop the queue if -ENOMEM: we may have failed to
                 * bounce the buffer due to global resource outage.
                 */
                if (err == -ENOSPC)
                        blk_mq_stop_hw_queue(hctx);
                spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
                virtblk_unmap_data(req, vbr);
                virtblk_cleanup_cmd(req);
                switch (err) {
                case -ENOSPC:
                        return BLK_STS_DEV_RESOURCE;
                case -ENOMEM:
                        return BLK_STS_RESOURCE;
                default:
                        return BLK_STS_IOERR;
                }
        }

        if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
                notify = true;
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

        if (notify)
                virtqueue_notify(vblk->vqs[qid].vq);
        return BLK_STS_OK;
}

/* Return the disk's ID (serial number) string in *id_str. */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
        struct virtio_blk *vblk = disk->private_data;
        struct request_queue *q = vblk->disk->queue;
        struct request *req;
        int err;

        req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(req))
                return PTR_ERR(req);

        err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
        if (err)
                goto out;

        blk_execute_rq(vblk->disk, req, false);
        err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
out:
        blk_mq_free_request(req);
        return err;
}

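/*
 * Lifetime management: virtblk_probe() takes the initial reference and
 * each open() takes another; release() and virtblk_remove() drop theirs.
 * The vblk object and its IDA index are freed when the last reference
 * goes away.
 */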
static void virtblk_get(struct virtio_blk *vblk)
{
        refcount_inc(&vblk->refs);
}

static void virtblk_put(struct virtio_blk *vblk)
{
        if (refcount_dec_and_test(&vblk->refs)) {
                ida_simple_remove(&vd_index_ida, vblk->index);
                mutex_destroy(&vblk->vdev_mutex);
                kfree(vblk);
        }
}

static int virtblk_open(struct block_device *bd, fmode_t mode)
{
        struct virtio_blk *vblk = bd->bd_disk->private_data;
        int ret = 0;

        mutex_lock(&vblk->vdev_mutex);

        if (vblk->vdev)
                virtblk_get(vblk);
        else
                ret = -ENXIO;

        mutex_unlock(&vblk->vdev_mutex);
        return ret;
}

static void virtblk_release(struct gendisk *disk, fmode_t mode)
{
        struct virtio_blk *vblk = disk->private_data;

        virtblk_put(vblk);
}

/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
        struct virtio_blk *vblk = bd->bd_disk->private_data;
        int ret = 0;

        mutex_lock(&vblk->vdev_mutex);

        if (!vblk->vdev) {
                ret = -ENXIO;
                goto out;
        }

        /* see if the host passed in geometry config */
        if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.cylinders, &geo->cylinders);
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.heads, &geo->heads);
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.sectors, &geo->sectors);
        } else {
                /* some standard values, similar to sd */
                geo->heads = 1 << 6;
                geo->sectors = 1 << 5;
                geo->cylinders = get_capacity(bd->bd_disk) >> 11;
        }
out:
        mutex_unlock(&vblk->vdev_mutex);
        return ret;
}

static const struct block_device_operations virtblk_fops = {
        .owner = THIS_MODULE,
        .open = virtblk_open,
        .release = virtblk_release,
        .getgeo = virtblk_getgeo,
};

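/*
 * Each disk claims a block of 1 << PART_BITS minor numbers: the disk
 * itself plus up to 15 partitions.
 */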
static int index_to_minor(int index)
{
        return index << PART_BITS;
}

static int minor_to_index(int minor)
{
        return minor >> PART_BITS;
}

static ssize_t serial_show(struct device *dev,
                           struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        int err;

        /* sysfs gives us a PAGE_SIZE buffer */
        BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

        buf[VIRTIO_BLK_ID_BYTES] = '\0';
        err = virtblk_get_id(disk, buf);
        if (!err)
                return strlen(buf);

        if (err == -EIO) /* Unsupported? Make it empty. */
                return 0;

        return err;
}

static DEVICE_ATTR_RO(serial);

/* The queue's logical block size must be set before calling this */
static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
{
        struct virtio_device *vdev = vblk->vdev;
        struct request_queue *q = vblk->disk->queue;
        char cap_str_2[10], cap_str_10[10];
        unsigned long long nblocks;
        u64 capacity;

        /* Host must always specify the capacity. */
        virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

        nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);

        string_get_size(nblocks, queue_logical_block_size(q),
                        STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
        string_get_size(nblocks, queue_logical_block_size(q),
                        STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

        dev_notice(&vdev->dev,
                   "[%s] %s%llu %d-byte logical blocks (%s/%s)\n",
                   vblk->disk->disk_name,
                   resize ? "new size: " : "",
                   nblocks,
                   queue_logical_block_size(q),
                   cap_str_10,
                   cap_str_2);

        set_capacity_and_notify(vblk->disk, capacity);
}

static void virtblk_config_changed_work(struct work_struct *work)
{
        struct virtio_blk *vblk =
                container_of(work, struct virtio_blk, config_work);

        virtblk_update_capacity(vblk, true);
}

static void virtblk_config_changed(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        queue_work(virtblk_wq, &vblk->config_work);
}

static int init_vq(struct virtio_blk *vblk)
{
        int err;
        int i;
        vq_callback_t **callbacks;
        const char **names;
        struct virtqueue **vqs;
        unsigned short num_vqs;
        struct virtio_device *vdev = vblk->vdev;
        struct irq_affinity desc = { 0, };

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
                                   struct virtio_blk_config, num_queues,
                                   &num_vqs);
        if (err)
                num_vqs = 1;
        if (!err && !num_vqs) {
                dev_err(&vdev->dev, "MQ advertised but zero queues reported\n");
                return -EINVAL;
        }

        num_vqs = min_t(unsigned int,
                        min_not_zero(num_request_queues, nr_cpu_ids),
                        num_vqs);

        vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
        if (!vblk->vqs)
                return -ENOMEM;

        names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
        callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
        vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
        if (!names || !callbacks || !vqs) {
                err = -ENOMEM;
                goto out;
        }

        for (i = 0; i < num_vqs; i++) {
                callbacks[i] = virtblk_done;
                snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
                names[i] = vblk->vqs[i].name;
        }

        /* Discover virtqueues and write information to configuration. */
        err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
        if (err)
                goto out;

        for (i = 0; i < num_vqs; i++) {
                spin_lock_init(&vblk->vqs[i].lock);
                vblk->vqs[i].vq = vqs[i];
        }
        vblk->num_vqs = num_vqs;

out:
        kfree(vqs);
        kfree(callbacks);
        kfree(names);
        if (err)
                kfree(vblk->vqs);
        return err;
}

/*
 * Legacy naming scheme used for virtio devices.  We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 */
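/* Examples: index 0 -> "vda", index 25 -> "vdz", index 26 -> "vdaa". */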
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
        const int base = 'z' - 'a' + 1;
        char *begin = buf + strlen(prefix);
        char *end = buf + buflen;
        char *p;
        int unit;

        p = end - 1;
        *p = '\0';
        unit = base;
        do {
                if (p == begin)
                        return -EINVAL;
                *--p = 'a' + (index % unit);
                index = (index / unit) - 1;
        } while (index >= 0);

        memmove(begin, p, end - p);
        memcpy(buf, prefix, strlen(prefix));

        return 0;
}

static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
        u8 writeback;
        int err;

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
                                   struct virtio_blk_config, wce,
                                   &writeback);

        /*
         * If WCE is not configurable and flush is not available,
         * assume no writeback cache is in use.
         */
        if (err)
                writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);

        return writeback;
}

static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
        u8 writeback = virtblk_get_cache_mode(vdev);
        struct virtio_blk *vblk = vdev->priv;

        blk_queue_write_cache(vblk->disk->queue, writeback, false);
}

static const char *const virtblk_cache_types[] = {
        "write through", "write back"
};

static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
                 const char *buf, size_t count)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        struct virtio_device *vdev = vblk->vdev;
        int i;

        BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
        i = sysfs_match_string(virtblk_cache_types, buf);
        if (i < 0)
                return i;

        virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
        virtblk_update_cache_mode(vdev);
        return count;
}

static ssize_t
cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        u8 writeback = virtblk_get_cache_mode(vblk->vdev);

        BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
        return sysfs_emit(buf, "%s\n", virtblk_cache_types[writeback]);
}

static DEVICE_ATTR_RW(cache_type);

static struct attribute *virtblk_attrs[] = {
        &dev_attr_serial.attr,
        &dev_attr_cache_type.attr,
        NULL,
};

static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
                                         struct attribute *a, int n)
{
        struct device *dev = kobj_to_dev(kobj);
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        struct virtio_device *vdev = vblk->vdev;

        if (a == &dev_attr_cache_type.attr &&
            !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
                return S_IRUGO;

        return a->mode;
}

static const struct attribute_group virtblk_attr_group = {
        .attrs = virtblk_attrs,
        .is_visible = virtblk_attrs_are_visible,
};

static const struct attribute_group *virtblk_attr_groups[] = {
        &virtblk_attr_group,
        NULL,
};

static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
        struct virtio_blk *vblk = set->driver_data;

        return blk_mq_virtio_map_queues(&set->map[HCTX_TYPE_DEFAULT],
                                        vblk->vdev, 0);
}

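/*
 * blk-mq entry points: requests are issued via virtio_queue_rq() and
 * completed via virtblk_request_done() once virtblk_done() has marked
 * them complete from the virtqueue callback.
 */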
static const struct blk_mq_ops virtio_mq_ops = {
        .queue_rq = virtio_queue_rq,
        .commit_rqs = virtio_commit_rqs,
        .complete = virtblk_request_done,
        .map_queues = virtblk_map_queues,
};

static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);

static int virtblk_probe(struct virtio_device *vdev)
{
        struct virtio_blk *vblk;
        struct request_queue *q;
        int err, index;

        u32 v, blk_size, max_size, sg_elems, opt_io_size;
        u16 min_io_size;
        u8 physical_block_exp, alignment_offset;
        unsigned int queue_depth;

        if (!vdev->config->get) {
                dev_err(&vdev->dev, "%s failure: config access disabled\n",
                        __func__);
                return -EINVAL;
        }

        err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
                             GFP_KERNEL);
        if (err < 0)
                goto out;
        index = err;

        /* We need to know how many segments before we allocate. */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
                                   struct virtio_blk_config, seg_max,
                                   &sg_elems);

        /* We need at least one SG element, whatever they say. */
        if (err || !sg_elems)
                sg_elems = 1;

        /* Prevent integer overflows and honor max vq size */
        sg_elems = min_t(u32, sg_elems, VIRTIO_BLK_MAX_SG_ELEMS - 2);

        /* We need extra sg elements at head and tail. */
        sg_elems += 2;
        vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
        if (!vblk) {
                err = -ENOMEM;
                goto out_free_index;
        }

        /* This reference is dropped in virtblk_remove(). */
        refcount_set(&vblk->refs, 1);
        mutex_init(&vblk->vdev_mutex);

        vblk->vdev = vdev;
        vblk->sg_elems = sg_elems;

        INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

        err = init_vq(vblk);
        if (err)
                goto out_free_vblk;

        /* Default queue sizing is to fill the ring. */
        if (!virtblk_queue_depth) {
                queue_depth = vblk->vqs[0].vq->num_free;
                /* ... but without indirect descs, we use 2 descs per req */
                if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
                        queue_depth /= 2;
        } else {
                queue_depth = virtblk_queue_depth;
        }

        memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
        vblk->tag_set.ops = &virtio_mq_ops;
        vblk->tag_set.queue_depth = queue_depth;
        vblk->tag_set.numa_node = NUMA_NO_NODE;
        vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        vblk->tag_set.cmd_size =
                sizeof(struct virtblk_req) +
                sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
        vblk->tag_set.driver_data = vblk;
        vblk->tag_set.nr_hw_queues = vblk->num_vqs;

        err = blk_mq_alloc_tag_set(&vblk->tag_set);
        if (err)
                goto out_free_vq;

        vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, vblk);
        if (IS_ERR(vblk->disk)) {
                err = PTR_ERR(vblk->disk);
                goto out_free_tags;
        }
        q = vblk->disk->queue;

        virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

        vblk->disk->major = major;
        vblk->disk->first_minor = index_to_minor(index);
        vblk->disk->minors = 1 << PART_BITS;
        vblk->disk->private_data = vblk;
        vblk->disk->fops = &virtblk_fops;
        vblk->disk->flags |= GENHD_FL_EXT_DEVT;
        vblk->index = index;

        /* configure queue flush support */
        virtblk_update_cache_mode(vdev);

        /* If disk is read-only in the host, the guest should obey */
        if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
                set_disk_ro(vblk->disk, 1);

        /* We can handle whatever the host told us to handle. */
        blk_queue_max_segments(q, vblk->sg_elems - 2);

        /* No real sector limit. */
        blk_queue_max_hw_sectors(q, -1U);

        max_size = virtio_max_dma_size(vdev);

        /* Host can optionally specify maximum segment size and number of
         * segments. */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
                                   struct virtio_blk_config, size_max, &v);
        if (!err)
                max_size = min(max_size, v);

        blk_queue_max_segment_size(q, max_size);

        /* Host can optionally specify the block size of the device */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
                                   struct virtio_blk_config, blk_size,
                                   &blk_size);
        if (!err) {
                err = blk_validate_block_size(blk_size);
                if (err) {
                        dev_err(&vdev->dev,
                                "virtio_blk: invalid block size: 0x%x\n",
                                blk_size);
                        goto out_cleanup_disk;
                }

                blk_queue_logical_block_size(q, blk_size);
        } else
                blk_size = queue_logical_block_size(q);

        /* Use topology information if available */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, physical_block_exp,
                                   &physical_block_exp);
        if (!err && physical_block_exp)
                blk_queue_physical_block_size(q,
                                              blk_size * (1 << physical_block_exp));

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, alignment_offset,
                                   &alignment_offset);
        if (!err && alignment_offset)
                blk_queue_alignment_offset(q, blk_size * alignment_offset);

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, min_io_size,
                                   &min_io_size);
        if (!err && min_io_size)
                blk_queue_io_min(q, blk_size * min_io_size);

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, opt_io_size,
                                   &opt_io_size);
        if (!err && opt_io_size)
                blk_queue_io_opt(q, blk_size * opt_io_size);

        if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
                q->limits.discard_granularity = blk_size;

                virtio_cread(vdev, struct virtio_blk_config,
                             discard_sector_alignment, &v);
                q->limits.discard_alignment = v ? v << SECTOR_SHIFT : 0;

                virtio_cread(vdev, struct virtio_blk_config,
                             max_discard_sectors, &v);
                blk_queue_max_discard_sectors(q, v ? v : UINT_MAX);

                virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
                             &v);
                blk_queue_max_discard_segments(q,
                                               min_not_zero(v,
                                                            MAX_DISCARD_SEGMENTS));

                blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
        }

        if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
                virtio_cread(vdev, struct virtio_blk_config,
                             max_write_zeroes_sectors, &v);
                blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX);
        }

        virtblk_update_capacity(vblk, false);
        virtio_device_ready(vdev);

        err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
        if (err)
                goto out_cleanup_disk;

        return 0;

out_cleanup_disk:
        blk_cleanup_disk(vblk->disk);
out_free_tags:
        blk_mq_free_tag_set(&vblk->tag_set);
out_free_vq:
        vdev->config->del_vqs(vdev);
        kfree(vblk->vqs);
out_free_vblk:
        kfree(vblk);
out_free_index:
        ida_simple_remove(&vd_index_ida, index);
out:
        return err;
}

static void virtblk_remove(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        /* Make sure no work handler is accessing the device. */
        flush_work(&vblk->config_work);

        del_gendisk(vblk->disk);
        blk_cleanup_disk(vblk->disk);
        blk_mq_free_tag_set(&vblk->tag_set);

        mutex_lock(&vblk->vdev_mutex);

        /* Stop all the virtqueues. */
        vdev->config->reset(vdev);

        /* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
        vblk->vdev = NULL;

        vdev->config->del_vqs(vdev);
        kfree(vblk->vqs);

        mutex_unlock(&vblk->vdev_mutex);

        virtblk_put(vblk);
}

#ifdef CONFIG_PM_SLEEP
static int virtblk_freeze(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        /* Ensure we don't receive any more interrupts */
        vdev->config->reset(vdev);

        /* Make sure no work handler is accessing the device. */
        flush_work(&vblk->config_work);

        blk_mq_quiesce_queue(vblk->disk->queue);

        vdev->config->del_vqs(vdev);
        kfree(vblk->vqs);

        return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;
        int ret;

        ret = init_vq(vdev->priv);
        if (ret)
                return ret;

        virtio_device_ready(vdev);

        blk_mq_unquiesce_queue(vblk->disk->queue);
        return 0;
}
#endif

static const struct virtio_device_id id_table[] = {
        { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static unsigned int features_legacy[] = {
        VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
        VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
        VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
        VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};

static unsigned int features[] = {
        VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
        VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
        VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
        VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};

static struct virtio_driver virtio_blk = {
        .feature_table = features,
        .feature_table_size = ARRAY_SIZE(features),
        .feature_table_legacy = features_legacy,
        .feature_table_size_legacy = ARRAY_SIZE(features_legacy),
        .suppress_used_validation = true,
        .driver.name = KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .id_table = id_table,
        .probe = virtblk_probe,
        .remove = virtblk_remove,
        .config_changed = virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
        .freeze = virtblk_freeze,
        .restore = virtblk_restore,
#endif
};

static int __init init(void)
{
        int error;

        virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
        if (!virtblk_wq)
                return -ENOMEM;

        major = register_blkdev(0, "virtblk");
        if (major < 0) {
                error = major;
                goto out_destroy_workqueue;
        }

        error = register_virtio_driver(&virtio_blk);
        if (error)
                goto out_unregister_blkdev;
        return 0;

out_unregister_blkdev:
        unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
        destroy_workqueue(virtblk_wq);
        return error;
}

static void __exit fini(void)
{
        unregister_virtio_driver(&virtio_blk);
        unregister_blkdev(major, "virtblk");
        destroy_workqueue(virtblk_wq);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");