Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * (part of code stolen from loop.c)
 */

#define pr_fmt(fmt) "nbd: " fmt

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
#include <linux/nbd-netlink.h>
#include <net/genetlink.h>

#define CREATE_TRACE_POINTS
#include <trace/events/nbd.h>

static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static struct workqueue_struct *nbd_del_wq;
static int nbd_total_devices = 0;

struct nbd_sock {
	struct socket *sock;
	struct mutex tx_lock;
	struct request *pending;
	int sent;
	bool dead;
	int fallback_index;
	int cookie;
};

struct recv_thread_args {
	struct work_struct work;
	struct nbd_device *nbd;
	struct nbd_sock *nsock;
	int index;
};

struct link_dead_args {
	struct work_struct work;
	int index;
};

#define NBD_RT_TIMEDOUT			0
#define NBD_RT_DISCONNECT_REQUESTED	1
#define NBD_RT_DISCONNECTED		2
#define NBD_RT_HAS_PID_FILE		3
#define NBD_RT_HAS_CONFIG_REF		4
#define NBD_RT_BOUND			5
#define NBD_RT_DISCONNECT_ON_CLOSE	6
#define NBD_RT_HAS_BACKEND_FILE		7

#define NBD_DESTROY_ON_DISCONNECT	0
#define NBD_DISCONNECT_REQUESTED	1

struct nbd_config {
	u32 flags;
	unsigned long runtime_flags;
	u64 dead_conn_timeout;

	struct nbd_sock **socks;
	int num_connections;
	atomic_t live_connections;
	wait_queue_head_t conn_wait;

	atomic_t recv_threads;
	wait_queue_head_t recv_wq;
	unsigned int blksize_bits;
	loff_t bytesize;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};

static inline unsigned int nbd_blksize(struct nbd_config *config)
{
	return 1u << config->blksize_bits;
}
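
/*
 * Illustrative note (not in the original source): with the default
 * blksize_bits of NBD_DEF_BLKSIZE_BITS == 10, nbd_blksize() returns
 * 1u << 10 == 1024 bytes.
 */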

struct nbd_device {
	struct blk_mq_tag_set tag_set;

	int index;
	refcount_t config_refs;
	refcount_t refs;
	struct nbd_config *config;
	struct mutex config_lock;
	struct gendisk *disk;
	struct workqueue_struct *recv_workq;
	struct work_struct remove_work;

	struct list_head list;
	struct task_struct *task_setup;

	unsigned long flags;
	pid_t pid; /* pid of nbd-client, if attached */

	char *backend;
};

#define NBD_CMD_REQUEUED	1
/*
 * This flag will be set if nbd_queue_rq() succeeds, and will be checked
 * and cleared in completion. Both setting and clearing of the flag are
 * protected by cmd->lock.
 */
#define NBD_CMD_INFLIGHT	2

struct nbd_cmd {
	struct nbd_device *nbd;
	struct mutex lock;
	int index;
	int cookie;
	int retries;
	blk_status_t status;
	unsigned long flags;
	u32 cmd_cookie;
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_DEF_BLKSIZE_BITS 10

static unsigned int nbds_max = 16;
static int max_part = 16;
static int part_shift;

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
static void nbd_config_put(struct nbd_device *nbd);
static void nbd_connect_reply(struct genl_info *info, int index);
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
static void nbd_dead_link_work(struct work_struct *work);
static void nbd_disconnect_and_put(struct nbd_device *nbd);

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}

static void nbd_requeue_cmd(struct nbd_cmd *cmd)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);

	if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
		blk_mq_requeue_request(req, true);
}

#define NBD_COOKIE_BITS 32

static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	u32 tag = blk_mq_unique_tag(req);
	u64 cookie = cmd->cmd_cookie;

	return (cookie << NBD_COOKIE_BITS) | tag;
}

static u32 nbd_handle_to_tag(u64 handle)
{
	return (u32)handle;
}

static u32 nbd_handle_to_cookie(u64 handle)
{
	return (u32)(handle >> NBD_COOKIE_BITS);
}
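
/*
 * Illustrative example (not in the original source): with
 * cmd->cmd_cookie == 2 and blk_mq_unique_tag() == 0x00010003 (hw queue 1,
 * tag 3), nbd_cmd_handle() packs the pair as (2ULL << 32) | 0x00010003 ==
 * 0x0000000200010003. The two helpers above recover the tag and cookie
 * halves when a reply comes back from the server.
 */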

static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}

static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = disk->private_data;

	return sprintf(buf, "%d\n", nbd->pid);
}

static const struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = 0444},
	.show = pid_show,
};

static ssize_t backend_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = disk->private_data;

	return sprintf(buf, "%s\n", nbd->backend ?: "");
}

static const struct device_attribute backend_attr = {
	.attr = { .name = "backend", .mode = 0444},
	.show = backend_show,
};

static void nbd_dev_remove(struct nbd_device *nbd)
{
	struct gendisk *disk = nbd->disk;

	del_gendisk(disk);
	blk_mq_free_tag_set(&nbd->tag_set);

	/*
	 * Remove from idr after del_gendisk() completes, so if the same ID is
	 * reused, the following add_disk() will succeed.
	 */
	mutex_lock(&nbd_index_mutex);
	idr_remove(&nbd_index_idr, nbd->index);
	mutex_unlock(&nbd_index_mutex);
	destroy_workqueue(nbd->recv_workq);
	put_disk(disk);
}

static void nbd_dev_remove_work(struct work_struct *work)
{
	nbd_dev_remove(container_of(work, struct nbd_device, remove_work));
}

static void nbd_put(struct nbd_device *nbd)
{
	if (!refcount_dec_and_test(&nbd->refs))
		return;

	/* Call del_gendisk() asynchronously to prevent deadlock */
	if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags))
		queue_work(nbd_del_wq, &nbd->remove_work);
	else
		nbd_dev_remove(nbd);
}

static int nbd_disconnected(struct nbd_config *config)
{
	return test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags) ||
		test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
}

static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
				int notify)
{
	if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
		struct link_dead_args *args;
		args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
		if (args) {
			INIT_WORK(&args->work, nbd_dead_link_work);
			args->index = nbd->index;
			queue_work(system_wq, &args->work);
		}
	}
	if (!nsock->dead) {
		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
		if (atomic_dec_return(&nbd->config->live_connections) == 0) {
			if (test_and_clear_bit(NBD_RT_DISCONNECT_REQUESTED,
					       &nbd->config->runtime_flags)) {
				set_bit(NBD_RT_DISCONNECTED,
					&nbd->config->runtime_flags);
				dev_info(nbd_to_dev(nbd),
					 "Disconnected due to user request.\n");
			}
		}
	}
	nsock->dead = true;
	nsock->pending = NULL;
	nsock->sent = 0;
}

static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
			  loff_t blksize)
{
	struct queue_limits lim;
	int error;

	if (!blksize)
		blksize = 1u << NBD_DEF_BLKSIZE_BITS;

	if (blk_validate_block_size(blksize))
		return -EINVAL;

	if (bytesize < 0)
		return -EINVAL;

	nbd->config->bytesize = bytesize;
	nbd->config->blksize_bits = __ffs(blksize);

	if (!nbd->pid)
		return 0;

	lim = queue_limits_start_update(nbd->disk->queue);
	if (nbd->config->flags & NBD_FLAG_SEND_TRIM)
		lim.max_hw_discard_sectors = UINT_MAX;
	else
		lim.max_hw_discard_sectors = 0;
	lim.logical_block_size = blksize;
	lim.physical_block_size = blksize;
	error = queue_limits_commit_update(nbd->disk->queue, &lim);
	if (error)
		return error;

	if (max_part)
		set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
	if (!set_capacity_and_notify(nbd->disk, bytesize >> 9))
		kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
	return 0;
}

static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
			loff_t blksize)
{
	int error;

	blk_mq_freeze_queue(nbd->disk->queue);
	error = __nbd_set_size(nbd, bytesize, blksize);
	blk_mq_unfreeze_queue(nbd->disk->queue);

	return error;
}

static void nbd_complete_rq(struct request *req)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

	dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req,
		cmd->status ? "failed" : "done");

	blk_mq_end_request(req, cmd->status);
}

/*
 * Forcibly shutdown the socket causing all listeners to error
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int i;

	if (config->num_connections == 0)
		return;
	if (test_and_set_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
		return;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];
		mutex_lock(&nsock->tx_lock);
		nbd_mark_nsock_dead(nbd, nsock, 0);
		mutex_unlock(&nsock->tx_lock);
	}
	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}

static u32 req_to_nbd_cmd_type(struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		return NBD_CMD_TRIM;
	case REQ_OP_FLUSH:
		return NBD_CMD_FLUSH;
	case REQ_OP_WRITE:
		return NBD_CMD_WRITE;
	case REQ_OP_READ:
		return NBD_CMD_READ;
	default:
		return U32_MAX;
	}
}

static struct nbd_config *nbd_get_config_unlocked(struct nbd_device *nbd)
{
	if (refcount_inc_not_zero(&nbd->config_refs)) {
		/*
		 * Add smp_mb__after_atomic to ensure that reading
		 * nbd->config_refs and reading nbd->config is ordered. Its
		 * pair is the barrier in nbd_alloc_and_init_config(), which
		 * avoids nbd->config_refs being observed as set before
		 * nbd->config.
		 */
		smp_mb__after_atomic();
		return nbd->config;
	}

	return NULL;
}

static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;

	if (!mutex_trylock(&cmd->lock))
		return BLK_EH_RESET_TIMER;

	if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
		mutex_unlock(&cmd->lock);
		return BLK_EH_DONE;
	}

	config = nbd_get_config_unlocked(nbd);
	if (!config) {
		cmd->status = BLK_STS_TIMEOUT;
		__clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
		mutex_unlock(&cmd->lock);
		goto done;
	}

	if (config->num_connections > 1 ||
	    (config->num_connections == 1 && nbd->tag_set.timeout)) {
		dev_err_ratelimited(nbd_to_dev(nbd),
				    "Connection timed out, retrying (%d/%d alive)\n",
				    atomic_read(&config->live_connections),
				    config->num_connections);
		/*
		 * Hooray we have more connections, requeue this IO, the
		 * submit path will put it on a real connection. Or if only
		 * one connection is configured, the submit path will wait
		 * until a new connection is reconfigured or until the dead
		 * connection timeout expires.
		 */
		if (config->socks) {
			if (cmd->index < config->num_connections) {
				struct nbd_sock *nsock =
					config->socks[cmd->index];
				mutex_lock(&nsock->tx_lock);
				/* We can have multiple outstanding requests,
				 * so we don't want to mark the nsock dead if
				 * we've already reconnected with a new
				 * socket; only mark it dead if it's the same
				 * socket we were sent out on.
				 */
				if (cmd->cookie == nsock->cookie)
					nbd_mark_nsock_dead(nbd, nsock, 1);
				mutex_unlock(&nsock->tx_lock);
			}
			mutex_unlock(&cmd->lock);
			nbd_requeue_cmd(cmd);
			nbd_config_put(nbd);
			return BLK_EH_DONE;
		}
	}

	if (!nbd->tag_set.timeout) {
		/*
		 * Userspace sets timeout=0 to disable socket disconnection,
		 * so just warn and reset the timer.
		 */
		struct nbd_sock *nsock = config->socks[cmd->index];
		cmd->retries++;
		dev_info(nbd_to_dev(nbd), "Possible stuck request %p: control (%s@%llu,%uB). Runtime %u seconds\n",
			req, nbdcmd_to_ascii(req_to_nbd_cmd_type(req)),
			(unsigned long long)blk_rq_pos(req) << 9,
			blk_rq_bytes(req), (req->timeout / HZ) * cmd->retries);

		mutex_lock(&nsock->tx_lock);
		if (cmd->cookie != nsock->cookie) {
			nbd_requeue_cmd(cmd);
			mutex_unlock(&nsock->tx_lock);
			mutex_unlock(&cmd->lock);
			nbd_config_put(nbd);
			return BLK_EH_DONE;
		}
		mutex_unlock(&nsock->tx_lock);
		mutex_unlock(&cmd->lock);
		nbd_config_put(nbd);
		return BLK_EH_RESET_TIMER;
	}

	dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n");
	set_bit(NBD_RT_TIMEDOUT, &config->runtime_flags);
	cmd->status = BLK_STS_IOERR;
	__clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
	mutex_unlock(&cmd->lock);
	sock_shutdown(nbd);
	nbd_config_put(nbd);
done:
	blk_mq_complete_request(req);
	return BLK_EH_DONE;
}

static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
		       struct iov_iter *iter, int msg_flags, int *sent)
{
	int result;
	struct msghdr msg = {};
	unsigned int noreclaim_flag;

	if (unlikely(!sock)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted %s on closed socket in sock_xmit\n",
				    (send ? "send" : "recv"));
		return -EINVAL;
	}

	msg.msg_iter = *iter;

	noreclaim_flag = memalloc_noreclaim_save();
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		sock->sk->sk_use_task_frag = false;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = sock_sendmsg(sock, &msg);
		else
			result = sock_recvmsg(sock, &msg, msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		if (sent)
			*sent += result;
	} while (msg_data_left(&msg));

	memalloc_noreclaim_restore(noreclaim_flag);

	return result;
}

/*
 * Send or receive packet. Return a positive value on success and
 * negative value on failure, and never return 0.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send,
		     struct iov_iter *iter, int msg_flags, int *sent)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock = config->socks[index]->sock;

	return __sock_xmit(nbd, sock, send, iter, msg_flags, sent);
}

/*
 * Different settings for sk->sk_sndtimeo can result in different return values
 * if there is a signal pending when we enter sendmsg, because reasons?
 */
static inline int was_interrupted(int result)
{
	return result == -ERESTARTSYS || result == -EINTR;
}

/*
 * Returns BLK_STS_RESOURCE if the caller should retry after a delay.
 * Returns BLK_STS_IOERR if sending failed.
 */
static blk_status_t nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd,
				 int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_config *config = nbd->config;
	struct nbd_sock *nsock = config->socks[index];
	int result;
	struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	struct bio *bio;
	u64 handle;
	u32 type;
	u32 nbd_cmd_flags = 0;
	int sent = nsock->sent, skip = 0;

	lockdep_assert_held(&cmd->lock);
	lockdep_assert_held(&nsock->tx_lock);

	iov_iter_kvec(&from, ITER_SOURCE, &iov, 1, sizeof(request));

	type = req_to_nbd_cmd_type(req);
	if (type == U32_MAX)
		return BLK_STS_IOERR;

	if (rq_data_dir(req) == WRITE &&
	    (config->flags & NBD_FLAG_READ_ONLY)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Write on read-only\n");
		return BLK_STS_IOERR;
	}

	if (req->cmd_flags & REQ_FUA)
		nbd_cmd_flags |= NBD_CMD_FLAG_FUA;

	/* We did a partial send previously, and we at least sent the whole
	 * request struct, so just go and send the rest of the pages in the
	 * request.
	 */
	if (sent) {
		if (sent >= sizeof(request)) {
			skip = sent - sizeof(request);

			/* initialize handle for tracing purposes */
			handle = nbd_cmd_handle(cmd);

			goto send_pages;
		}
		iov_iter_advance(&from, sent);
	} else {
		cmd->cmd_cookie++;
	}
	cmd->index = index;
	cmd->cookie = nsock->cookie;
	cmd->retries = 0;
	request.type = htonl(type | nbd_cmd_flags);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(blk_rq_bytes(req));
	}
	handle = nbd_cmd_handle(cmd);
	request.cookie = cpu_to_be64(handle);

	trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		req, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &from,
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
	trace_nbd_header_sent(req, handle);
	if (result < 0) {
		if (was_interrupted(result)) {
			/* If we haven't sent anything we can just return BUSY,
			 * however if we have sent something we need to make
			 * sure we only allow this req to be sent until we are
			 * completely done.
			 */
			if (sent) {
				nsock->pending = req;
				nsock->sent = sent;
			}
			set_bit(NBD_CMD_REQUEUED, &cmd->flags);
			return BLK_STS_RESOURCE;
		}
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Send control failed (result %d)\n", result);
		goto requeue;
	}
send_pages:
	if (type != NBD_CMD_WRITE)
		goto out;

	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);
			int flags = is_last ? 0 : MSG_MORE;

			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				req, bvec.bv_len);
			iov_iter_bvec(&from, ITER_SOURCE, &bvec, 1, bvec.bv_len);
			if (skip) {
				if (skip >= iov_iter_count(&from)) {
					skip -= iov_iter_count(&from);
					continue;
				}
				iov_iter_advance(&from, skip);
				skip = 0;
			}
			result = sock_xmit(nbd, index, 1, &from, flags, &sent);
			if (result < 0) {
				if (was_interrupted(result)) {
					/* We've already sent the header, we
					 * have no choice but to set pending and
					 * return BUSY.
					 */
					nsock->pending = req;
					nsock->sent = sent;
					set_bit(NBD_CMD_REQUEUED, &cmd->flags);
					return BLK_STS_RESOURCE;
				}
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				goto requeue;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}
out:
	trace_nbd_payload_sent(req, handle);
	nsock->pending = NULL;
	nsock->sent = 0;
	__set_bit(NBD_CMD_INFLIGHT, &cmd->flags);
	return BLK_STS_OK;

requeue:
	/* retry on a different socket */
	dev_err_ratelimited(disk_to_dev(nbd->disk),
			    "Request send failed, requeueing\n");
	nbd_mark_nsock_dead(nbd, nsock, 1);
	nbd_requeue_cmd(cmd);
	return BLK_STS_OK;
}

static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock,
			  struct nbd_reply *reply)
{
	struct kvec iov = {.iov_base = reply, .iov_len = sizeof(*reply)};
	struct iov_iter to;
	int result;

	reply->magic = 0;
	iov_iter_kvec(&to, ITER_DEST, &iov, 1, sizeof(*reply));
	result = __sock_xmit(nbd, sock, 0, &to, MSG_WAITALL, NULL);
	if (result < 0) {
		if (!nbd_disconnected(nbd->config))
			dev_err(disk_to_dev(nbd->disk),
				"Receive control failed (result %d)\n", result);
		return result;
	}

	if (ntohl(reply->magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
			(unsigned long)ntohl(reply->magic));
		return -EPROTO;
	}

	return 0;
}

/* NULL returned = something went wrong, inform userspace */
static struct nbd_cmd *nbd_handle_reply(struct nbd_device *nbd, int index,
					struct nbd_reply *reply)
{
	int result;
	struct nbd_cmd *cmd;
	struct request *req = NULL;
	u64 handle;
	u16 hwq;
	u32 tag;
	int ret = 0;

	handle = be64_to_cpu(reply->cookie);
	tag = nbd_handle_to_tag(handle);
	hwq = blk_mq_unique_tag_to_hwq(tag);
	if (hwq < nbd->tag_set.nr_hw_queues)
		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				       blk_mq_unique_tag_to_tag(tag));
	if (!req || !blk_mq_request_started(req)) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
			tag, req);
		return ERR_PTR(-ENOENT);
	}
	trace_nbd_header_received(req, handle);
	cmd = blk_mq_rq_to_pdu(req);

	mutex_lock(&cmd->lock);
	if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
		dev_err(disk_to_dev(nbd->disk), "Suspicious reply %d (status %u flags %lu)",
			tag, cmd->status, cmd->flags);
		ret = -ENOENT;
		goto out;
	}
	if (cmd->index != index) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply %d from different sock %d (expected %d)",
			tag, index, cmd->index);
		ret = -ENOENT;
		goto out;
	}
	if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
		dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
			req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
		ret = -ENOENT;
		goto out;
	}
	if (cmd->status != BLK_STS_OK) {
		dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n",
			req);
		ret = -ENOENT;
		goto out;
	}
	if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
		dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
			req);
		ret = -ENOENT;
		goto out;
	}
	if (ntohl(reply->error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply->error));
		cmd->status = BLK_STS_IOERR;
		goto out;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;
		struct iov_iter to;

		rq_for_each_segment(bvec, req, iter) {
			iov_iter_bvec(&to, ITER_DEST, &bvec, 1, bvec.bv_len);
			result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
			if (result < 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				/*
				 * If we've disconnected, we need to make sure we
				 * complete this request, otherwise error out
				 * and let the timeout stuff handle resubmitting
				 * this request onto another connection.
				 */
				if (nbd_disconnected(nbd->config)) {
					cmd->status = BLK_STS_IOERR;
					goto out;
				}
				ret = -EIO;
				goto out;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				req, bvec.bv_len);
		}
	}
out:
	trace_nbd_payload_received(req, handle);
	mutex_unlock(&cmd->lock);
	return ret ? ERR_PTR(ret) : cmd;
}

static void recv_work(struct work_struct *work)
{
	struct recv_thread_args *args = container_of(work,
						     struct recv_thread_args,
						     work);
	struct nbd_device *nbd = args->nbd;
	struct nbd_config *config = nbd->config;
	struct request_queue *q = nbd->disk->queue;
	struct nbd_sock *nsock = args->nsock;
	struct nbd_cmd *cmd;
	struct request *rq;

	while (1) {
		struct nbd_reply reply;

		if (nbd_read_reply(nbd, nsock->sock, &reply))
			break;

		/*
		 * Grab .q_usage_counter so the request pool won't go away;
		 * then no request use-after-free is possible during
		 * nbd_handle_reply(). If the queue is frozen, there won't be
		 * any inflight requests, so we needn't handle the incoming
		 * garbage message.
		 */
		if (!percpu_ref_tryget(&q->q_usage_counter)) {
			dev_err(disk_to_dev(nbd->disk), "%s: no io inflight\n",
				__func__);
			break;
		}

		cmd = nbd_handle_reply(nbd, args->index, &reply);
		if (IS_ERR(cmd)) {
			percpu_ref_put(&q->q_usage_counter);
			break;
		}

		rq = blk_mq_rq_from_pdu(cmd);
		if (likely(!blk_should_fake_timeout(rq->q))) {
			bool complete;

			mutex_lock(&cmd->lock);
			complete = __test_and_clear_bit(NBD_CMD_INFLIGHT,
							&cmd->flags);
			mutex_unlock(&cmd->lock);
			if (complete)
				blk_mq_complete_request(rq);
		}
		percpu_ref_put(&q->q_usage_counter);
	}

	mutex_lock(&nsock->tx_lock);
	nbd_mark_nsock_dead(nbd, nsock, 1);
	mutex_unlock(&nsock->tx_lock);

	nbd_config_put(nbd);
	atomic_dec(&config->recv_threads);
	wake_up(&config->recv_wq);
	kfree(args);
}

static bool nbd_clear_req(struct request *req, void *data)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

	/* don't abort one completed request */
	if (blk_mq_request_completed(req))
		return true;

	mutex_lock(&cmd->lock);
	if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
		mutex_unlock(&cmd->lock);
		return true;
	}
	cmd->status = BLK_STS_IOERR;
	mutex_unlock(&cmd->lock);

	blk_mq_complete_request(req);
	return true;
}

static void nbd_clear_que(struct nbd_device *nbd)
{
	blk_mq_quiesce_queue(nbd->disk->queue);
	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
	blk_mq_unquiesce_queue(nbd->disk->queue);
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}

static int find_fallback(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int new_index = -1;
	struct nbd_sock *nsock = config->socks[index];
	int fallback = nsock->fallback_index;

	if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
		return new_index;

	if (config->num_connections <= 1) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Dead connection, failed to find a fallback\n");
		return new_index;
	}

	if (fallback >= 0 && fallback < config->num_connections &&
	    !config->socks[fallback]->dead)
		return fallback;

	if (nsock->fallback_index < 0 ||
	    nsock->fallback_index >= config->num_connections ||
	    config->socks[nsock->fallback_index]->dead) {
		int i;
		for (i = 0; i < config->num_connections; i++) {
			if (i == index)
				continue;
			if (!config->socks[i]->dead) {
				new_index = i;
				break;
			}
		}
		nsock->fallback_index = new_index;
		if (new_index < 0) {
			dev_err_ratelimited(disk_to_dev(nbd->disk),
					    "Dead connection, failed to find a fallback\n");
			return new_index;
		}
	}
	new_index = nsock->fallback_index;
	return new_index;
}

static int wait_for_reconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (!config->dead_conn_timeout)
		return 0;

	if (!wait_event_timeout(config->conn_wait,
				test_bit(NBD_RT_DISCONNECTED,
					 &config->runtime_flags) ||
				atomic_read(&config->live_connections) > 0,
				config->dead_conn_timeout))
		return 0;

	return !test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
}

static blk_status_t nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;
	struct nbd_sock *nsock;
	blk_status_t ret;

	lockdep_assert_held(&cmd->lock);

	config = nbd_get_config_unlocked(nbd);
	if (!config) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Socks array is empty\n");
		return BLK_STS_IOERR;
	}

	if (index >= config->num_connections) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		nbd_config_put(nbd);
		return BLK_STS_IOERR;
	}
	cmd->status = BLK_STS_OK;
again:
	nsock = config->socks[index];
	mutex_lock(&nsock->tx_lock);
	if (nsock->dead) {
		int old_index = index;
		index = find_fallback(nbd, index);
		mutex_unlock(&nsock->tx_lock);
		if (index < 0) {
			if (wait_for_reconnect(nbd)) {
				index = old_index;
				goto again;
			}
			/* All the sockets should already be down at this
			 * point; we just want to make sure that DISCONNECTED
			 * is set so any requests that come in that were
			 * queued waiting for the reconnect timer don't
			 * trigger the timer again and instead just error out.
			 */
			sock_shutdown(nbd);
			nbd_config_put(nbd);
			return BLK_STS_IOERR;
		}
		goto again;
	}

	/* Handle the case that we have a pending request that was partially
	 * transmitted that _has_ to be serviced first. We need to call requeue
	 * here so that it gets put _after_ the request that is already on the
	 * dispatch list.
	 */
	blk_mq_start_request(req);
	if (unlikely(nsock->pending && nsock->pending != req)) {
		nbd_requeue_cmd(cmd);
		ret = BLK_STS_OK;
		goto out;
	}
	ret = nbd_send_cmd(nbd, cmd, index);
out:
	mutex_unlock(&nsock->tx_lock);
	nbd_config_put(nbd);
	return ret;
}

static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	blk_status_t ret;

	/*
	 * Since we look at the bios to send the request over the network we
	 * need to make sure the completion work doesn't mark this request done
	 * before we are done doing our send. This keeps us from dereferencing
	 * freed data if we have particularly fast completions (i.e. we get the
	 * completion before we exit sock_xmit on the last bvec) or in the case
	 * that the server is misbehaving (or there was an error) before we're
	 * done sending everything over the wire.
	 */
	mutex_lock(&cmd->lock);
	clear_bit(NBD_CMD_REQUEUED, &cmd->flags);

	/* We can be called directly from the user space process, which means we
	 * could possibly have signals pending so our sendmsg will fail. In
	 * this case we need to return that we are busy, otherwise error out as
	 * appropriate.
	 */
	ret = nbd_handle_cmd(cmd, hctx->queue_num);
	mutex_unlock(&cmd->lock);

	return ret;
}

static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
				     int *err)
{
	struct socket *sock;

	*err = 0;
	sock = sockfd_lookup(fd, err);
	if (!sock)
		return NULL;

	if (sock->ops->shutdown == sock_no_shutdown) {
		dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
		*err = -EINVAL;
		sockfd_put(sock);
		return NULL;
	}

	return sock;
}

static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
			  bool netlink)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock;
	struct nbd_sock **socks;
	struct nbd_sock *nsock;
	int err;

	/* Arg will be cast to int, check it to avoid overflow */
	if (arg > INT_MAX)
		return -EINVAL;
	sock = nbd_get_socket(nbd, arg, &err);
	if (!sock)
		return err;

	/*
	 * We need to make sure we don't get any errant requests while we're
	 * reallocating the ->socks array.
	 */
	blk_mq_freeze_queue(nbd->disk->queue);

	if (!netlink && !nbd->task_setup &&
	    !test_bit(NBD_RT_BOUND, &config->runtime_flags))
		nbd->task_setup = current;

	if (!netlink &&
	    (nbd->task_setup != current ||
	     test_bit(NBD_RT_BOUND, &config->runtime_flags))) {
		dev_err(disk_to_dev(nbd->disk),
			"Device being setup by another task");
		err = -EBUSY;
		goto put_socket;
	}

	nsock = kzalloc(sizeof(*nsock), GFP_KERNEL);
	if (!nsock) {
		err = -ENOMEM;
		goto put_socket;
	}

	socks = krealloc(config->socks, (config->num_connections + 1) *
			 sizeof(struct nbd_sock *), GFP_KERNEL);
	if (!socks) {
		kfree(nsock);
		err = -ENOMEM;
		goto put_socket;
	}

	config->socks = socks;

	nsock->fallback_index = -1;
	nsock->dead = false;
	mutex_init(&nsock->tx_lock);
	nsock->sock = sock;
	nsock->pending = NULL;
	nsock->sent = 0;
	nsock->cookie = 0;
	socks[config->num_connections++] = nsock;
	atomic_inc(&config->live_connections);
	blk_mq_unfreeze_queue(nbd->disk->queue);

	return 0;

put_socket:
	blk_mq_unfreeze_queue(nbd->disk->queue);
	sockfd_put(sock);
	return err;
}

static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock, *old;
	struct recv_thread_args *args;
	int i;
	int err;

	sock = nbd_get_socket(nbd, arg, &err);
	if (!sock)
		return err;

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		if (!nsock->dead)
			continue;

		mutex_lock(&nsock->tx_lock);
		if (!nsock->dead) {
			mutex_unlock(&nsock->tx_lock);
			continue;
		}
		sk_set_memalloc(sock->sk);
		if (nbd->tag_set.timeout)
			sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		old = nsock->sock;
		nsock->fallback_index = -1;
		nsock->sock = sock;
		nsock->dead = false;
		INIT_WORK(&args->work, recv_work);
		args->index = i;
		args->nbd = nbd;
		args->nsock = nsock;
		nsock->cookie++;
		mutex_unlock(&nsock->tx_lock);
		sockfd_put(old);

		clear_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);

		/* We take the tx_lock in an error path in the recv_work, so we
		 * need to queue_work outside of the tx_lock.
		 */
		queue_work(nbd->recv_workq, &args->work);

		atomic_inc(&config->live_connections);
		wake_up(&config->conn_wait);
		return 0;
	}
	sockfd_put(sock);
	kfree(args);
	return -ENOSPC;
}

static void nbd_bdev_reset(struct nbd_device *nbd)
{
	if (disk_openers(nbd->disk) > 1)
		return;
	set_capacity(nbd->disk, 0);
}

static void nbd_parse_flags(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (config->flags & NBD_FLAG_READ_ONLY)
		set_disk_ro(nbd->disk, true);
	else
		set_disk_ro(nbd->disk, false);
	if (config->flags & NBD_FLAG_SEND_FLUSH) {
		if (config->flags & NBD_FLAG_SEND_FUA)
			blk_queue_write_cache(nbd->disk->queue, true, true);
		else
			blk_queue_write_cache(nbd->disk->queue, true, false);
	} else
		blk_queue_write_cache(nbd->disk->queue, false, false);
}

static void send_disconnects(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	struct nbd_request request = {
		.magic = htonl(NBD_REQUEST_MAGIC),
		.type = htonl(NBD_CMD_DISC),
	};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	int i, ret;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		iov_iter_kvec(&from, ITER_SOURCE, &iov, 1, sizeof(request));
		mutex_lock(&nsock->tx_lock);
		ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
		if (ret < 0)
			dev_err(disk_to_dev(nbd->disk),
				"Send disconnect failed %d\n", ret);
		mutex_unlock(&nsock->tx_lock);
	}
}

static int nbd_disconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;

	dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
	set_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
	set_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags);
	send_disconnects(nbd);
	return 0;
}

static void nbd_clear_sock(struct nbd_device *nbd)
{
	sock_shutdown(nbd);
	nbd_clear_que(nbd);
	nbd->task_setup = NULL;
}

static void nbd_config_put(struct nbd_device *nbd)
{
	if (refcount_dec_and_mutex_lock(&nbd->config_refs,
					&nbd->config_lock)) {
		struct nbd_config *config = nbd->config;
		nbd_dev_dbg_close(nbd);
		invalidate_disk(nbd->disk);
		if (nbd->config->bytesize)
			kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
		if (test_and_clear_bit(NBD_RT_HAS_PID_FILE,
				       &config->runtime_flags))
			device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
		nbd->pid = 0;
		if (test_and_clear_bit(NBD_RT_HAS_BACKEND_FILE,
				       &config->runtime_flags)) {
			device_remove_file(disk_to_dev(nbd->disk), &backend_attr);
			kfree(nbd->backend);
			nbd->backend = NULL;
		}
		nbd_clear_sock(nbd);
		if (config->num_connections) {
			int i;
			for (i = 0; i < config->num_connections; i++) {
				sockfd_put(config->socks[i]->sock);
				kfree(config->socks[i]);
			}
			kfree(config->socks);
		}
		kfree(nbd->config);
		nbd->config = NULL;

		nbd->tag_set.timeout = 0;

		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		module_put(THIS_MODULE);
	}
}

static int nbd_start_device(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int num_connections = config->num_connections;
	int error = 0, i;

	if (nbd->pid)
		return -EBUSY;
	if (!config->socks)
		return -EINVAL;
	if (num_connections > 1 &&
	    !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
		dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
		return -EINVAL;
	}

	blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
	nbd->pid = task_pid_nr(current);

	nbd_parse_flags(nbd);

	error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
	if (error) {
		dev_err(disk_to_dev(nbd->disk), "device_create_file failed for pid!\n");
		return error;
	}
	set_bit(NBD_RT_HAS_PID_FILE, &config->runtime_flags);

	nbd_dev_dbg_init(nbd);
	for (i = 0; i < num_connections; i++) {
		struct recv_thread_args *args;

		args = kzalloc(sizeof(*args), GFP_KERNEL);
		if (!args) {
			sock_shutdown(nbd);
			/*
			 * If num_connections is m (m > 2), and the first n
			 * (1 < n < m) kzallocs succeed but the (n + 1)th
			 * fails, we still have n recv threads running. So add
			 * flush_workqueue here to prevent those threads from
			 * dropping the last config_refs and trying to destroy
			 * the workqueue from inside the workqueue.
			 */
			if (i)
				flush_workqueue(nbd->recv_workq);
			return -ENOMEM;
		}
		sk_set_memalloc(config->socks[i]->sock->sk);
		if (nbd->tag_set.timeout)
			config->socks[i]->sock->sk->sk_sndtimeo =
				nbd->tag_set.timeout;
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		INIT_WORK(&args->work, recv_work);
		args->nbd = nbd;
		args->nsock = config->socks[i];
		args->index = i;
		queue_work(nbd->recv_workq, &args->work);
	}
	return nbd_set_size(nbd, config->bytesize, nbd_blksize(config));
}

static int nbd_start_device_ioctl(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int ret;

	ret = nbd_start_device(nbd);
	if (ret)
		return ret;

	if (max_part)
		set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
	mutex_unlock(&nbd->config_lock);
	ret = wait_event_interruptible(config->recv_wq,
				       atomic_read(&config->recv_threads) == 0);
	if (ret) {
		sock_shutdown(nbd);
		nbd_clear_que(nbd);
	}

	flush_workqueue(nbd->recv_workq);
	mutex_lock(&nbd->config_lock);
	nbd_bdev_reset(nbd);
	/* user requested, ignore socket errors */
	if (test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags))
		ret = 0;
	if (test_bit(NBD_RT_TIMEDOUT, &config->runtime_flags))
		ret = -ETIMEDOUT;
	return ret;
}

static void nbd_clear_sock_ioctl(struct nbd_device *nbd)
{
	nbd_clear_sock(nbd);
	disk_force_media_change(nbd->disk);
	nbd_bdev_reset(nbd);
	if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
}

static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout)
{
	nbd->tag_set.timeout = timeout * HZ;
	if (timeout)
		blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
	else
		blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ);
}
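
/*
 * Illustrative note (not in the original source): NBD_SET_TIMEOUT with
 * arg == 60 gives a 60-second per-command timeout; arg == 0 disables
 * timeout-driven disconnection but keeps the block layer's timer at the
 * 30-second default, so nbd_xmit_timeout() can still log stuck requests.
 */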

/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;
	loff_t bytesize;

	switch (cmd) {
	case NBD_DISCONNECT:
		return nbd_disconnect(nbd);
	case NBD_CLEAR_SOCK:
		nbd_clear_sock_ioctl(nbd);
		return 0;
	case NBD_SET_SOCK:
		return nbd_add_socket(nbd, arg, false);
	case NBD_SET_BLKSIZE:
		return nbd_set_size(nbd, config->bytesize, arg);
	case NBD_SET_SIZE:
		return nbd_set_size(nbd, arg, nbd_blksize(config));
	case NBD_SET_SIZE_BLOCKS:
		if (check_shl_overflow(arg, config->blksize_bits, &bytesize))
			return -EINVAL;
		return nbd_set_size(nbd, bytesize, nbd_blksize(config));
	case NBD_SET_TIMEOUT:
		nbd_set_cmd_timeout(nbd, arg);
		return 0;

	case NBD_SET_FLAGS:
		config->flags = arg;
		return 0;
	case NBD_DO_IT:
		return nbd_start_device_ioctl(nbd);
	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only. The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;
	case NBD_PRINT_DEBUG:
		/*
		 * For compatibility only, we no longer keep a list of
		 * outstanding requests.
		 */
		return 0;
	}
	return -ENOTTY;
}

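/*
 * Illustrative sketch (not part of the driver): a minimal userspace setup
 * sequence against the ioctl interface above, assuming sock_fd is an
 * already-connected TCP socket to an NBD server exporting `size` bytes:
 *
 *	int dev_fd = open("/dev/nbd0", O_RDWR);
 *	ioctl(dev_fd, NBD_SET_SOCK, sock_fd);
 *	ioctl(dev_fd, NBD_SET_SIZE, size);
 *	ioctl(dev_fd, NBD_DO_IT);	(blocks until disconnect or error)
 *
 * NBD_DO_IT (nbd_start_device_ioctl()) does not return until the recv
 * threads exit, which is why userspace clients typically fork before
 * issuing it.
 */
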
static int nbd_ioctl(struct block_device *bdev, blk_mode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	struct nbd_config *config = nbd->config;
	int error = -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* The block layer will pass back some non-nbd ioctls in case we have
	 * special handling for them, but we don't, so just return an error.
	 */
	if (_IOC_TYPE(cmd) != 0xab)
		return -EINVAL;

	mutex_lock(&nbd->config_lock);

	/* Don't allow ioctl operations on a nbd device that was created with
	 * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
	 */
	if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
	    (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
		error = __nbd_ioctl(bdev, nbd, cmd, arg);
	else
		dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
	mutex_unlock(&nbd->config_lock);
	return error;
}

static int nbd_alloc_and_init_config(struct nbd_device *nbd)
{
	struct nbd_config *config;

	if (WARN_ON(nbd->config))
		return -EINVAL;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
	if (!config) {
		module_put(THIS_MODULE);
		return -ENOMEM;
	}

	atomic_set(&config->recv_threads, 0);
	init_waitqueue_head(&config->recv_wq);
	init_waitqueue_head(&config->conn_wait);
	config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
	atomic_set(&config->live_connections, 0);

	nbd->config = config;
	/*
	 * Order refcount_set(&nbd->config_refs, 1) and the nbd->config
	 * assignment; its pair is the barrier in nbd_get_config_unlocked().
	 * So nbd_get_config_unlocked() won't see nbd->config as null after
	 * refcount_inc_not_zero() succeeds.
	 */
	smp_mb__before_atomic();
	refcount_set(&nbd->config_refs, 1);

	return 0;
}

static int nbd_open(struct gendisk *disk, blk_mode_t mode)
{
	struct nbd_device *nbd;
	struct nbd_config *config;
	int ret = 0;

	mutex_lock(&nbd_index_mutex);
	nbd = disk->private_data;
	if (!nbd) {
		ret = -ENXIO;
		goto out;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		ret = -ENXIO;
		goto out;
	}

	config = nbd_get_config_unlocked(nbd);
	if (!config) {
		mutex_lock(&nbd->config_lock);
		if (refcount_inc_not_zero(&nbd->config_refs)) {
			mutex_unlock(&nbd->config_lock);
			goto out;
		}
		ret = nbd_alloc_and_init_config(nbd);
		if (ret) {
			mutex_unlock(&nbd->config_lock);
			goto out;
		}

		refcount_inc(&nbd->refs);
		mutex_unlock(&nbd->config_lock);
		if (max_part)
			set_bit(GD_NEED_PART_SCAN, &disk->state);
	} else if (nbd_disconnected(config)) {
		if (max_part)
			set_bit(GD_NEED_PART_SCAN, &disk->state);
	}
out:
	mutex_unlock(&nbd_index_mutex);
	return ret;
}

static void nbd_release(struct gendisk *disk)
{
	struct nbd_device *nbd = disk->private_data;

	if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
			disk_openers(disk) == 0)
		nbd_disconnect_and_put(nbd);

	nbd_config_put(nbd);
	nbd_put(nbd);
}

static void nbd_free_disk(struct gendisk *disk)
{
	struct nbd_device *nbd = disk->private_data;

	kfree(nbd);
}

static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.open =		nbd_open,
	.release =	nbd_release,
	.ioctl =	nbd_ioctl,
	.compat_ioctl =	nbd_ioctl,
	.free_disk =	nbd_free_disk,
};

#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;

	if (nbd->pid)
		seq_printf(s, "recv: %d\n", nbd->pid);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(nbd_dbg_tasks);

static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;
	u32 flags = nbd->config->flags;

	seq_printf(s, "Hex: 0x%08x\n\n", flags);

	seq_puts(s, "Known flags:\n");

	if (flags & NBD_FLAG_HAS_FLAGS)
		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
	if (flags & NBD_FLAG_READ_ONLY)
		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
	if (flags & NBD_FLAG_SEND_FLUSH)
		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
	if (flags & NBD_FLAG_SEND_FUA)
		seq_puts(s, "NBD_FLAG_SEND_FUA\n");
	if (flags & NBD_FLAG_SEND_TRIM)
		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(nbd_dbg_flags);

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	struct dentry *dir;
	struct nbd_config *config = nbd->config;

	if (!nbd_dbg_dir)
		return -EIO;

	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
	if (IS_ERR(dir)) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
			nbd_name(nbd));
		return -EIO;
	}
	config->dbg_dir = dir;

	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_fops);
	debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
	debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
	debugfs_create_u32("blocksize_bits", 0444, dir, &config->blksize_bits);
	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_fops);

	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
	debugfs_remove_recursive(nbd->config->dbg_dir);
}

static int nbd_dbg_init(void)
{
	struct dentry *dbg_dir;

	dbg_dir = debugfs_create_dir("nbd", NULL);
	if (IS_ERR(dbg_dir))
		return -EIO;

	nbd_dbg_dir = dbg_dir;

	return 0;
}

static void nbd_dbg_close(void)
{
	debugfs_remove_recursive(nbd_dbg_dir);
}

#else /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
	return 0;
}

static void nbd_dbg_close(void)
{
}

#endif

static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
			    unsigned int hctx_idx, unsigned int numa_node)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
	cmd->nbd = set->driver_data;
	cmd->flags = 0;
	mutex_init(&cmd->lock);
	return 0;
}

static const struct blk_mq_ops nbd_mq_ops = {
	.queue_rq	= nbd_queue_rq,
	.complete	= nbd_complete_rq,
	.init_request	= nbd_init_request,
	.timeout	= nbd_xmit_timeout,
};

static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
{
	struct queue_limits lim = {
		.max_hw_sectors		= 65536,
		.max_user_sectors	= 256,
		.max_segments		= USHRT_MAX,
		.max_segment_size	= UINT_MAX,
	};
	struct nbd_device *nbd;
	struct gendisk *disk;
	int err = -ENOMEM;

	nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
	if (!nbd)
		goto out;

	nbd->tag_set.ops = &nbd_mq_ops;
	nbd->tag_set.nr_hw_queues = 1;
	nbd->tag_set.queue_depth = 128;
	nbd->tag_set.numa_node = NUMA_NO_NODE;
	nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
	nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
		BLK_MQ_F_BLOCKING;
	nbd->tag_set.driver_data = nbd;
	INIT_WORK(&nbd->remove_work, nbd_dev_remove_work);
	nbd->backend = NULL;

	err = blk_mq_alloc_tag_set(&nbd->tag_set);
	if (err)
		goto out_free_nbd;

	mutex_lock(&nbd_index_mutex);
	if (index >= 0) {
		err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
				GFP_KERNEL);
		if (err == -ENOSPC)
			err = -EEXIST;
	} else {
		err = idr_alloc(&nbd_index_idr, nbd, 0,
				(MINORMASK >> part_shift) + 1, GFP_KERNEL);
		if (err >= 0)
			index = err;
	}
	nbd->index = index;
	mutex_unlock(&nbd_index_mutex);
	if (err < 0)
		goto out_free_tags;

	disk = blk_mq_alloc_disk(&nbd->tag_set, &lim, NULL);
	if (IS_ERR(disk)) {
		err = PTR_ERR(disk);
		goto out_free_idr;
	}
	nbd->disk = disk;

	nbd->recv_workq = alloc_workqueue("nbd%d-recv",
					  WQ_MEM_RECLAIM | WQ_HIGHPRI |
					  WQ_UNBOUND, 0, nbd->index);
	if (!nbd->recv_workq) {
		dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
		err = -ENOMEM;
		goto out_err_disk;
	}

	/*
	 * Tell the block layer that we are not a rotational device
	 */
	blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);

	mutex_init(&nbd->config_lock);
	refcount_set(&nbd->config_refs, 0);
	/*
	 * Start out with a zero refcount to keep other threads from using
	 * this device until it is fully initialized.
	 */
	refcount_set(&nbd->refs, 0);
	INIT_LIST_HEAD(&nbd->list);
	disk->major = NBD_MAJOR;
	disk->first_minor = index << part_shift;
	disk->minors = 1 << part_shift;
	disk->fops = &nbd_fops;
	disk->private_data = nbd;
	sprintf(disk->disk_name, "nbd%d", index);
	err = add_disk(disk);
	if (err)
		goto out_free_work;

	/*
	 * Now publish the device.
	 */
	refcount_set(&nbd->refs, refs);
	nbd_total_devices++;
	return nbd;

out_free_work:
	destroy_workqueue(nbd->recv_workq);
out_err_disk:
	put_disk(disk);
out_free_idr:
	mutex_lock(&nbd_index_mutex);
	idr_remove(&nbd_index_idr, index);
	mutex_unlock(&nbd_index_mutex);
out_free_tags:
	blk_mq_free_tag_set(&nbd->tag_set);
out_free_nbd:
	kfree(nbd);
out:
	return ERR_PTR(err);
}

static struct nbd_device *nbd_find_get_unused(void)
{
	struct nbd_device *nbd;
	int id;

	lockdep_assert_held(&nbd_index_mutex);

	idr_for_each_entry(&nbd_index_idr, nbd, id) {
		if (refcount_read(&nbd->config_refs) ||
		    test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags))
			continue;
		if (refcount_inc_not_zero(&nbd->refs))
			return nbd;
	}

	return NULL;
}

/* Netlink interface. */
static const struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
	[NBD_ATTR_INDEX]		=	{ .type = NLA_U32 },
	[NBD_ATTR_SIZE_BYTES]		=	{ .type = NLA_U64 },
	[NBD_ATTR_BLOCK_SIZE_BYTES]	=	{ .type = NLA_U64 },
	[NBD_ATTR_TIMEOUT]		=	{ .type = NLA_U64 },
	[NBD_ATTR_SERVER_FLAGS]		=	{ .type = NLA_U64 },
	[NBD_ATTR_CLIENT_FLAGS]		=	{ .type = NLA_U64 },
	[NBD_ATTR_SOCKETS]		=	{ .type = NLA_NESTED},
	[NBD_ATTR_DEAD_CONN_TIMEOUT]	=	{ .type = NLA_U64 },
	[NBD_ATTR_DEVICE_LIST]		=	{ .type = NLA_NESTED},
	[NBD_ATTR_BACKEND_IDENTIFIER]	=	{ .type = NLA_STRING},
};

static const struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
	[NBD_SOCK_FD]			=	{ .type = NLA_U32 },
};
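
/*
 * Illustrative note (not in the original source): a connect message nests
 * its sockets as NBD_ATTR_SOCKETS -> NBD_SOCK_ITEM -> NBD_SOCK_FD, e.g.
 *
 *	NBD_ATTR_INDEX = 0
 *	NBD_ATTR_SIZE_BYTES = 1073741824
 *	NBD_ATTR_SOCKETS
 *		NBD_SOCK_ITEM
 *			NBD_SOCK_FD = <connected socket fd>
 *
 * which matches how nbd_genl_connect() below walks the attribute tree.
 */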

/* We don't use this right now since we don't parse the incoming list, but we
 * still want it here so userspace knows what to expect.
 */
static const struct nla_policy __attribute__((unused))
nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
	[NBD_DEVICE_INDEX]		=	{ .type = NLA_U32 },
	[NBD_DEVICE_CONNECTED]		=	{ .type = NLA_U8 },
};

static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	u64 bsize = nbd_blksize(config);
	u64 bytes = config->bytesize;

	if (info->attrs[NBD_ATTR_SIZE_BYTES])
		bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);

	if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES])
		bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);

	if (bytes != config->bytesize || bsize != nbd_blksize(config))
		return nbd_set_size(nbd, bytes, bsize);
	return 0;
}

static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd;
	struct nbd_config *config;
	int index = -1;
	int ret;
	bool put_dev = false;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (info->attrs[NBD_ATTR_INDEX]) {
		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);

		/*
		 * A too-large first_minor can cause duplicate creation of
		 * sysfs files/links, since index << part_shift might
		 * overflow, and MKDEV() expects that the max bits of
		 * first_minor is 20.
		 */
		if (index < 0 || index > MINORMASK >> part_shift) {
			pr_err("illegal input index %d\n", index);
			return -EINVAL;
		}
	}
	if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_SOCKETS)) {
		pr_err("must specify at least one socket\n");
		return -EINVAL;
	}
	if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_SIZE_BYTES)) {
		pr_err("must specify a size in bytes for the device\n");
		return -EINVAL;
	}
again:
	mutex_lock(&nbd_index_mutex);
	if (index == -1) {
		nbd = nbd_find_get_unused();
	} else {
		nbd = idr_find(&nbd_index_idr, index);
		if (nbd) {
			if ((test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) &&
			     test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) ||
			    !refcount_inc_not_zero(&nbd->refs)) {
				mutex_unlock(&nbd_index_mutex);
				pr_err("device at index %d is going down\n",
				       index);
				return -EINVAL;
			}
		}
	}
	mutex_unlock(&nbd_index_mutex);

	if (!nbd) {
		nbd = nbd_dev_add(index, 2);
		if (IS_ERR(nbd)) {
			pr_err("failed to add new device\n");
			return PTR_ERR(nbd);
		}
	}

	mutex_lock(&nbd->config_lock);
	if (refcount_read(&nbd->config_refs)) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		if (index == -1)
			goto again;
		pr_err("nbd%d already in use\n", index);
		return -EBUSY;
	}

	ret = nbd_alloc_and_init_config(nbd);
	if (ret) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		pr_err("couldn't allocate config\n");
		return ret;
	}

	config = nbd->config;
	set_bit(NBD_RT_BOUND, &config->runtime_flags);
	ret = nbd_genl_size_set(info, nbd);
	if (ret)
		goto out;

	if (info->attrs[NBD_ATTR_TIMEOUT])
		nbd_set_cmd_timeout(nbd,
				    nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
		config->dead_conn_timeout =
			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
		config->dead_conn_timeout *= HZ;
	}
	if (info->attrs[NBD_ATTR_SERVER_FLAGS])
		config->flags =
			nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);

		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
			/*
			 * We have 1 ref to keep the device around, and then 1
			 * ref for our current operation here, which will be
			 * inherited by the config. If DESTROY_ON_DISCONNECT
			 * was already set then we know that extra ref is not
			 * held, so we don't need the put_dev.
			 */
			if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
					      &nbd->flags))
				put_dev = true;
		} else {
			if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
					       &nbd->flags))
				refcount_inc(&nbd->refs);
		}
		if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
			set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
				&config->runtime_flags);
		}
	}

	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;
		int rem, fd;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
				    rem) {
			struct nlattr *socks[NBD_SOCK_MAX+1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				pr_err("socks must be embedded in a SOCK_ITEM attr\n");
				ret = -EINVAL;
				goto out;
			}
			ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
							  attr,
							  nbd_sock_policy,
							  info->extack);
			if (ret != 0) {
				pr_err("error processing sock list\n");
				ret = -EINVAL;
				goto out;
			}
			if (!socks[NBD_SOCK_FD])
				continue;
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_add_socket(nbd, fd, true);
			if (ret)
				goto out;
		}
	}
	ret = nbd_start_device(nbd);
	if (ret)
		goto out;
	if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) {
		nbd->backend = nla_strdup(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER],
					  GFP_KERNEL);
		if (!nbd->backend) {
			ret = -ENOMEM;
			goto out;
		}
	}
	ret = device_create_file(disk_to_dev(nbd->disk), &backend_attr);
	if (ret) {
		dev_err(disk_to_dev(nbd->disk),
			"device_create_file failed for backend!\n");
		goto out;
	}
	set_bit(NBD_RT_HAS_BACKEND_FILE, &config->runtime_flags);
out:
	mutex_unlock(&nbd->config_lock);
	if (!ret) {
		set_bit(NBD_RT_HAS_CONFIG_REF, &config->runtime_flags);
		refcount_inc(&nbd->config_refs);
		nbd_connect_reply(info, nbd->index);
	}
	nbd_config_put(nbd);
	if (put_dev)
		nbd_put(nbd);
	return ret;
}
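
/*
 * A minimal userspace sketch of driving NBD_CMD_CONNECT, included for
 * illustration only and compiled out below. It assumes libnl-genl-3 and
 * a TCP socket on which the NBD handshake with the server has already
 * been completed; example_nbd_connect() is a hypothetical name and all
 * error handling is elided for brevity.
 */
#if 0
#include <stdint.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nbd-netlink.h>

static int example_nbd_connect(int sock_fd, uint64_t size_bytes)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nlattr *socks, *item;
	struct nl_msg *msg;
	int family, err;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, NBD_GENL_FAMILY_NAME);

	msg = nlmsg_alloc();
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NBD_CMD_CONNECT, NBD_GENL_VERSION);
	nla_put_u32(msg, NBD_ATTR_INDEX, 0);	/* use /dev/nbd0 */
	nla_put_u64(msg, NBD_ATTR_SIZE_BYTES, size_bytes);
	nla_put_u64(msg, NBD_ATTR_BLOCK_SIZE_BYTES, 4096);

	/* Nest the socket fd exactly as nbd_sock_policy expects. */
	socks = nla_nest_start(msg, NBD_ATTR_SOCKETS);
	item = nla_nest_start(msg, NBD_SOCK_ITEM);
	nla_put_u32(msg, NBD_SOCK_FD, sock_fd);
	nla_nest_end(msg, item);
	nla_nest_end(msg, socks);

	/* On success the kernel replies with the chosen NBD_ATTR_INDEX. */
	err = nl_send_auto(sk, msg);
	nlmsg_free(msg);
	nl_socket_free(sk);
	return err < 0 ? -1 : 0;
}
#endif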

static void nbd_disconnect_and_put(struct nbd_device *nbd)
{
	mutex_lock(&nbd->config_lock);
	nbd_disconnect(nbd);
	sock_shutdown(nbd);
	wake_up(&nbd->config->conn_wait);
	/*
	 * Make sure the recv thread has finished, so that we can safely call
	 * nbd_clear_que() to cancel the inflight I/Os.
	 */
	flush_workqueue(nbd->recv_workq);
	nbd_clear_que(nbd);
	nbd->task_setup = NULL;
	mutex_unlock(&nbd->config_lock);

	if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
}

static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd;
	int index;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_INDEX)) {
		pr_err("must specify an index to disconnect\n");
		return -EINVAL;
	}
	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	mutex_lock(&nbd_index_mutex);
	nbd = idr_find(&nbd_index_idr, index);
	if (!nbd) {
		mutex_unlock(&nbd_index_mutex);
		pr_err("couldn't find device at index %d\n", index);
		return -EINVAL;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		pr_err("device at index %d is going down\n", index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);
	if (!refcount_inc_not_zero(&nbd->config_refs))
		goto put_nbd;
	nbd_disconnect_and_put(nbd);
	nbd_config_put(nbd);
put_nbd:
	nbd_put(nbd);
	return 0;
}
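
/*
 * For illustration, the matching userspace request is just the command
 * plus an index (hypothetical sketch, reusing the libnl-genl-3 setup from
 * the connect example above):
 *
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
 *		    NBD_CMD_DISCONNECT, NBD_GENL_VERSION);
 *	nla_put_u32(msg, NBD_ATTR_INDEX, 0);	(disconnect /dev/nbd0)
 */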

static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd = NULL;
	struct nbd_config *config;
	int index;
	int ret = 0;
	bool put_dev = false;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_INDEX)) {
		pr_err("must specify a device to reconfigure\n");
		return -EINVAL;
	}
	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	mutex_lock(&nbd_index_mutex);
	nbd = idr_find(&nbd_index_idr, index);
	if (!nbd) {
		mutex_unlock(&nbd_index_mutex);
		pr_err("couldn't find a device at index %d\n", index);
		return -EINVAL;
	}
	if (nbd->backend) {
		if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) {
			if (nla_strcmp(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER],
				       nbd->backend)) {
				mutex_unlock(&nbd_index_mutex);
				dev_err(nbd_to_dev(nbd),
					"backend image doesn't match with %s\n",
					nbd->backend);
				return -EINVAL;
			}
		} else {
			mutex_unlock(&nbd_index_mutex);
			dev_err(nbd_to_dev(nbd), "must specify backend\n");
			return -EINVAL;
		}
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		pr_err("device at index %d is going down\n", index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);

	config = nbd_get_config_unlocked(nbd);
	if (!config) {
		dev_err(nbd_to_dev(nbd),
			"not configured, cannot reconfigure\n");
		nbd_put(nbd);
		return -EINVAL;
	}

	mutex_lock(&nbd->config_lock);
	if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
	    !nbd->pid) {
		dev_err(nbd_to_dev(nbd),
			"not configured, cannot reconfigure\n");
		ret = -EINVAL;
		goto out;
	}

	ret = nbd_genl_size_set(info, nbd);
	if (ret)
		goto out;

	if (info->attrs[NBD_ATTR_TIMEOUT])
		nbd_set_cmd_timeout(nbd,
				    nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
		config->dead_conn_timeout =
			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
		config->dead_conn_timeout *= HZ;
	}
	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);

		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
			if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
					      &nbd->flags))
				put_dev = true;
		} else {
			if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
					       &nbd->flags))
				refcount_inc(&nbd->refs);
		}

		if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
			set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
				&config->runtime_flags);
		} else {
			clear_bit(NBD_RT_DISCONNECT_ON_CLOSE,
				  &config->runtime_flags);
		}
	}

	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;
		int rem, fd;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
				    rem) {
			struct nlattr *socks[NBD_SOCK_MAX+1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				pr_err("socks must be embedded in a SOCK_ITEM attr\n");
				ret = -EINVAL;
				goto out;
			}
			ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
							  attr,
							  nbd_sock_policy,
							  info->extack);
			if (ret != 0) {
				pr_err("error processing sock list\n");
				ret = -EINVAL;
				goto out;
			}
			if (!socks[NBD_SOCK_FD])
				continue;
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_reconnect_socket(nbd, fd);
			if (ret) {
				if (ret == -ENOSPC)
					ret = 0;
				goto out;
			}
			dev_info(nbd_to_dev(nbd), "reconnected socket\n");
		}
	}
out:
	mutex_unlock(&nbd->config_lock);
	nbd_config_put(nbd);
	nbd_put(nbd);
	if (put_dev)
		nbd_put(nbd);
	return ret;
}
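
/*
 * Notes for reconfigure callers (illustrative): NBD_ATTR_DEAD_CONN_TIMEOUT
 * is given in seconds and stored in jiffies (a value of 30 becomes
 * 30 * HZ), NBD_ATTR_SOCKETS uses the same NBD_SOCK_ITEM/NBD_SOCK_FD
 * nesting as for connect, and -ENOSPC from nbd_reconnect_socket() is
 * deliberately reported as success, as seen above.
 */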

static const struct genl_small_ops nbd_connect_genl_ops[] = {
	{
		.cmd = NBD_CMD_CONNECT,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = nbd_genl_connect,
	},
	{
		.cmd = NBD_CMD_DISCONNECT,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = nbd_genl_disconnect,
	},
	{
		.cmd = NBD_CMD_RECONFIGURE,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = nbd_genl_reconfigure,
	},
	{
		.cmd = NBD_CMD_STATUS,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = nbd_genl_status,
	},
};

static const struct genl_multicast_group nbd_mcast_grps[] = {
	{ .name = NBD_GENL_MCAST_GROUP_NAME, },
};

static struct genl_family nbd_genl_family __ro_after_init = {
	.hdrsize = 0,
	.name = NBD_GENL_FAMILY_NAME,
	.version = NBD_GENL_VERSION,
	.module = THIS_MODULE,
	.small_ops = nbd_connect_genl_ops,
	.n_small_ops = ARRAY_SIZE(nbd_connect_genl_ops),
	.resv_start_op = NBD_CMD_STATUS + 1,
	.maxattr = NBD_ATTR_MAX,
	.netnsok = 1,
	.policy = nbd_attr_policy,
	.mcgrps = nbd_mcast_grps,
	.n_mcgrps = ARRAY_SIZE(nbd_mcast_grps),
};
MODULE_ALIAS_GENL_FAMILY(NBD_GENL_FAMILY_NAME);

static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
{
	struct nlattr *dev_opt;
	u8 connected = 0;
	int ret;

	/* This is a little racy, but for status it's ok. We don't take a
	 * ref here because we can't take one in the index == -1 case, as
	 * we would need to drop it under nbd_index_mutex, which could
	 * deadlock if we are configured to remove ourselves once we're
	 * disconnected.
	 */
	if (refcount_read(&nbd->config_refs))
		connected = 1;
	dev_opt = nla_nest_start_noflag(reply, NBD_DEVICE_ITEM);
	if (!dev_opt)
		return -EMSGSIZE;
	ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
	if (ret)
		return -EMSGSIZE;
	ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED,
			 connected);
	if (ret)
		return -EMSGSIZE;
	nla_nest_end(reply, dev_opt);
	return 0;
}

static int status_cb(int id, void *ptr, void *data)
{
	struct nbd_device *nbd = ptr;

	return populate_nbd_status(nbd, (struct sk_buff *)data);
}

static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *dev_list;
	struct sk_buff *reply;
	void *reply_head;
	size_t msg_size;
	int index = -1;
	int ret = -ENOMEM;

	if (info->attrs[NBD_ATTR_INDEX])
		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);

	mutex_lock(&nbd_index_mutex);

	msg_size = nla_total_size(nla_attr_size(sizeof(u32)) +
				  nla_attr_size(sizeof(u8)));
	msg_size *= (index == -1) ? nbd_total_devices : 1;

	reply = genlmsg_new(msg_size, GFP_KERNEL);
	if (!reply)
		goto out;
	reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0,
				       NBD_CMD_STATUS);
	if (!reply_head) {
		nlmsg_free(reply);
		goto out;
	}

	dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST);
	if (!dev_list) {
		nlmsg_free(reply);
		ret = -EMSGSIZE;
		goto out;
	}

	if (index == -1) {
		ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
		if (ret) {
			nlmsg_free(reply);
			goto out;
		}
	} else {
		struct nbd_device *nbd;

		nbd = idr_find(&nbd_index_idr, index);
		if (nbd) {
			ret = populate_nbd_status(nbd, reply);
			if (ret) {
				nlmsg_free(reply);
				goto out;
			}
		}
	}
	nla_nest_end(reply, dev_list);
	genlmsg_end(reply, reply_head);
	ret = genlmsg_reply(reply, info);
out:
	mutex_unlock(&nbd_index_mutex);
	return ret;
}
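
/*
 * A minimal userspace sketch of parsing the NBD_CMD_STATUS reply built
 * above, included for illustration only and compiled out. It assumes
 * libnl-genl-3; example_status_cb() is a hypothetical NL_CB_VALID
 * receive callback and error handling is elided.
 */
#if 0
#include <stdio.h>
#include <netlink/genl/genl.h>
#include <linux/nbd-netlink.h>

static int example_status_cb(struct nl_msg *msg, void *arg)
{
	struct nlattr *tb[NBD_ATTR_MAX + 1];
	struct nlattr *item[NBD_DEVICE_ATTR_MAX + 1];
	struct nlattr *dev;
	int rem;

	genlmsg_parse(nlmsg_hdr(msg), 0, tb, NBD_ATTR_MAX, NULL);
	if (!tb[NBD_ATTR_DEVICE_LIST])
		return NL_OK;

	/* One NBD_DEVICE_ITEM per device, as built by populate_nbd_status(). */
	nla_for_each_nested(dev, tb[NBD_ATTR_DEVICE_LIST], rem) {
		nla_parse_nested(item, NBD_DEVICE_ATTR_MAX, dev, NULL);
		if (!item[NBD_DEVICE_INDEX] || !item[NBD_DEVICE_CONNECTED])
			continue;
		printf("nbd%u: %s\n", nla_get_u32(item[NBD_DEVICE_INDEX]),
		       nla_get_u8(item[NBD_DEVICE_CONNECTED]) ?
				"connected" : "idle");
	}
	return NL_OK;
}
#endif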

static void nbd_connect_reply(struct genl_info *info, int index)
{
	struct sk_buff *skb;
	void *msg_head;
	int ret;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return;
	msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
				     NBD_CMD_CONNECT);
	if (!msg_head) {
		nlmsg_free(skb);
		return;
	}
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	if (ret) {
		nlmsg_free(skb);
		return;
	}
	genlmsg_end(skb, msg_head);
	genlmsg_reply(skb, info);
}

static void nbd_mcast_index(int index)
{
	struct sk_buff *skb;
	void *msg_head;
	int ret;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return;
	msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
			       NBD_CMD_LINK_DEAD);
	if (!msg_head) {
		nlmsg_free(skb);
		return;
	}
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	if (ret) {
		nlmsg_free(skb);
		return;
	}
	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);
}
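
/*
 * Userspace can watch for these link-dead events by joining the multicast
 * group (illustrative sketch, assumes libnl-genl-3):
 *
 *	int grp = genl_ctrl_resolve_grp(sk, NBD_GENL_FAMILY_NAME,
 *					NBD_GENL_MCAST_GROUP_NAME);
 *	nl_socket_add_membership(sk, grp);
 *	nl_recvmsgs_default(sk);	(each NBD_CMD_LINK_DEAD message
 *					 carries the NBD_ATTR_INDEX of the
 *					 affected device)
 */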

static void nbd_dead_link_work(struct work_struct *work)
{
	struct link_dead_args *args = container_of(work, struct link_dead_args,
						   work);

	nbd_mcast_index(args->index);
	kfree(args);
}

static int __init nbd_init(void)
{
	int i;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		pr_err("max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that users can know the maximum number of
		 * partitions the kernel should be able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk. E.g. max_part = 16 gives part_shift =
		 * fls(16) = 5 and max_part = (1UL << 5) - 1 = 31.
		 */
		max_part = (1UL << part_shift) - 1;
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	if (register_blkdev(NBD_MAJOR, "nbd"))
		return -EIO;

	nbd_del_wq = alloc_workqueue("nbd-del", WQ_UNBOUND, 0);
	if (!nbd_del_wq) {
		unregister_blkdev(NBD_MAJOR, "nbd");
		return -ENOMEM;
	}

	if (genl_register_family(&nbd_genl_family)) {
		destroy_workqueue(nbd_del_wq);
		unregister_blkdev(NBD_MAJOR, "nbd");
		return -EINVAL;
	}
	nbd_dbg_init();

	for (i = 0; i < nbds_max; i++)
		nbd_dev_add(i, 1);
	return 0;
}

static int nbd_exit_cb(int id, void *ptr, void *data)
{
	struct list_head *list = (struct list_head *)data;
	struct nbd_device *nbd = ptr;

	/* Skip nbd devices that are being removed asynchronously */
	if (refcount_read(&nbd->refs))
		list_add_tail(&nbd->list, list);

	return 0;
}

static void __exit nbd_cleanup(void)
{
	struct nbd_device *nbd;
	LIST_HEAD(del_list);

	/*
	 * Unregister netlink interface prior to waiting
	 * for the completion of netlink commands.
	 */
	genl_unregister_family(&nbd_genl_family);

	nbd_dbg_close();

	mutex_lock(&nbd_index_mutex);
	idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list);
	mutex_unlock(&nbd_index_mutex);

	while (!list_empty(&del_list)) {
		nbd = list_first_entry(&del_list, struct nbd_device, list);
		list_del_init(&nbd->list);
		if (refcount_read(&nbd->config_refs))
			pr_err("possibly leaking nbd_config (ref %d)\n",
			       refcount_read(&nbd->config_refs));
		if (refcount_read(&nbd->refs) != 1)
			pr_err("possibly leaking a device\n");
		nbd_put(nbd);
	}

	/* Also wait for nbd_dev_remove_work() to complete */
	destroy_workqueue(nbd_del_wq);

	idr_destroy(&nbd_index_idr);
	unregister_blkdev(NBD_MAJOR, "nbd");
}

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)");
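
/*
 * Example usage (illustrative): pre-create four devices with up to 15
 * partitions each:
 *
 *	modprobe nbd nbds_max=4 max_part=15
 */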