// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * (part of code stolen from loop.c)
 */

#define pr_fmt(fmt) "nbd: " fmt

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
#include <linux/nbd-netlink.h>
#include <net/genetlink.h>

#define CREATE_TRACE_POINTS
#include <trace/events/nbd.h>

static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static struct workqueue_struct *nbd_del_wq;
static int nbd_total_devices = 0;

struct nbd_sock {
	struct socket *sock;
	struct mutex tx_lock;
	struct request *pending;
	int sent;
	bool dead;
	int fallback_index;
	int cookie;
};

struct recv_thread_args {
	struct work_struct work;
	struct nbd_device *nbd;
	int index;
};

struct link_dead_args {
	struct work_struct work;
	int index;
};

#define NBD_RT_TIMEDOUT			0
#define NBD_RT_DISCONNECT_REQUESTED	1
#define NBD_RT_DISCONNECTED		2
#define NBD_RT_HAS_PID_FILE		3
#define NBD_RT_HAS_CONFIG_REF		4
#define NBD_RT_BOUND			5
#define NBD_RT_DISCONNECT_ON_CLOSE	6
#define NBD_RT_HAS_BACKEND_FILE		7

#define NBD_DESTROY_ON_DISCONNECT	0
#define NBD_DISCONNECT_REQUESTED	1

struct nbd_config {
	u32 flags;
	unsigned long runtime_flags;
	u64 dead_conn_timeout;

	struct nbd_sock **socks;
	int num_connections;
	atomic_t live_connections;
	wait_queue_head_t conn_wait;

	atomic_t recv_threads;
	wait_queue_head_t recv_wq;
	unsigned int blksize_bits;
	loff_t bytesize;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};

static inline unsigned int nbd_blksize(struct nbd_config *config)
{
	return 1u << config->blksize_bits;
}
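
/*
 * Worked example (illustrative): the default block size is 1 KiB
 * (NBD_DEF_BLKSIZE_BITS == 10, so 1u << 10 == 1024). A client that
 * negotiates 4096-byte blocks ends up with blksize_bits == 12, because
 * nbd_set_size() stores __ffs(4096) == 12 and nbd_blksize() then
 * returns 1u << 12 == 4096.
 */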

struct nbd_device {
	struct blk_mq_tag_set tag_set;

	int index;
	refcount_t config_refs;
	refcount_t refs;
	struct nbd_config *config;
	struct mutex config_lock;
	struct gendisk *disk;
	struct workqueue_struct *recv_workq;
	struct work_struct remove_work;

	struct list_head list;
	struct task_struct *task_setup;

	unsigned long flags;
	pid_t pid; /* pid of nbd-client, if attached */

	char *backend;
};

#define NBD_CMD_REQUEUED	1
/*
 * This flag will be set if nbd_queue_rq() succeeds, and will be checked and
 * cleared in completion. Both setting and clearing of the flag are protected
 * by cmd->lock.
 */
#define NBD_CMD_INFLIGHT	2

struct nbd_cmd {
	struct nbd_device *nbd;
	struct mutex lock;
	int index;
	int cookie;
	int retries;
	blk_status_t status;
	unsigned long flags;
	u32 cmd_cookie;
};
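
/*
 * Informal lifecycle sketch (not authoritative): nbd_queue_rq() starts a
 * request, NBD_CMD_INFLIGHT is set once nbd_send_cmd() succeeds, and the
 * flag is cleared under cmd->lock by whichever path completes the request
 * first - recv_work() on a normal reply, nbd_xmit_timeout() on a timeout,
 * or nbd_clear_req() at teardown. NBD_CMD_REQUEUED marks commands bounced
 * back to blk-mq via nbd_requeue_cmd() so a requeue is issued only once
 * per attempt.
 */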

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_DEF_BLKSIZE_BITS 10

static unsigned int nbds_max = 16;
static int max_part = 16;
static int part_shift;

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
static void nbd_config_put(struct nbd_device *nbd);
static void nbd_connect_reply(struct genl_info *info, int index);
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
static void nbd_dead_link_work(struct work_struct *work);
static void nbd_disconnect_and_put(struct nbd_device *nbd);

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}

static void nbd_requeue_cmd(struct nbd_cmd *cmd)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);

	if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
		blk_mq_requeue_request(req, true);
}

#define NBD_COOKIE_BITS 32

static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	u32 tag = blk_mq_unique_tag(req);
	u64 cookie = cmd->cmd_cookie;

	return (cookie << NBD_COOKIE_BITS) | tag;
}

static u32 nbd_handle_to_tag(u64 handle)
{
	return (u32)handle;
}

static u32 nbd_handle_to_cookie(u64 handle)
{
	return (u32)(handle >> NBD_COOKIE_BITS);
}
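
/*
 * Worked example (illustrative values): with cmd_cookie == 3 and a blk-mq
 * unique tag of 0x00020005 (hw queue 2, tag 5), the handle sent on the
 * wire is
 *
 *	(3ULL << NBD_COOKIE_BITS) | 0x00020005 == 0x0000000300020005
 *
 * and the receive path recovers both halves with nbd_handle_to_cookie()
 * and nbd_handle_to_tag(). Because a requeued command carries a fresh
 * cmd_cookie, a reply that arrives after a timeout can be recognized as
 * stale even though it reuses the same tag.
 */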

static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}

static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", nbd->pid);
}

static const struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = 0444},
	.show = pid_show,
};

static ssize_t backend_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%s\n", nbd->backend ?: "");
}

static const struct device_attribute backend_attr = {
	.attr = { .name = "backend", .mode = 0444},
	.show = backend_show,
};

static void nbd_dev_remove(struct nbd_device *nbd)
{
	struct gendisk *disk = nbd->disk;

	del_gendisk(disk);
	put_disk(disk);
	blk_mq_free_tag_set(&nbd->tag_set);

	/*
	 * Remove from idr after del_gendisk() completes, so if the same ID is
	 * reused, the following add_disk() will succeed.
	 */
	mutex_lock(&nbd_index_mutex);
	idr_remove(&nbd_index_idr, nbd->index);
	mutex_unlock(&nbd_index_mutex);
	destroy_workqueue(nbd->recv_workq);
	kfree(nbd);
}

static void nbd_dev_remove_work(struct work_struct *work)
{
	nbd_dev_remove(container_of(work, struct nbd_device, remove_work));
}

static void nbd_put(struct nbd_device *nbd)
{
	if (!refcount_dec_and_test(&nbd->refs))
		return;

	/* Call del_gendisk() asynchronously to prevent deadlock */
	if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags))
		queue_work(nbd_del_wq, &nbd->remove_work);
	else
		nbd_dev_remove(nbd);
}

static int nbd_disconnected(struct nbd_config *config)
{
	return test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags) ||
		test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
}

static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
				int notify)
{
	if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
		struct link_dead_args *args;
		args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
		if (args) {
			INIT_WORK(&args->work, nbd_dead_link_work);
			args->index = nbd->index;
			queue_work(system_wq, &args->work);
		}
	}
	if (!nsock->dead) {
		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
		if (atomic_dec_return(&nbd->config->live_connections) == 0) {
			if (test_and_clear_bit(NBD_RT_DISCONNECT_REQUESTED,
					       &nbd->config->runtime_flags)) {
				set_bit(NBD_RT_DISCONNECTED,
					&nbd->config->runtime_flags);
				dev_info(nbd_to_dev(nbd),
					 "Disconnected due to user request.\n");
			}
		}
	}
	nsock->dead = true;
	nsock->pending = NULL;
	nsock->sent = 0;
}

static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
			loff_t blksize)
{
	if (!blksize)
		blksize = 1u << NBD_DEF_BLKSIZE_BITS;

	if (blk_validate_block_size(blksize))
		return -EINVAL;

	if (bytesize < 0)
		return -EINVAL;

	nbd->config->bytesize = bytesize;
	nbd->config->blksize_bits = __ffs(blksize);

	if (!nbd->pid)
		return 0;

	if (nbd->config->flags & NBD_FLAG_SEND_TRIM) {
		nbd->disk->queue->limits.discard_granularity = blksize;
		blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
	}
	blk_queue_logical_block_size(nbd->disk->queue, blksize);
	blk_queue_physical_block_size(nbd->disk->queue, blksize);

	if (max_part)
		set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
	if (!set_capacity_and_notify(nbd->disk, bytesize >> 9))
		kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
	return 0;
}
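
/*
 * Worked example (illustrative): for a 1 GiB export with 4096-byte blocks,
 * nbd_set_size(nbd, 1ULL << 30, 4096) stores bytesize == 1073741824 and
 * blksize_bits == __ffs(4096) == 12, then advertises a capacity of
 * (1ULL << 30) >> 9 == 2097152 512-byte sectors to the block layer.
 */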

static void nbd_complete_rq(struct request *req)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

	dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req,
		cmd->status ? "failed" : "done");

	blk_mq_end_request(req, cmd->status);
}

/*
 * Forcibly shut down the sockets, causing all listeners to error out.
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int i;

	if (config->num_connections == 0)
		return;
	if (test_and_set_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
		return;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];
		mutex_lock(&nsock->tx_lock);
		nbd_mark_nsock_dead(nbd, nsock, 0);
		mutex_unlock(&nsock->tx_lock);
	}
	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}

static u32 req_to_nbd_cmd_type(struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		return NBD_CMD_TRIM;
	case REQ_OP_FLUSH:
		return NBD_CMD_FLUSH;
	case REQ_OP_WRITE:
		return NBD_CMD_WRITE;
	case REQ_OP_READ:
		return NBD_CMD_READ;
	default:
		return U32_MAX;
	}
}

static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;

	if (!mutex_trylock(&cmd->lock))
		return BLK_EH_RESET_TIMER;

	if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
		mutex_unlock(&cmd->lock);
		return BLK_EH_DONE;
	}

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		cmd->status = BLK_STS_TIMEOUT;
		__clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
		mutex_unlock(&cmd->lock);
		goto done;
	}
	config = nbd->config;

	if (config->num_connections > 1 ||
	    (config->num_connections == 1 && nbd->tag_set.timeout)) {
		dev_err_ratelimited(nbd_to_dev(nbd),
				    "Connection timed out, retrying (%d/%d alive)\n",
				    atomic_read(&config->live_connections),
				    config->num_connections);
		/*
		 * Hooray we have more connections, requeue this IO, the submit
		 * path will put it on a real connection. Or if only one
		 * connection is configured, the submit path will wait until
		 * a new connection is reconfigured or until the dead connection
		 * timeout expires.
		 */
		if (config->socks) {
			if (cmd->index < config->num_connections) {
				struct nbd_sock *nsock =
					config->socks[cmd->index];
				mutex_lock(&nsock->tx_lock);
				/* We can have multiple outstanding requests, so
				 * we don't want to mark the nsock dead if we've
				 * already reconnected with a new socket, so
				 * only mark it dead if it's the same socket we
				 * were sent out on.
				 */
				if (cmd->cookie == nsock->cookie)
					nbd_mark_nsock_dead(nbd, nsock, 1);
				mutex_unlock(&nsock->tx_lock);
			}
			mutex_unlock(&cmd->lock);
			nbd_requeue_cmd(cmd);
			nbd_config_put(nbd);
			return BLK_EH_DONE;
		}
	}

	if (!nbd->tag_set.timeout) {
		/*
		 * Userspace sets timeout=0 to disable socket disconnection,
		 * so just warn and reset the timer.
		 */
		struct nbd_sock *nsock = config->socks[cmd->index];
		cmd->retries++;
		dev_info(nbd_to_dev(nbd), "Possible stuck request %p: control (%s@%llu,%uB). Runtime %u seconds\n",
			req, nbdcmd_to_ascii(req_to_nbd_cmd_type(req)),
			(unsigned long long)blk_rq_pos(req) << 9,
			blk_rq_bytes(req), (req->timeout / HZ) * cmd->retries);

		mutex_lock(&nsock->tx_lock);
		if (cmd->cookie != nsock->cookie) {
			nbd_requeue_cmd(cmd);
			mutex_unlock(&nsock->tx_lock);
			mutex_unlock(&cmd->lock);
			nbd_config_put(nbd);
			return BLK_EH_DONE;
		}
		mutex_unlock(&nsock->tx_lock);
		mutex_unlock(&cmd->lock);
		nbd_config_put(nbd);
		return BLK_EH_RESET_TIMER;
	}

	dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n");
	set_bit(NBD_RT_TIMEDOUT, &config->runtime_flags);
	cmd->status = BLK_STS_IOERR;
	__clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
	mutex_unlock(&cmd->lock);
	sock_shutdown(nbd);
	nbd_config_put(nbd);
done:
	blk_mq_complete_request(req);
	return BLK_EH_DONE;
}

/*
 * Send or receive packet. Return a positive value on success and a
 * negative value on failure; never return 0.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send,
		     struct iov_iter *iter, int msg_flags, int *sent)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock = config->socks[index]->sock;
	int result;
	struct msghdr msg;
	unsigned int noreclaim_flag;

	if (unlikely(!sock)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted %s on closed socket in sock_xmit\n",
				    (send ? "send" : "recv"));
		return -EINVAL;
	}

	msg.msg_iter = *iter;

	noreclaim_flag = memalloc_noreclaim_save();
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		sock->sk->sk_use_task_frag = false;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = sock_sendmsg(sock, &msg);
		else
			result = sock_recvmsg(sock, &msg, msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		if (sent)
			*sent += result;
	} while (msg_data_left(&msg));

	memalloc_noreclaim_restore(noreclaim_flag);

	return result;
}

/*
 * Different settings for sk->sk_sndtimeo can result in different return values
 * if there is a signal pending when we enter sendmsg, because reasons?
 */
static inline int was_interrupted(int result)
{
	return result == -ERESTARTSYS || result == -EINTR;
}

/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_config *config = nbd->config;
	struct nbd_sock *nsock = config->socks[index];
	int result;
	struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	unsigned long size = blk_rq_bytes(req);
	struct bio *bio;
	u64 handle;
	u32 type;
	u32 nbd_cmd_flags = 0;
	int sent = nsock->sent, skip = 0;

	iov_iter_kvec(&from, ITER_SOURCE, &iov, 1, sizeof(request));

	type = req_to_nbd_cmd_type(req);
	if (type == U32_MAX)
		return -EIO;

	if (rq_data_dir(req) == WRITE &&
	    (config->flags & NBD_FLAG_READ_ONLY)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Write on read-only\n");
		return -EIO;
	}

	if (req->cmd_flags & REQ_FUA)
		nbd_cmd_flags |= NBD_CMD_FLAG_FUA;

	/* We did a partial send previously, and we at least sent the whole
	 * request struct, so just go and send the rest of the pages in the
	 * request.
	 */
	if (sent) {
		if (sent >= sizeof(request)) {
			skip = sent - sizeof(request);

			/* initialize handle for tracing purposes */
			handle = nbd_cmd_handle(cmd);

			goto send_pages;
		}
		iov_iter_advance(&from, sent);
	} else {
		cmd->cmd_cookie++;
	}
	cmd->index = index;
	cmd->cookie = nsock->cookie;
	cmd->retries = 0;
	request.type = htonl(type | nbd_cmd_flags);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	handle = nbd_cmd_handle(cmd);
	request.cookie = cpu_to_be64(handle);

	trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		req, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &from,
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
	trace_nbd_header_sent(req, handle);
	if (result < 0) {
		if (was_interrupted(result)) {
			/* If we haven't sent anything we can just return BUSY,
			 * however if we have sent something we need to make
			 * sure we only allow this req to be sent until we are
			 * completely done.
			 */
			if (sent) {
				nsock->pending = req;
				nsock->sent = sent;
			}
			set_bit(NBD_CMD_REQUEUED, &cmd->flags);
			return BLK_STS_RESOURCE;
		}
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Send control failed (result %d)\n", result);
		return -EAGAIN;
	}
send_pages:
	if (type != NBD_CMD_WRITE)
		goto out;

	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);
			int flags = is_last ? 0 : MSG_MORE;

			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				req, bvec.bv_len);
			iov_iter_bvec(&from, ITER_SOURCE, &bvec, 1, bvec.bv_len);
			if (skip) {
				if (skip >= iov_iter_count(&from)) {
					skip -= iov_iter_count(&from);
					continue;
				}
				iov_iter_advance(&from, skip);
				skip = 0;
			}
			result = sock_xmit(nbd, index, 1, &from, flags, &sent);
			if (result < 0) {
				if (was_interrupted(result)) {
					/* We've already sent the header, we
					 * have no choice but to set pending and
					 * return BUSY.
					 */
					nsock->pending = req;
					nsock->sent = sent;
					set_bit(NBD_CMD_REQUEUED, &cmd->flags);
					return BLK_STS_RESOURCE;
				}
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EAGAIN;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}
out:
	trace_nbd_payload_sent(req, handle);
	nsock->pending = NULL;
	nsock->sent = 0;
	return 0;
}
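
/*
 * For reference, the control header built above is the NBD protocol's
 * fixed 28-byte, big-endian request (struct nbd_request in <linux/nbd.h>):
 *
 *	__be32 magic;	- NBD_REQUEST_MAGIC
 *	__be32 type;	- NBD_CMD_* | NBD_CMD_FLAG_*
 *	__be64 cookie;	- nbd_cmd_handle(): (cmd_cookie << 32) | unique tag
 *	__be64 from;	- byte offset, i.e. blk_rq_pos(req) << 9
 *	__be32 len;	- payload length; zero for FLUSH and DISC
 *
 * For NBD_CMD_WRITE the data payload follows this header on the socket;
 * for NBD_CMD_READ the server sends the data after its reply header
 * instead.
 */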

static int nbd_read_reply(struct nbd_device *nbd, int index,
			  struct nbd_reply *reply)
{
	struct kvec iov = {.iov_base = reply, .iov_len = sizeof(*reply)};
	struct iov_iter to;
	int result;

	reply->magic = 0;
	iov_iter_kvec(&to, ITER_DEST, &iov, 1, sizeof(*reply));
	result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
	if (result < 0) {
		if (!nbd_disconnected(nbd->config))
			dev_err(disk_to_dev(nbd->disk),
				"Receive control failed (result %d)\n", result);
		return result;
	}

	if (ntohl(reply->magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
			(unsigned long)ntohl(reply->magic));
		return -EPROTO;
	}

	return 0;
}
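
/*
 * The matching simple reply header is 16 bytes, also big-endian (struct
 * nbd_reply in <linux/nbd.h>):
 *
 *	__be32 magic;	- NBD_REPLY_MAGIC
 *	__be32 error;	- 0 on success
 *	__be64 cookie;	- echoed back from the request
 *
 * Any read payload follows this header and is consumed by
 * nbd_handle_reply() below.
 */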

/* An ERR_PTR return means something went wrong; inform userspace */
static struct nbd_cmd *nbd_handle_reply(struct nbd_device *nbd, int index,
					struct nbd_reply *reply)
{
	int result;
	struct nbd_cmd *cmd;
	struct request *req = NULL;
	u64 handle;
	u16 hwq;
	u32 tag;
	int ret = 0;

	handle = be64_to_cpu(reply->cookie);
	tag = nbd_handle_to_tag(handle);
	hwq = blk_mq_unique_tag_to_hwq(tag);
	if (hwq < nbd->tag_set.nr_hw_queues)
		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				       blk_mq_unique_tag_to_tag(tag));
	if (!req || !blk_mq_request_started(req)) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
			tag, req);
		return ERR_PTR(-ENOENT);
	}
	trace_nbd_header_received(req, handle);
	cmd = blk_mq_rq_to_pdu(req);

	mutex_lock(&cmd->lock);
	if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
		dev_err(disk_to_dev(nbd->disk), "Suspicious reply %d (status %u flags %lu)",
			tag, cmd->status, cmd->flags);
		ret = -ENOENT;
		goto out;
	}
	if (cmd->index != index) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply %d from different sock %d (expected %d)",
			tag, index, cmd->index);
		ret = -ENOENT;
		goto out;
	}
	if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
		dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
			req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
		ret = -ENOENT;
		goto out;
	}
	if (cmd->status != BLK_STS_OK) {
		dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n",
			req);
		ret = -ENOENT;
		goto out;
	}
	if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
		dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
			req);
		ret = -ENOENT;
		goto out;
	}
	if (ntohl(reply->error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply->error));
		cmd->status = BLK_STS_IOERR;
		goto out;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;
		struct iov_iter to;

		rq_for_each_segment(bvec, req, iter) {
			iov_iter_bvec(&to, ITER_DEST, &bvec, 1, bvec.bv_len);
			result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
			if (result < 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				/*
				 * If we've disconnected, we need to make sure we
				 * complete this request, otherwise error out
				 * and let the timeout stuff handle resubmitting
				 * this request onto another connection.
				 */
				if (nbd_disconnected(nbd->config)) {
					cmd->status = BLK_STS_IOERR;
					goto out;
				}
				ret = -EIO;
				goto out;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				req, bvec.bv_len);
		}
	}
out:
	trace_nbd_payload_received(req, handle);
	mutex_unlock(&cmd->lock);
	return ret ? ERR_PTR(ret) : cmd;
}

static void recv_work(struct work_struct *work)
{
	struct recv_thread_args *args = container_of(work,
						     struct recv_thread_args,
						     work);
	struct nbd_device *nbd = args->nbd;
	struct nbd_config *config = nbd->config;
	struct request_queue *q = nbd->disk->queue;
	struct nbd_sock *nsock;
	struct nbd_cmd *cmd;
	struct request *rq;

	while (1) {
		struct nbd_reply reply;

		if (nbd_read_reply(nbd, args->index, &reply))
			break;

		/*
		 * Grab .q_usage_counter so the request pool won't go away;
		 * this makes request use-after-free impossible during
		 * nbd_handle_reply(). If the queue is frozen, there can't be
		 * any inflight requests, so we needn't handle the incoming
		 * garbage message.
		 */
		if (!percpu_ref_tryget(&q->q_usage_counter)) {
			dev_err(disk_to_dev(nbd->disk), "%s: no io inflight\n",
				__func__);
			break;
		}

		cmd = nbd_handle_reply(nbd, args->index, &reply);
		if (IS_ERR(cmd)) {
			percpu_ref_put(&q->q_usage_counter);
			break;
		}

		rq = blk_mq_rq_from_pdu(cmd);
		if (likely(!blk_should_fake_timeout(rq->q))) {
			bool complete;

			mutex_lock(&cmd->lock);
			complete = __test_and_clear_bit(NBD_CMD_INFLIGHT,
							&cmd->flags);
			mutex_unlock(&cmd->lock);
			if (complete)
				blk_mq_complete_request(rq);
		}
		percpu_ref_put(&q->q_usage_counter);
	}

	nsock = config->socks[args->index];
	mutex_lock(&nsock->tx_lock);
	nbd_mark_nsock_dead(nbd, nsock, 1);
	mutex_unlock(&nsock->tx_lock);

	nbd_config_put(nbd);
	atomic_dec(&config->recv_threads);
	wake_up(&config->recv_wq);
	kfree(args);
}

static bool nbd_clear_req(struct request *req, void *data)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

	/* don't abort one completed request */
	if (blk_mq_request_completed(req))
		return true;

	mutex_lock(&cmd->lock);
	if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
		mutex_unlock(&cmd->lock);
		return true;
	}
	cmd->status = BLK_STS_IOERR;
	mutex_unlock(&cmd->lock);

	blk_mq_complete_request(req);
	return true;
}

static void nbd_clear_que(struct nbd_device *nbd)
{
	blk_mq_quiesce_queue(nbd->disk->queue);
	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
	blk_mq_unquiesce_queue(nbd->disk->queue);
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}

static int find_fallback(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int new_index = -1;
	struct nbd_sock *nsock = config->socks[index];
	int fallback = nsock->fallback_index;

	if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
		return new_index;

	if (config->num_connections <= 1) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Dead connection, failed to find a fallback\n");
		return new_index;
	}

	if (fallback >= 0 && fallback < config->num_connections &&
	    !config->socks[fallback]->dead)
		return fallback;

	if (nsock->fallback_index < 0 ||
	    nsock->fallback_index >= config->num_connections ||
	    config->socks[nsock->fallback_index]->dead) {
		int i;
		for (i = 0; i < config->num_connections; i++) {
			if (i == index)
				continue;
			if (!config->socks[i]->dead) {
				new_index = i;
				break;
			}
		}
		nsock->fallback_index = new_index;
		if (new_index < 0) {
			dev_err_ratelimited(disk_to_dev(nbd->disk),
					    "Dead connection, failed to find a fallback\n");
			return new_index;
		}
	}
	new_index = nsock->fallback_index;
	return new_index;
}

static int wait_for_reconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (!config->dead_conn_timeout)
		return 0;

	if (!wait_event_timeout(config->conn_wait,
				test_bit(NBD_RT_DISCONNECTED,
					 &config->runtime_flags) ||
				atomic_read(&config->live_connections) > 0,
				config->dead_conn_timeout))
		return 0;

	return !test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
}
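
/*
 * Note on units (illustrative): dead_conn_timeout is kept in jiffies; the
 * netlink path multiplies NBD_ATTR_DEAD_CONN_TIMEOUT (seconds) by HZ. So
 * a value of 30 means wait_for_reconnect() blocks for up to 30 * HZ
 * jiffies waiting for a live connection before the submit path gives up
 * and fails the I/O.
 */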

static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;
	struct nbd_sock *nsock;
	int ret;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Socks array is empty\n");
		return -EINVAL;
	}
	config = nbd->config;

	if (index >= config->num_connections) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		nbd_config_put(nbd);
		return -EINVAL;
	}
	cmd->status = BLK_STS_OK;
again:
	nsock = config->socks[index];
	mutex_lock(&nsock->tx_lock);
	if (nsock->dead) {
		int old_index = index;
		index = find_fallback(nbd, index);
		mutex_unlock(&nsock->tx_lock);
		if (index < 0) {
			if (wait_for_reconnect(nbd)) {
				index = old_index;
				goto again;
			}
			/* All the sockets should already be down at this point,
			 * we just want to make sure that DISCONNECTED is set so
			 * any requests that come in that were queued waiting
			 * for the reconnect timer don't trigger the timer again
			 * and instead just error out.
			 */
			sock_shutdown(nbd);
			nbd_config_put(nbd);
			return -EIO;
		}
		goto again;
	}

	/* Handle the case that we have a pending request that was partially
	 * transmitted that _has_ to be serviced first. We need to call requeue
	 * here so that it gets put _after_ the request that is already on the
	 * dispatch list.
	 */
	blk_mq_start_request(req);
	if (unlikely(nsock->pending && nsock->pending != req)) {
		nbd_requeue_cmd(cmd);
		ret = 0;
		goto out;
	}
	/*
	 * Some failures are related to the link going down, so anything that
	 * returns EAGAIN can be retried on a different socket.
	 */
	ret = nbd_send_cmd(nbd, cmd, index);
	/*
	 * Access to this flag is protected by cmd->lock, thus it's safe to set
	 * the flag after nbd_send_cmd() succeeds in sending the request to the
	 * server.
	 */
	if (!ret)
		__set_bit(NBD_CMD_INFLIGHT, &cmd->flags);
	else if (ret == -EAGAIN) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Request send failed, requeueing\n");
		nbd_mark_nsock_dead(nbd, nsock, 1);
		nbd_requeue_cmd(cmd);
		ret = 0;
	}
out:
	mutex_unlock(&nsock->tx_lock);
	nbd_config_put(nbd);
	return ret;
}

static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	int ret;

	/*
	 * Since we look at the bio's to send the request over the network we
	 * need to make sure the completion work doesn't mark this request done
	 * before we are done doing our send. This keeps us from dereferencing
	 * freed data if we have particularly fast completions (i.e. we get the
	 * completion before we exit sock_xmit on the last bvec) or in the case
	 * that the server is misbehaving (or there was an error) before we're
	 * done sending everything over the wire.
	 */
	mutex_lock(&cmd->lock);
	clear_bit(NBD_CMD_REQUEUED, &cmd->flags);

	/* We can be called directly from the user space process, which means we
	 * could possibly have signals pending so our sendmsg will fail. In
	 * this case we need to return that we are busy, otherwise error out as
	 * appropriate.
	 */
	ret = nbd_handle_cmd(cmd, hctx->queue_num);
	if (ret < 0)
		ret = BLK_STS_IOERR;
	else if (!ret)
		ret = BLK_STS_OK;
	mutex_unlock(&cmd->lock);

	return ret;
}

static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
				     int *err)
{
	struct socket *sock;

	*err = 0;
	sock = sockfd_lookup(fd, err);
	if (!sock)
		return NULL;

	if (sock->ops->shutdown == sock_no_shutdown) {
		dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
		*err = -EINVAL;
		sockfd_put(sock);
		return NULL;
	}

	return sock;
}

static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
			  bool netlink)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock;
	struct nbd_sock **socks;
	struct nbd_sock *nsock;
	int err;

	/* Arg will be cast to int, check it to avoid overflow */
	if (arg > INT_MAX)
		return -EINVAL;
	sock = nbd_get_socket(nbd, arg, &err);
	if (!sock)
		return err;

	/*
	 * We need to make sure we don't get any errant requests while we're
	 * reallocating the ->socks array.
	 */
	blk_mq_freeze_queue(nbd->disk->queue);

	if (!netlink && !nbd->task_setup &&
	    !test_bit(NBD_RT_BOUND, &config->runtime_flags))
		nbd->task_setup = current;

	if (!netlink &&
	    (nbd->task_setup != current ||
	     test_bit(NBD_RT_BOUND, &config->runtime_flags))) {
		dev_err(disk_to_dev(nbd->disk),
			"Device being setup by another task");
		err = -EBUSY;
		goto put_socket;
	}

	nsock = kzalloc(sizeof(*nsock), GFP_KERNEL);
	if (!nsock) {
		err = -ENOMEM;
		goto put_socket;
	}

	socks = krealloc(config->socks, (config->num_connections + 1) *
			 sizeof(struct nbd_sock *), GFP_KERNEL);
	if (!socks) {
		kfree(nsock);
		err = -ENOMEM;
		goto put_socket;
	}

	config->socks = socks;

	nsock->fallback_index = -1;
	nsock->dead = false;
	mutex_init(&nsock->tx_lock);
	nsock->sock = sock;
	nsock->pending = NULL;
	nsock->sent = 0;
	nsock->cookie = 0;
	socks[config->num_connections++] = nsock;
	atomic_inc(&config->live_connections);
	blk_mq_unfreeze_queue(nbd->disk->queue);

	return 0;

put_socket:
	blk_mq_unfreeze_queue(nbd->disk->queue);
	sockfd_put(sock);
	return err;
}

static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock, *old;
	struct recv_thread_args *args;
	int i;
	int err;

	sock = nbd_get_socket(nbd, arg, &err);
	if (!sock)
		return err;

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		if (!nsock->dead)
			continue;

		mutex_lock(&nsock->tx_lock);
		if (!nsock->dead) {
			mutex_unlock(&nsock->tx_lock);
			continue;
		}
		sk_set_memalloc(sock->sk);
		if (nbd->tag_set.timeout)
			sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		old = nsock->sock;
		nsock->fallback_index = -1;
		nsock->sock = sock;
		nsock->dead = false;
		INIT_WORK(&args->work, recv_work);
		args->index = i;
		args->nbd = nbd;
		nsock->cookie++;
		mutex_unlock(&nsock->tx_lock);
		sockfd_put(old);

		clear_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);

		/* We take the tx_lock in an error path in recv_work, so we
		 * need to queue_work outside of the tx_lock.
		 */
		queue_work(nbd->recv_workq, &args->work);

		atomic_inc(&config->live_connections);
		wake_up(&config->conn_wait);
		return 0;
	}
	sockfd_put(sock);
	kfree(args);
	return -ENOSPC;
}

static void nbd_bdev_reset(struct nbd_device *nbd)
{
	if (disk_openers(nbd->disk) > 1)
		return;
	set_capacity(nbd->disk, 0);
}

static void nbd_parse_flags(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (config->flags & NBD_FLAG_READ_ONLY)
		set_disk_ro(nbd->disk, true);
	else
		set_disk_ro(nbd->disk, false);
	if (config->flags & NBD_FLAG_SEND_FLUSH) {
		if (config->flags & NBD_FLAG_SEND_FUA)
			blk_queue_write_cache(nbd->disk->queue, true, true);
		else
			blk_queue_write_cache(nbd->disk->queue, true, false);
	} else
		blk_queue_write_cache(nbd->disk->queue, false, false);
}

static void send_disconnects(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	struct nbd_request request = {
		.magic = htonl(NBD_REQUEST_MAGIC),
		.type = htonl(NBD_CMD_DISC),
	};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	int i, ret;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		iov_iter_kvec(&from, ITER_SOURCE, &iov, 1, sizeof(request));
		mutex_lock(&nsock->tx_lock);
		ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
		if (ret < 0)
			dev_err(disk_to_dev(nbd->disk),
				"Send disconnect failed %d\n", ret);
		mutex_unlock(&nsock->tx_lock);
	}
}
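
/*
 * Protocol note: NBD_CMD_DISC is fire-and-forget - the server sends no
 * reply and simply tears the connection down, which is why the result of
 * sock_xmit() above is only logged rather than propagated.
 */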

static int nbd_disconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;

	dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
	set_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
	set_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags);
	send_disconnects(nbd);
	return 0;
}

static void nbd_clear_sock(struct nbd_device *nbd)
{
	sock_shutdown(nbd);
	nbd_clear_que(nbd);
	nbd->task_setup = NULL;
}

static void nbd_config_put(struct nbd_device *nbd)
{
	if (refcount_dec_and_mutex_lock(&nbd->config_refs,
					&nbd->config_lock)) {
		struct nbd_config *config = nbd->config;
		nbd_dev_dbg_close(nbd);
		invalidate_disk(nbd->disk);
		if (nbd->config->bytesize)
			kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
		if (test_and_clear_bit(NBD_RT_HAS_PID_FILE,
				       &config->runtime_flags))
			device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
		nbd->pid = 0;
		if (test_and_clear_bit(NBD_RT_HAS_BACKEND_FILE,
				       &config->runtime_flags)) {
			device_remove_file(disk_to_dev(nbd->disk), &backend_attr);
			kfree(nbd->backend);
			nbd->backend = NULL;
		}
		nbd_clear_sock(nbd);
		if (config->num_connections) {
			int i;
			for (i = 0; i < config->num_connections; i++) {
				sockfd_put(config->socks[i]->sock);
				kfree(config->socks[i]);
			}
			kfree(config->socks);
		}
		kfree(nbd->config);
		nbd->config = NULL;

		nbd->tag_set.timeout = 0;
		nbd->disk->queue->limits.discard_granularity = 0;
		blk_queue_max_discard_sectors(nbd->disk->queue, 0);

		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		module_put(THIS_MODULE);
	}
}

static int nbd_start_device(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int num_connections = config->num_connections;
	int error = 0, i;

	if (nbd->pid)
		return -EBUSY;
	if (!config->socks)
		return -EINVAL;
	if (num_connections > 1 &&
	    !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
		dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
		return -EINVAL;
	}

	blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
	nbd->pid = task_pid_nr(current);

	nbd_parse_flags(nbd);

	error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
	if (error) {
		dev_err(disk_to_dev(nbd->disk), "device_create_file failed for pid!\n");
		return error;
	}
	set_bit(NBD_RT_HAS_PID_FILE, &config->runtime_flags);

	nbd_dev_dbg_init(nbd);
	for (i = 0; i < num_connections; i++) {
		struct recv_thread_args *args;

		args = kzalloc(sizeof(*args), GFP_KERNEL);
		if (!args) {
			sock_shutdown(nbd);
			/*
			 * If we have m connections (m > 2) and the first n
			 * (1 < n < m) kzallocs succeeded but allocation n + 1
			 * failed, we still have n recv threads running. So
			 * flush the workqueue here to prevent those threads
			 * from dropping the last config_refs and trying to
			 * destroy the workqueue from inside the workqueue.
			 */
			if (i)
				flush_workqueue(nbd->recv_workq);
			return -ENOMEM;
		}
		sk_set_memalloc(config->socks[i]->sock->sk);
		if (nbd->tag_set.timeout)
			config->socks[i]->sock->sk->sk_sndtimeo =
				nbd->tag_set.timeout;
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		INIT_WORK(&args->work, recv_work);
		args->nbd = nbd;
		args->index = i;
		queue_work(nbd->recv_workq, &args->work);
	}
	return nbd_set_size(nbd, config->bytesize, nbd_blksize(config));
}

static int nbd_start_device_ioctl(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int ret;

	ret = nbd_start_device(nbd);
	if (ret)
		return ret;

	if (max_part)
		set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
	mutex_unlock(&nbd->config_lock);
	ret = wait_event_interruptible(config->recv_wq,
				       atomic_read(&config->recv_threads) == 0);
	if (ret) {
		sock_shutdown(nbd);
		nbd_clear_que(nbd);
	}

	flush_workqueue(nbd->recv_workq);
	mutex_lock(&nbd->config_lock);
	nbd_bdev_reset(nbd);
	/* user requested, ignore socket errors */
	if (test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags))
		ret = 0;
	if (test_bit(NBD_RT_TIMEDOUT, &config->runtime_flags))
		ret = -ETIMEDOUT;
	return ret;
}

static void nbd_clear_sock_ioctl(struct nbd_device *nbd)
{
	blk_mark_disk_dead(nbd->disk);
	nbd_clear_sock(nbd);
	if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
}

static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout)
{
	nbd->tag_set.timeout = timeout * HZ;
	if (timeout)
		blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
	else
		blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ);
}
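
/*
 * Example (illustrative): NBD_SET_TIMEOUT with arg == 60 arms a 60 * HZ
 * blk-mq request timer, and on expiry nbd_xmit_timeout() marks the
 * connection dead and retries or fails the I/O. With arg == 0 a 30-second
 * timer still runs, but since tag_set.timeout stays 0, nbd_xmit_timeout()
 * only logs a "Possible stuck request" warning and resets the timer.
 */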

/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;
	loff_t bytesize;

	switch (cmd) {
	case NBD_DISCONNECT:
		return nbd_disconnect(nbd);
	case NBD_CLEAR_SOCK:
		nbd_clear_sock_ioctl(nbd);
		return 0;
	case NBD_SET_SOCK:
		return nbd_add_socket(nbd, arg, false);
	case NBD_SET_BLKSIZE:
		return nbd_set_size(nbd, config->bytesize, arg);
	case NBD_SET_SIZE:
		return nbd_set_size(nbd, arg, nbd_blksize(config));
	case NBD_SET_SIZE_BLOCKS:
		if (check_shl_overflow(arg, config->blksize_bits, &bytesize))
			return -EINVAL;
		return nbd_set_size(nbd, bytesize, nbd_blksize(config));
	case NBD_SET_TIMEOUT:
		nbd_set_cmd_timeout(nbd, arg);
		return 0;

	case NBD_SET_FLAGS:
		config->flags = arg;
		return 0;
	case NBD_DO_IT:
		return nbd_start_device_ioctl(nbd);
	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only. The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;
	case NBD_PRINT_DEBUG:
		/*
		 * For compatibility only, we no longer keep a list of
		 * outstanding requests.
		 */
		return 0;
	}
	return -ENOTTY;
}
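
/*
 * Minimal sketch of the classic ioctl-based setup a userspace client such
 * as nbd-client performs (illustrative only, error handling omitted;
 * "sock" is assumed to be a connected socket to an NBD server):
 *
 *	int dev = open("/dev/nbd0", O_RDWR);
 *	ioctl(dev, NBD_SET_SOCK, sock);
 *	ioctl(dev, NBD_SET_BLKSIZE, 4096UL);
 *	ioctl(dev, NBD_SET_SIZE_BLOCKS, nr_blocks);
 *	ioctl(dev, NBD_SET_TIMEOUT, 60UL);
 *	ioctl(dev, NBD_DO_IT);	- blocks until disconnect or timeout
 *
 * NBD_DO_IT does not return until the receive threads exit, which is why
 * nbd-client traditionally forks before issuing it.
 */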

static int nbd_ioctl(struct block_device *bdev, blk_mode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	struct nbd_config *config = nbd->config;
	int error = -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* The block layer will pass back some non-nbd ioctls in case we have
	 * special handling for them, but we don't so just return an error.
	 */
	if (_IOC_TYPE(cmd) != 0xab)
		return -EINVAL;

	mutex_lock(&nbd->config_lock);

	/* Don't allow ioctl operations on an nbd device that was created with
	 * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
	 */
	if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
	    (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
		error = __nbd_ioctl(bdev, nbd, cmd, arg);
	else
		dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
	mutex_unlock(&nbd->config_lock);
	return error;
}

static struct nbd_config *nbd_alloc_config(void)
{
	struct nbd_config *config;

	if (!try_module_get(THIS_MODULE))
		return ERR_PTR(-ENODEV);

	config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
	if (!config) {
		module_put(THIS_MODULE);
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&config->recv_threads, 0);
	init_waitqueue_head(&config->recv_wq);
	init_waitqueue_head(&config->conn_wait);
	config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
	atomic_set(&config->live_connections, 0);
	return config;
}

static int nbd_open(struct gendisk *disk, blk_mode_t mode)
{
	struct nbd_device *nbd;
	int ret = 0;

	mutex_lock(&nbd_index_mutex);
	nbd = disk->private_data;
	if (!nbd) {
		ret = -ENXIO;
		goto out;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		ret = -ENXIO;
		goto out;
	}
	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		struct nbd_config *config;

		mutex_lock(&nbd->config_lock);
		if (refcount_inc_not_zero(&nbd->config_refs)) {
			mutex_unlock(&nbd->config_lock);
			goto out;
		}
		config = nbd_alloc_config();
		if (IS_ERR(config)) {
			ret = PTR_ERR(config);
			mutex_unlock(&nbd->config_lock);
			goto out;
		}
		nbd->config = config;
		refcount_set(&nbd->config_refs, 1);
		refcount_inc(&nbd->refs);
		mutex_unlock(&nbd->config_lock);
		if (max_part)
			set_bit(GD_NEED_PART_SCAN, &disk->state);
	} else if (nbd_disconnected(nbd->config)) {
		if (max_part)
			set_bit(GD_NEED_PART_SCAN, &disk->state);
	}
out:
	mutex_unlock(&nbd_index_mutex);
	return ret;
}

static void nbd_release(struct gendisk *disk)
{
	struct nbd_device *nbd = disk->private_data;

	if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
			disk_openers(disk) == 0)
		nbd_disconnect_and_put(nbd);

	nbd_config_put(nbd);
	nbd_put(nbd);
}

static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.open =		nbd_open,
	.release =	nbd_release,
	.ioctl =	nbd_ioctl,
	.compat_ioctl =	nbd_ioctl,
};

#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;

	if (nbd->pid)
		seq_printf(s, "recv: %d\n", nbd->pid);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(nbd_dbg_tasks);

static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;
	u32 flags = nbd->config->flags;

	seq_printf(s, "Hex: 0x%08x\n\n", flags);

	seq_puts(s, "Known flags:\n");

	if (flags & NBD_FLAG_HAS_FLAGS)
		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
	if (flags & NBD_FLAG_READ_ONLY)
		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
	if (flags & NBD_FLAG_SEND_FLUSH)
		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
	if (flags & NBD_FLAG_SEND_FUA)
		seq_puts(s, "NBD_FLAG_SEND_FUA\n");
	if (flags & NBD_FLAG_SEND_TRIM)
		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(nbd_dbg_flags);

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	struct dentry *dir;
	struct nbd_config *config = nbd->config;

	if (!nbd_dbg_dir)
		return -EIO;

	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
	if (IS_ERR(dir)) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
			nbd_name(nbd));
		return -EIO;
	}
	config->dbg_dir = dir;

	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_fops);
	debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
	debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
	debugfs_create_u32("blocksize_bits", 0444, dir, &config->blksize_bits);
	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_fops);

	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
	debugfs_remove_recursive(nbd->config->dbg_dir);
}

static int nbd_dbg_init(void)
{
	struct dentry *dbg_dir;

	dbg_dir = debugfs_create_dir("nbd", NULL);
	if (IS_ERR(dbg_dir))
		return -EIO;

	nbd_dbg_dir = dbg_dir;

	return 0;
}

static void nbd_dbg_close(void)
{
	debugfs_remove_recursive(nbd_dbg_dir);
}

#else /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
	return 0;
}

static void nbd_dbg_close(void)
{
}

#endif

static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
			    unsigned int hctx_idx, unsigned int numa_node)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
	cmd->nbd = set->driver_data;
	cmd->flags = 0;
	mutex_init(&cmd->lock);
	return 0;
}

static const struct blk_mq_ops nbd_mq_ops = {
	.queue_rq	= nbd_queue_rq,
	.complete	= nbd_complete_rq,
	.init_request	= nbd_init_request,
	.timeout	= nbd_xmit_timeout,
};

static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
{
	struct nbd_device *nbd;
	struct gendisk *disk;
	int err = -ENOMEM;

	nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
	if (!nbd)
		goto out;

	nbd->tag_set.ops = &nbd_mq_ops;
	nbd->tag_set.nr_hw_queues = 1;
	nbd->tag_set.queue_depth = 128;
	nbd->tag_set.numa_node = NUMA_NO_NODE;
	nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
	nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
		BLK_MQ_F_BLOCKING;
	nbd->tag_set.driver_data = nbd;
	INIT_WORK(&nbd->remove_work, nbd_dev_remove_work);
	nbd->backend = NULL;

	err = blk_mq_alloc_tag_set(&nbd->tag_set);
	if (err)
		goto out_free_nbd;

	mutex_lock(&nbd_index_mutex);
	if (index >= 0) {
		err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
				GFP_KERNEL);
		if (err == -ENOSPC)
			err = -EEXIST;
	} else {
		err = idr_alloc(&nbd_index_idr, nbd, 0,
				(MINORMASK >> part_shift) + 1, GFP_KERNEL);
		if (err >= 0)
			index = err;
	}
	nbd->index = index;
	mutex_unlock(&nbd_index_mutex);
	if (err < 0)
		goto out_free_tags;

	disk = blk_mq_alloc_disk(&nbd->tag_set, NULL);
	if (IS_ERR(disk)) {
		err = PTR_ERR(disk);
		goto out_free_idr;
	}
	nbd->disk = disk;

	nbd->recv_workq = alloc_workqueue("nbd%d-recv",
					  WQ_MEM_RECLAIM | WQ_HIGHPRI |
					  WQ_UNBOUND, 0, nbd->index);
	if (!nbd->recv_workq) {
		dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
		err = -ENOMEM;
		goto out_err_disk;
	}

	/*
	 * Tell the block layer that we are not a rotational device
	 */
	blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
	disk->queue->limits.discard_granularity = 0;
	blk_queue_max_discard_sectors(disk->queue, 0);
	blk_queue_max_segment_size(disk->queue, UINT_MAX);
	blk_queue_max_segments(disk->queue, USHRT_MAX);
	blk_queue_max_hw_sectors(disk->queue, 65536);
	disk->queue->limits.max_sectors = 256;

	mutex_init(&nbd->config_lock);
	refcount_set(&nbd->config_refs, 0);
	/*
	 * Start out with zero references to keep other threads from using
	 * this device until it is fully initialized.
	 */
	refcount_set(&nbd->refs, 0);
	INIT_LIST_HEAD(&nbd->list);
	disk->major = NBD_MAJOR;
	disk->first_minor = index << part_shift;
	disk->minors = 1 << part_shift;
	disk->fops = &nbd_fops;
	disk->private_data = nbd;
	sprintf(disk->disk_name, "nbd%d", index);
	err = add_disk(disk);
	if (err)
		goto out_free_work;

	/*
	 * Now publish the device.
	 */
	refcount_set(&nbd->refs, refs);
	nbd_total_devices++;
	return nbd;

out_free_work:
	destroy_workqueue(nbd->recv_workq);
out_err_disk:
	put_disk(disk);
out_free_idr:
	mutex_lock(&nbd_index_mutex);
	idr_remove(&nbd_index_idr, index);
	mutex_unlock(&nbd_index_mutex);
out_free_tags:
	blk_mq_free_tag_set(&nbd->tag_set);
out_free_nbd:
	kfree(nbd);
out:
	return ERR_PTR(err);
}

static struct nbd_device *nbd_find_get_unused(void)
{
	struct nbd_device *nbd;
	int id;

	lockdep_assert_held(&nbd_index_mutex);

	idr_for_each_entry(&nbd_index_idr, nbd, id) {
		if (refcount_read(&nbd->config_refs) ||
		    test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags))
			continue;
		if (refcount_inc_not_zero(&nbd->refs))
			return nbd;
	}

	return NULL;
}

/* Netlink interface. */
static const struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
	[NBD_ATTR_INDEX]		= { .type = NLA_U32 },
	[NBD_ATTR_SIZE_BYTES]		= { .type = NLA_U64 },
	[NBD_ATTR_BLOCK_SIZE_BYTES]	= { .type = NLA_U64 },
	[NBD_ATTR_TIMEOUT]		= { .type = NLA_U64 },
	[NBD_ATTR_SERVER_FLAGS]		= { .type = NLA_U64 },
	[NBD_ATTR_CLIENT_FLAGS]		= { .type = NLA_U64 },
	[NBD_ATTR_SOCKETS]		= { .type = NLA_NESTED},
	[NBD_ATTR_DEAD_CONN_TIMEOUT]	= { .type = NLA_U64 },
	[NBD_ATTR_DEVICE_LIST]		= { .type = NLA_NESTED},
	[NBD_ATTR_BACKEND_IDENTIFIER]	= { .type = NLA_STRING},
};

static const struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
	[NBD_SOCK_FD]			= { .type = NLA_U32 },
};

/* We don't use this right now since we don't parse the incoming list, but we
 * still want it here so userspace knows what to expect.
 */
static const struct nla_policy __attribute__((unused))
nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
	[NBD_DEVICE_INDEX]		= { .type = NLA_U32 },
	[NBD_DEVICE_CONNECTED]		= { .type = NLA_U8 },
};
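
/*
 * Illustrative message layout (not a wire dump): an NBD_CMD_CONNECT
 * request built against these policies nests one NBD_SOCK_ITEM per
 * connection inside NBD_ATTR_SOCKETS, roughly:
 *
 *	NBD_ATTR_INDEX		(u32, optional - any free device if absent)
 *	NBD_ATTR_SIZE_BYTES	(u64)
 *	NBD_ATTR_SOCKETS	(nested)
 *		NBD_SOCK_ITEM	(nested)
 *			NBD_SOCK_FD	(u32)
 *
 * which is what nbd_genl_connect() below walks with nla_for_each_nested().
 */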

static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	u64 bsize = nbd_blksize(config);
	u64 bytes = config->bytesize;

	if (info->attrs[NBD_ATTR_SIZE_BYTES])
		bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);

	if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES])
		bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);

	if (bytes != config->bytesize || bsize != nbd_blksize(config))
		return nbd_set_size(nbd, bytes, bsize);
	return 0;
}
1916
1917static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
1918{
1919 struct nbd_device *nbd;
1920 struct nbd_config *config;
1921 int index = -1;
1922 int ret;
1923 bool put_dev = false;
1924
1925 if (!netlink_capable(skb, CAP_SYS_ADMIN))
1926 return -EPERM;
1927
1928 if (info->attrs[NBD_ATTR_INDEX]) {
1929 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
1930
1931 /*
1932 * Too big first_minor can cause duplicate creation of
1933 * sysfs files/links, since index << part_shift might overflow, or
1934 * MKDEV() expect that the max bits of first_minor is 20.
1935 */
1936 if (index < 0 || index > MINORMASK >> part_shift) {
1937 pr_err("illegal input index %d\n", index);
1938 return -EINVAL;
1939 }
1940 }
	if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_SOCKETS)) {
		pr_err("must specify at least one socket\n");
		return -EINVAL;
	}
	if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_SIZE_BYTES)) {
		pr_err("must specify a size in bytes for the device\n");
		return -EINVAL;
	}
again:
	mutex_lock(&nbd_index_mutex);
	if (index == -1) {
		nbd = nbd_find_get_unused();
	} else {
		nbd = idr_find(&nbd_index_idr, index);
		if (nbd) {
			if ((test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) &&
			     test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) ||
			    !refcount_inc_not_zero(&nbd->refs)) {
				mutex_unlock(&nbd_index_mutex);
				pr_err("device at index %d is going down\n",
					index);
				return -EINVAL;
			}
		}
	}
	mutex_unlock(&nbd_index_mutex);

	if (!nbd) {
		nbd = nbd_dev_add(index, 2);
		if (IS_ERR(nbd)) {
			pr_err("failed to add new device\n");
			return PTR_ERR(nbd);
		}
	}

	mutex_lock(&nbd->config_lock);
	if (refcount_read(&nbd->config_refs)) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		if (index == -1)
			goto again;
		pr_err("nbd%d already in use\n", index);
		return -EBUSY;
	}
	if (WARN_ON(nbd->config)) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		return -EINVAL;
	}
	config = nbd_alloc_config();
	if (IS_ERR(config)) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		pr_err("couldn't allocate config\n");
		return PTR_ERR(config);
	}
	nbd->config = config;
	refcount_set(&nbd->config_refs, 1);
	set_bit(NBD_RT_BOUND, &config->runtime_flags);

	ret = nbd_genl_size_set(info, nbd);
	if (ret)
		goto out;

	if (info->attrs[NBD_ATTR_TIMEOUT])
		nbd_set_cmd_timeout(nbd,
				    nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
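	/* Editor's note: the dead-connection timeout attribute below is
	 * taken in seconds; multiplying by HZ converts it to jiffies. */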
	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
		config->dead_conn_timeout =
			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
		config->dead_conn_timeout *= HZ;
	}
	if (info->attrs[NBD_ATTR_SERVER_FLAGS])
		config->flags =
			nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
			/*
			 * We hold one ref to keep the device around, plus one
			 * ref for the current operation, which the config will
			 * inherit. If NBD_DESTROY_ON_DISCONNECT was already
			 * set, the keep-around ref has already been dropped,
			 * so we must not drop it again (no put_dev).
			 */
			if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
					      &nbd->flags))
				put_dev = true;
		} else {
			if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
					       &nbd->flags))
				refcount_inc(&nbd->refs);
		}
		if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
			set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
				&config->runtime_flags);
		}
	}
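	/*
	 * Editor's sketch of the resulting ref ledger, assuming a device
	 * freshly created above via nbd_dev_add(index, 2):
	 *   refs == 2: one keep-alive ref plus one ref for this operation,
	 *              which the config inherits;
	 *   first DESTROY_ON_DISCONNECT: put_dev drops the keep-alive ref
	 *              at the end of this function, so the device is torn
	 *              down once its last config reference goes away.
	 */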

	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;
		int rem, fd;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
				    rem) {
			struct nlattr *socks[NBD_SOCK_MAX+1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				pr_err("socks must be embedded in a SOCK_ITEM attr\n");
				ret = -EINVAL;
				goto out;
			}
			ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
							  attr,
							  nbd_sock_policy,
							  info->extack);
			if (ret != 0) {
				pr_err("error processing sock list\n");
				ret = -EINVAL;
				goto out;
			}
			if (!socks[NBD_SOCK_FD])
				continue;
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_add_socket(nbd, fd, true);
			if (ret)
				goto out;
		}
	}
	ret = nbd_start_device(nbd);
	if (ret)
		goto out;
	if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) {
		nbd->backend = nla_strdup(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER],
					  GFP_KERNEL);
		if (!nbd->backend) {
			ret = -ENOMEM;
			goto out;
		}
	}
	ret = device_create_file(disk_to_dev(nbd->disk), &backend_attr);
	if (ret) {
		dev_err(disk_to_dev(nbd->disk),
			"device_create_file failed for backend!\n");
		goto out;
	}
	set_bit(NBD_RT_HAS_BACKEND_FILE, &config->runtime_flags);
out:
	mutex_unlock(&nbd->config_lock);
	if (!ret) {
		set_bit(NBD_RT_HAS_CONFIG_REF, &config->runtime_flags);
		refcount_inc(&nbd->config_refs);
		nbd_connect_reply(info, nbd->index);
	}
	nbd_config_put(nbd);
	if (put_dev)
		nbd_put(nbd);
	return ret;
}
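
/*
 * Editor's sketch (hypothetical userspace code, not part of this driver):
 * driving NBD_CMD_CONNECT over generic netlink with libnl-3. Error
 * handling is elided, and "sock_fd" is assumed to be a TCP socket already
 * connected to an NBD server that has completed the protocol handshake.
 *
 *	#include <netlink/netlink.h>
 *	#include <netlink/genl/genl.h>
 *	#include <netlink/genl/ctrl.h>
 *	#include <linux/nbd-netlink.h>
 *
 *	struct nl_sock *sk = nl_socket_alloc();
 *	genl_connect(sk);
 *	int fam = genl_ctrl_resolve(sk, NBD_GENL_FAMILY_NAME);
 *
 *	struct nl_msg *msg = nlmsg_alloc();
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, fam, 0, 0,
 *		    NBD_CMD_CONNECT, NBD_GENL_VERSION);
 *	nla_put_u32(msg, NBD_ATTR_INDEX, 0);			// optional: pick nbd0
 *	nla_put_u64(msg, NBD_ATTR_SIZE_BYTES, 1ULL << 30);	// 1 GiB
 *
 *	struct nlattr *socks = nla_nest_start(msg, NBD_ATTR_SOCKETS);
 *	struct nlattr *item = nla_nest_start(msg, NBD_SOCK_ITEM);
 *	nla_put_u32(msg, NBD_SOCK_FD, sock_fd);
 *	nla_nest_end(msg, item);
 *	nla_nest_end(msg, socks);
 *	nl_send_auto(sk, msg);
 *
 * On success the kernel answers with an NBD_CMD_CONNECT reply carrying
 * NBD_ATTR_INDEX (see nbd_connect_reply() below).
 */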

static void nbd_disconnect_and_put(struct nbd_device *nbd)
{
	mutex_lock(&nbd->config_lock);
	nbd_disconnect(nbd);
	sock_shutdown(nbd);
	wake_up(&nbd->config->conn_wait);
	/*
	 * Make sure the recv thread has finished, so that we can safely
	 * call nbd_clear_que() to cancel the in-flight I/Os.
	 */
	flush_workqueue(nbd->recv_workq);
	nbd_clear_que(nbd);
	nbd->task_setup = NULL;
	mutex_unlock(&nbd->config_lock);

	if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
}

static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd;
	int index;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_INDEX)) {
		pr_err("must specify an index to disconnect\n");
		return -EINVAL;
	}
	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	mutex_lock(&nbd_index_mutex);
	nbd = idr_find(&nbd_index_idr, index);
	if (!nbd) {
		mutex_unlock(&nbd_index_mutex);
		pr_err("couldn't find device at index %d\n", index);
		return -EINVAL;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		pr_err("device at index %d is going down\n", index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);
	if (!refcount_inc_not_zero(&nbd->config_refs))
		goto put_nbd;
	nbd_disconnect_and_put(nbd);
	nbd_config_put(nbd);
put_nbd:
	nbd_put(nbd);
	return 0;
}
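
/*
 * Editor's sketch (hypothetical userspace code): a disconnect request
 * needs only the device index, e.g. with libnl-3 (setup as in the
 * connect sketch above):
 *
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, fam, 0, 0,
 *		    NBD_CMD_DISCONNECT, NBD_GENL_VERSION);
 *	nla_put_u32(msg, NBD_ATTR_INDEX, 0);
 *	nl_send_auto(sk, msg);
 */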

static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd = NULL;
	struct nbd_config *config;
	int index;
	int ret = 0;
	bool put_dev = false;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_INDEX)) {
		pr_err("must specify a device to reconfigure\n");
		return -EINVAL;
	}
	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	mutex_lock(&nbd_index_mutex);
	nbd = idr_find(&nbd_index_idr, index);
	if (!nbd) {
		mutex_unlock(&nbd_index_mutex);
		pr_err("couldn't find a device at index %d\n", index);
		return -EINVAL;
	}
	if (nbd->backend) {
		if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) {
			if (nla_strcmp(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER],
				       nbd->backend)) {
				mutex_unlock(&nbd_index_mutex);
				dev_err(nbd_to_dev(nbd),
					"backend image doesn't match %s\n",
					nbd->backend);
				return -EINVAL;
			}
		} else {
			mutex_unlock(&nbd_index_mutex);
			dev_err(nbd_to_dev(nbd), "must specify backend\n");
			return -EINVAL;
		}
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		pr_err("device at index %d is going down\n", index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		dev_err(nbd_to_dev(nbd),
			"not configured, cannot reconfigure\n");
		nbd_put(nbd);
		return -EINVAL;
	}

	mutex_lock(&nbd->config_lock);
	config = nbd->config;
	if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
	    !nbd->pid) {
		dev_err(nbd_to_dev(nbd),
			"not configured, cannot reconfigure\n");
		ret = -EINVAL;
		goto out;
	}

	ret = nbd_genl_size_set(info, nbd);
	if (ret)
		goto out;

	if (info->attrs[NBD_ATTR_TIMEOUT])
		nbd_set_cmd_timeout(nbd,
				    nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
		config->dead_conn_timeout =
			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
		config->dead_conn_timeout *= HZ;
	}
	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
			if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
					      &nbd->flags))
				put_dev = true;
		} else {
			if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
					       &nbd->flags))
				refcount_inc(&nbd->refs);
		}

		if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
			set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
				&config->runtime_flags);
		} else {
			clear_bit(NBD_RT_DISCONNECT_ON_CLOSE,
				  &config->runtime_flags);
		}
	}

	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;
		int rem, fd;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
				    rem) {
			struct nlattr *socks[NBD_SOCK_MAX+1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				pr_err("socks must be embedded in a SOCK_ITEM attr\n");
				ret = -EINVAL;
				goto out;
			}
			ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
							  attr,
							  nbd_sock_policy,
							  info->extack);
			if (ret != 0) {
				pr_err("error processing sock list\n");
				ret = -EINVAL;
				goto out;
			}
			if (!socks[NBD_SOCK_FD])
				continue;
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_reconnect_socket(nbd, fd);
			if (ret) {
				if (ret == -ENOSPC)
					ret = 0;
				goto out;
			}
			dev_info(nbd_to_dev(nbd), "reconnected socket\n");
		}
	}
out:
	mutex_unlock(&nbd->config_lock);
	nbd_config_put(nbd);
	nbd_put(nbd);
	if (put_dev)
		nbd_put(nbd);
	return ret;
}
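
/*
 * Editor's note: a reconfigure message reuses the NBD_ATTR_SOCKETS
 * layout from connect; each NBD_SOCK_FD offered here replaces a dead
 * connection. -ENOSPC from nbd_reconnect_socket() (no dead slot to
 * fill) is deliberately reported as success above, so callers may
 * offer replacement sockets speculatively.
 */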

static const struct genl_small_ops nbd_connect_genl_ops[] = {
	{
		.cmd = NBD_CMD_CONNECT,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = nbd_genl_connect,
	},
	{
		.cmd = NBD_CMD_DISCONNECT,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = nbd_genl_disconnect,
	},
	{
		.cmd = NBD_CMD_RECONFIGURE,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = nbd_genl_reconfigure,
	},
	{
		.cmd = NBD_CMD_STATUS,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = nbd_genl_status,
	},
};

static const struct genl_multicast_group nbd_mcast_grps[] = {
	{ .name = NBD_GENL_MCAST_GROUP_NAME, },
};

static struct genl_family nbd_genl_family __ro_after_init = {
	.hdrsize = 0,
	.name = NBD_GENL_FAMILY_NAME,
	.version = NBD_GENL_VERSION,
	.module = THIS_MODULE,
	.small_ops = nbd_connect_genl_ops,
	.n_small_ops = ARRAY_SIZE(nbd_connect_genl_ops),
	.resv_start_op = NBD_CMD_STATUS + 1,
	.maxattr = NBD_ATTR_MAX,
	.netnsok = 1,
	.policy = nbd_attr_policy,
	.mcgrps = nbd_mcast_grps,
	.n_mcgrps = ARRAY_SIZE(nbd_mcast_grps),
};
MODULE_ALIAS_GENL_FAMILY(NBD_GENL_FAMILY_NAME);
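
/*
 * Editor's note: MODULE_ALIAS_GENL_FAMILY() registers a
 * "genl-family-<name>" module alias, so a generic-netlink request for
 * the "nbd" family can auto-load this module before any device node is
 * opened.
 */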

static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
{
	struct nlattr *dev_opt;
	u8 connected = 0;
	int ret;

	/* This is a little racy, but for status it's ok. The reason we
	 * don't take a ref here is that we can't take a ref in the
	 * index == -1 case, since we would need to put it under
	 * nbd_index_mutex, which could deadlock if we are configured to
	 * remove ourselves once we're disconnected.
	 */
	if (refcount_read(&nbd->config_refs))
		connected = 1;
	dev_opt = nla_nest_start_noflag(reply, NBD_DEVICE_ITEM);
	if (!dev_opt)
		return -EMSGSIZE;
	ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
	if (ret)
		return -EMSGSIZE;
	ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED,
			 connected);
	if (ret)
		return -EMSGSIZE;
	nla_nest_end(reply, dev_opt);
	return 0;
}

static int status_cb(int id, void *ptr, void *data)
{
	struct nbd_device *nbd = ptr;
	return populate_nbd_status(nbd, (struct sk_buff *)data);
}

static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *dev_list;
	struct sk_buff *reply;
	void *reply_head;
	size_t msg_size;
	int index = -1;
	int ret = -ENOMEM;

	if (info->attrs[NBD_ATTR_INDEX])
		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);

	mutex_lock(&nbd_index_mutex);

	msg_size = nla_total_size(nla_attr_size(sizeof(u32)) +
				  nla_attr_size(sizeof(u8)));
	msg_size *= (index == -1) ? nbd_total_devices : 1;

	reply = genlmsg_new(msg_size, GFP_KERNEL);
	if (!reply)
		goto out;
	reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0,
				       NBD_CMD_STATUS);
	if (!reply_head) {
		nlmsg_free(reply);
		goto out;
	}

	dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST);
	if (index == -1) {
		ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
		if (ret) {
			nlmsg_free(reply);
			goto out;
		}
	} else {
		struct nbd_device *nbd;
		nbd = idr_find(&nbd_index_idr, index);
		if (nbd) {
			ret = populate_nbd_status(nbd, reply);
			if (ret) {
				nlmsg_free(reply);
				goto out;
			}
		}
	}
	nla_nest_end(reply, dev_list);
	genlmsg_end(reply, reply_head);
	ret = genlmsg_reply(reply, info);
out:
	mutex_unlock(&nbd_index_mutex);
	return ret;
}

static void nbd_connect_reply(struct genl_info *info, int index)
{
	struct sk_buff *skb;
	void *msg_head;
	int ret;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return;
	msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
				     NBD_CMD_CONNECT);
	if (!msg_head) {
		nlmsg_free(skb);
		return;
	}
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	if (ret) {
		nlmsg_free(skb);
		return;
	}
	genlmsg_end(skb, msg_head);
	genlmsg_reply(skb, info);
}

static void nbd_mcast_index(int index)
{
	struct sk_buff *skb;
	void *msg_head;
	int ret;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return;
	msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
			       NBD_CMD_LINK_DEAD);
	if (!msg_head) {
		nlmsg_free(skb);
		return;
	}
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	if (ret) {
		nlmsg_free(skb);
		return;
	}
	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);
}
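
/*
 * Editor's sketch (hypothetical userspace code): listening for the
 * NBD_CMD_LINK_DEAD notifications sent above. With libnl-3, join the
 * family's multicast group and dispatch received messages to a
 * callback ("on_link_dead" is hypothetical):
 *
 *	int grp = genl_ctrl_resolve_grp(sk, NBD_GENL_FAMILY_NAME,
 *					NBD_GENL_MCAST_GROUP_NAME);
 *	nl_socket_add_membership(sk, grp);
 *	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM,
 *			    on_link_dead, NULL);
 *	nl_recvmsgs_default(sk);	// on_link_dead() reads NBD_ATTR_INDEX
 */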

static void nbd_dead_link_work(struct work_struct *work)
{
	struct link_dead_args *args = container_of(work, struct link_dead_args,
						   work);
	nbd_mcast_index(args->index);
	kfree(args);
}

static int __init nbd_init(void)
{
	int i;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		pr_err("max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift, as it is exported
		 * to user space so that users can know the maximum number of
		 * partitions the kernel is able to manage per device.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}
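	/*
	 * Editor's example: with the default max_part of 16, fls(16) == 5,
	 * so part_shift == 5 and max_part becomes (1 << 5) - 1 == 31; each
	 * device then spans 32 minors, and the nbds_max check below permits
	 * up to 1 << (MINORBITS - 5) == 32768 devices.
	 */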

	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	if (register_blkdev(NBD_MAJOR, "nbd"))
		return -EIO;

	nbd_del_wq = alloc_workqueue("nbd-del", WQ_UNBOUND, 0);
	if (!nbd_del_wq) {
		unregister_blkdev(NBD_MAJOR, "nbd");
		return -ENOMEM;
	}

	if (genl_register_family(&nbd_genl_family)) {
		destroy_workqueue(nbd_del_wq);
		unregister_blkdev(NBD_MAJOR, "nbd");
		return -EINVAL;
	}
	nbd_dbg_init();

	for (i = 0; i < nbds_max; i++)
		nbd_dev_add(i, 1);
	return 0;
}

static int nbd_exit_cb(int id, void *ptr, void *data)
{
	struct list_head *list = (struct list_head *)data;
	struct nbd_device *nbd = ptr;

	/* Skip nbd that is being removed asynchronously */
	if (refcount_read(&nbd->refs))
		list_add_tail(&nbd->list, list);

	return 0;
}

static void __exit nbd_cleanup(void)
{
	struct nbd_device *nbd;
	LIST_HEAD(del_list);

	/*
	 * Unregister the netlink interface prior to waiting
	 * for the completion of netlink commands.
	 */
	genl_unregister_family(&nbd_genl_family);

	nbd_dbg_close();

	mutex_lock(&nbd_index_mutex);
	idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list);
	mutex_unlock(&nbd_index_mutex);

	while (!list_empty(&del_list)) {
		nbd = list_first_entry(&del_list, struct nbd_device, list);
		list_del_init(&nbd->list);
		if (refcount_read(&nbd->config_refs))
			pr_err("possibly leaking nbd_config (ref %d)\n",
			       refcount_read(&nbd->config_refs));
		if (refcount_read(&nbd->refs) != 1)
			pr_err("possibly leaking a device\n");
		nbd_put(nbd);
	}

	/* Also wait for nbd_dev_remove_work() to complete */
	destroy_workqueue(nbd_del_wq);

	idr_destroy(&nbd_index_idr);
	unregister_blkdev(NBD_MAJOR, "nbd");
}

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)");
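
/*
 * Editor's example: loading the module with non-default parameters, e.g.
 *
 *	modprobe nbd nbds_max=4 max_part=8
 *
 * creates nbd0..nbd3 up front; max_part is rounded up as described in
 * nbd_init() above (here fls(8) == 4, so each device gets 15 usable
 * partitions plus the whole-disk node).
 */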