1/*
2 drbd.c
3
4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10 Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
11 from Logicworks, Inc. for making SDP replication support possible.
12
13 drbd is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
16 any later version.
17
18 drbd is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with drbd; see the file COPYING. If not, write to
25 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26
27 */
28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31#include <linux/module.h>
32#include <linux/jiffies.h>
33#include <linux/drbd.h>
34#include <linux/uaccess.h>
35#include <asm/types.h>
36#include <net/sock.h>
37#include <linux/ctype.h>
38#include <linux/mutex.h>
39#include <linux/fs.h>
40#include <linux/file.h>
41#include <linux/proc_fs.h>
42#include <linux/init.h>
43#include <linux/mm.h>
44#include <linux/memcontrol.h>
45#include <linux/mm_inline.h>
46#include <linux/slab.h>
47#include <linux/random.h>
48#include <linux/reboot.h>
49#include <linux/notifier.h>
50#include <linux/kthread.h>
51#include <linux/workqueue.h>
52#define __KERNEL_SYSCALLS__
53#include <linux/unistd.h>
54#include <linux/vmalloc.h>
55#include <linux/sched/signal.h>
56
57#include <linux/drbd_limits.h>
58#include "drbd_int.h"
59#include "drbd_protocol.h"
60#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
61#include "drbd_vli.h"
62#include "drbd_debugfs.h"
63
64static DEFINE_MUTEX(drbd_main_mutex);
65static int drbd_open(struct block_device *bdev, fmode_t mode);
66static void drbd_release(struct gendisk *gd, fmode_t mode);
67static void md_sync_timer_fn(unsigned long data);
68static int w_bitmap_io(struct drbd_work *w, int unused);
69
70MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
71 "Lars Ellenberg <lars@linbit.com>");
72MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
73MODULE_VERSION(REL_VERSION);
74MODULE_LICENSE("GPL");
75MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
76 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
77MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
78
79#include <linux/moduleparam.h>
80/* thanks to these macros, if compiled into the kernel (not-module),
81 * these become boot parameters (e.g., drbd.minor_count) */
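/* Illustrative examples (values assumed, not taken from this file):
 *   as a module:           modprobe drbd minor_count=8
 *   built into the kernel: drbd.minor_count=8 on the kernel command line */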
82
83#ifdef CONFIG_DRBD_FAULT_INJECTION
84int drbd_enable_faults;
85int drbd_fault_rate;
86static int drbd_fault_count;
87static int drbd_fault_devs;
88/* bitmap of enabled faults */
89module_param_named(enable_faults, drbd_enable_faults, int, 0664);
90/* fault rate % value - applies to all enabled faults */
91module_param_named(fault_rate, drbd_fault_rate, int, 0664);
92/* count of faults inserted */
93module_param_named(fault_count, drbd_fault_count, int, 0664);
94/* bitmap of devices to insert faults on */
95module_param_named(fault_devs, drbd_fault_devs, int, 0644);
96#endif
97
98/* module parameters we can keep static */
99static bool drbd_allow_oos; /* allow_open_on_secondary */
100static bool drbd_disable_sendpage;
101MODULE_PARM_DESC(allow_oos, "DONT USE!");
102module_param_named(allow_oos, drbd_allow_oos, bool, 0);
103module_param_named(disable_sendpage, drbd_disable_sendpage, bool, 0644);
104
105/* module parameters we share */
106int drbd_proc_details; /* Detail level in proc drbd*/
107module_param_named(proc_details, drbd_proc_details, int, 0644);
108/* module parameters shared with defaults */
109unsigned int drbd_minor_count = DRBD_MINOR_COUNT_DEF;
110/* Module parameter for setting the user mode helper program
111 * to run. Default is /sbin/drbdadm */
112char drbd_usermode_helper[80] = "/sbin/drbdadm";
113module_param_named(minor_count, drbd_minor_count, uint, 0444);
114module_param_string(usermode_helper, drbd_usermode_helper, sizeof(drbd_usermode_helper), 0644);
115
116/* in 2.6.x, our device mapping and config info contains our virtual gendisks
117 * as member "struct gendisk *vdisk;"
118 */
119struct idr drbd_devices;
120struct list_head drbd_resources;
121struct mutex resources_mutex;
122
123struct kmem_cache *drbd_request_cache;
124struct kmem_cache *drbd_ee_cache; /* peer requests */
125struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */
126struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
127mempool_t *drbd_request_mempool;
128mempool_t *drbd_ee_mempool;
129mempool_t *drbd_md_io_page_pool;
130struct bio_set *drbd_md_io_bio_set;
131struct bio_set *drbd_io_bio_set;
132
133/* I do not use a standard mempool, because:
134 1) I want to hand out the pre-allocated objects first.
135 2) I want to be able to interrupt sleeping allocation with a signal.
 136 Note: This is a singly linked list; the next pointer is the private
137 member of struct page.
138 */
139struct page *drbd_pp_pool;
140spinlock_t drbd_pp_lock;
141int drbd_pp_vacant;
142wait_queue_head_t drbd_pp_wait;
143
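/* rate limit for log messages: at most 5 messages per 5*HZ (i.e. 5 seconds) */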
144DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
145
146static const struct block_device_operations drbd_ops = {
147 .owner = THIS_MODULE,
148 .open = drbd_open,
149 .release = drbd_release,
150};
151
152struct bio *bio_alloc_drbd(gfp_t gfp_mask)
153{
154 struct bio *bio;
155
156 if (!drbd_md_io_bio_set)
157 return bio_alloc(gfp_mask, 1);
158
159 bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
160 if (!bio)
161 return NULL;
162 return bio;
163}
164
165#ifdef __CHECKER__
 166/* When checking with sparse, if this is an inline function, sparse will
 167 give tons of false positives. When it is a real function, sparse works.
168 */
169int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
170{
171 int io_allowed;
172
173 atomic_inc(&device->local_cnt);
174 io_allowed = (device->state.disk >= mins);
175 if (!io_allowed) {
176 if (atomic_dec_and_test(&device->local_cnt))
177 wake_up(&device->misc_wait);
178 }
179 return io_allowed;
180}
181
182#endif
183
184/**
185 * tl_release() - mark as BARRIER_ACKED all requests in the corresponding transfer log epoch
186 * @connection: DRBD connection.
187 * @barrier_nr: Expected identifier of the DRBD write barrier packet.
188 * @set_size: Expected number of requests before that barrier.
189 *
190 * In case the passed barrier_nr or set_size does not match the oldest
191 * epoch of not yet barrier-acked requests, this function will cause a
192 * termination of the connection.
193 */
194void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
195 unsigned int set_size)
196{
197 struct drbd_request *r;
198 struct drbd_request *req = NULL;
199 int expect_epoch = 0;
200 int expect_size = 0;
201
202 spin_lock_irq(&connection->resource->req_lock);
203
204 /* find oldest not yet barrier-acked write request,
205 * count writes in its epoch. */
206 list_for_each_entry(r, &connection->transfer_log, tl_requests) {
207 const unsigned s = r->rq_state;
208 if (!req) {
209 if (!(s & RQ_WRITE))
210 continue;
211 if (!(s & RQ_NET_MASK))
212 continue;
213 if (s & RQ_NET_DONE)
214 continue;
215 req = r;
216 expect_epoch = req->epoch;
 217 expect_size++;
218 } else {
219 if (r->epoch != expect_epoch)
220 break;
221 if (!(s & RQ_WRITE))
222 continue;
223 /* if (s & RQ_DONE): not expected */
224 /* if (!(s & RQ_NET_MASK)): not expected */
225 expect_size++;
226 }
227 }
228
229 /* first some paranoia code */
230 if (req == NULL) {
231 drbd_err(connection, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
232 barrier_nr);
233 goto bail;
234 }
235 if (expect_epoch != barrier_nr) {
236 drbd_err(connection, "BAD! BarrierAck #%u received, expected #%u!\n",
237 barrier_nr, expect_epoch);
238 goto bail;
239 }
240
241 if (expect_size != set_size) {
242 drbd_err(connection, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
243 barrier_nr, set_size, expect_size);
244 goto bail;
245 }
246
247 /* Clean up list of requests processed during current epoch. */
248 /* this extra list walk restart is paranoia,
249 * to catch requests being barrier-acked "unexpectedly".
250 * It usually should find the same req again, or some READ preceding it. */
251 list_for_each_entry(req, &connection->transfer_log, tl_requests)
252 if (req->epoch == expect_epoch)
253 break;
254 list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
255 if (req->epoch != expect_epoch)
256 break;
257 _req_mod(req, BARRIER_ACKED);
258 }
259 spin_unlock_irq(&connection->resource->req_lock);
260
261 return;
262
263bail:
264 spin_unlock_irq(&connection->resource->req_lock);
265 conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
266}
267
268
269/**
270 * _tl_restart() - Walks the transfer log, and applies an action to all requests
271 * @connection: DRBD connection to operate on.
272 * @what: The action/event to perform with all request objects
273 *
274 * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
275 * RESTART_FROZEN_DISK_IO.
276 */
277/* must hold resource->req_lock */
278void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
279{
280 struct drbd_request *req, *r;
281
282 list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests)
283 _req_mod(req, what);
284}
285
286void tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
287{
288 spin_lock_irq(&connection->resource->req_lock);
289 _tl_restart(connection, what);
290 spin_unlock_irq(&connection->resource->req_lock);
291}
292
293/**
294 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 295 * @connection: DRBD connection.
 296 *
 297 * This is called after the connection to the peer was lost. The storage covered
 298 * by the requests on the transfer log gets marked as out of sync. Called from the
299 * receiver thread and the worker thread.
300 */
301void tl_clear(struct drbd_connection *connection)
302{
303 tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
304}
305
306/**
307 * tl_abort_disk_io() - Abort disk I/O for all requests for a certain device in the TL
308 * @device: DRBD device.
309 */
310void tl_abort_disk_io(struct drbd_device *device)
311{
312 struct drbd_connection *connection = first_peer_device(device)->connection;
313 struct drbd_request *req, *r;
314
315 spin_lock_irq(&connection->resource->req_lock);
316 list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests) {
317 if (!(req->rq_state & RQ_LOCAL_PENDING))
318 continue;
319 if (req->device != device)
320 continue;
321 _req_mod(req, ABORT_DISK_IO);
322 }
323 spin_unlock_irq(&connection->resource->req_lock);
324}
325
326static int drbd_thread_setup(void *arg)
327{
328 struct drbd_thread *thi = (struct drbd_thread *) arg;
329 struct drbd_resource *resource = thi->resource;
330 unsigned long flags;
331 int retval;
332
333 snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
334 thi->name[0],
335 resource->name);
336
337restart:
338 retval = thi->function(thi);
339
340 spin_lock_irqsave(&thi->t_lock, flags);
341
342 /* if the receiver has been "EXITING", the last thing it did
343 * was set the conn state to "StandAlone",
344 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
345 * and receiver thread will be "started".
346 * drbd_thread_start needs to set "RESTARTING" in that case.
347 * t_state check and assignment needs to be within the same spinlock,
348 * so either thread_start sees EXITING, and can remap to RESTARTING,
 349 * or thread_start sees NONE, and can proceed as normal.
350 */
351
352 if (thi->t_state == RESTARTING) {
353 drbd_info(resource, "Restarting %s thread\n", thi->name);
354 thi->t_state = RUNNING;
355 spin_unlock_irqrestore(&thi->t_lock, flags);
356 goto restart;
357 }
358
359 thi->task = NULL;
360 thi->t_state = NONE;
361 smp_mb();
362 complete_all(&thi->stop);
363 spin_unlock_irqrestore(&thi->t_lock, flags);
364
365 drbd_info(resource, "Terminating %s\n", current->comm);
366
367 /* Release mod reference taken when thread was started */
368
369 if (thi->connection)
370 kref_put(&thi->connection->kref, drbd_destroy_connection);
371 kref_put(&resource->kref, drbd_destroy_resource);
372 module_put(THIS_MODULE);
373 return retval;
374}
375
376static void drbd_thread_init(struct drbd_resource *resource, struct drbd_thread *thi,
377 int (*func) (struct drbd_thread *), const char *name)
378{
379 spin_lock_init(&thi->t_lock);
380 thi->task = NULL;
381 thi->t_state = NONE;
382 thi->function = func;
383 thi->resource = resource;
384 thi->connection = NULL;
385 thi->name = name;
386}
387
388int drbd_thread_start(struct drbd_thread *thi)
389{
390 struct drbd_resource *resource = thi->resource;
391 struct task_struct *nt;
392 unsigned long flags;
393
394 /* is used from state engine doing drbd_thread_stop_nowait,
395 * while holding the req lock irqsave */
396 spin_lock_irqsave(&thi->t_lock, flags);
397
398 switch (thi->t_state) {
399 case NONE:
400 drbd_info(resource, "Starting %s thread (from %s [%d])\n",
401 thi->name, current->comm, current->pid);
402
403 /* Get ref on module for thread - this is released when thread exits */
404 if (!try_module_get(THIS_MODULE)) {
405 drbd_err(resource, "Failed to get module reference in drbd_thread_start\n");
406 spin_unlock_irqrestore(&thi->t_lock, flags);
407 return false;
408 }
409
410 kref_get(&resource->kref);
411 if (thi->connection)
412 kref_get(&thi->connection->kref);
413
414 init_completion(&thi->stop);
415 thi->reset_cpu_mask = 1;
416 thi->t_state = RUNNING;
417 spin_unlock_irqrestore(&thi->t_lock, flags);
418 flush_signals(current); /* otherw. may get -ERESTARTNOINTR */
419
420 nt = kthread_create(drbd_thread_setup, (void *) thi,
421 "drbd_%c_%s", thi->name[0], thi->resource->name);
422
423 if (IS_ERR(nt)) {
424 drbd_err(resource, "Couldn't start thread\n");
425
426 if (thi->connection)
427 kref_put(&thi->connection->kref, drbd_destroy_connection);
428 kref_put(&resource->kref, drbd_destroy_resource);
429 module_put(THIS_MODULE);
430 return false;
431 }
432 spin_lock_irqsave(&thi->t_lock, flags);
433 thi->task = nt;
434 thi->t_state = RUNNING;
435 spin_unlock_irqrestore(&thi->t_lock, flags);
436 wake_up_process(nt);
437 break;
438 case EXITING:
439 thi->t_state = RESTARTING;
440 drbd_info(resource, "Restarting %s thread (from %s [%d])\n",
441 thi->name, current->comm, current->pid);
442 /* fall through */
443 case RUNNING:
444 case RESTARTING:
445 default:
446 spin_unlock_irqrestore(&thi->t_lock, flags);
447 break;
448 }
449
450 return true;
451}
452
453
454void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
455{
456 unsigned long flags;
457
458 enum drbd_thread_state ns = restart ? RESTARTING : EXITING;
459
460 /* may be called from state engine, holding the req lock irqsave */
461 spin_lock_irqsave(&thi->t_lock, flags);
462
463 if (thi->t_state == NONE) {
464 spin_unlock_irqrestore(&thi->t_lock, flags);
465 if (restart)
466 drbd_thread_start(thi);
467 return;
468 }
469
470 if (thi->t_state != ns) {
471 if (thi->task == NULL) {
472 spin_unlock_irqrestore(&thi->t_lock, flags);
473 return;
474 }
475
476 thi->t_state = ns;
477 smp_mb();
478 init_completion(&thi->stop);
479 if (thi->task != current)
480 force_sig(DRBD_SIGKILL, thi->task);
481 }
482
483 spin_unlock_irqrestore(&thi->t_lock, flags);
484
485 if (wait)
486 wait_for_completion(&thi->stop);
487}
488
489int conn_lowest_minor(struct drbd_connection *connection)
490{
491 struct drbd_peer_device *peer_device;
492 int vnr = 0, minor = -1;
493
494 rcu_read_lock();
495 peer_device = idr_get_next(&connection->peer_devices, &vnr);
496 if (peer_device)
497 minor = device_to_minor(peer_device->device);
498 rcu_read_unlock();
499
500 return minor;
501}
502
503#ifdef CONFIG_SMP
504/**
505 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
506 *
507 * Forces all threads of a resource onto the same CPU. This is beneficial for
508 * DRBD's performance. May be overwritten by user's configuration.
509 */
510static void drbd_calc_cpu_mask(cpumask_var_t *cpu_mask)
511{
512 unsigned int *resources_per_cpu, min_index = ~0;
513
514 resources_per_cpu = kzalloc(nr_cpu_ids * sizeof(*resources_per_cpu), GFP_KERNEL);
515 if (resources_per_cpu) {
516 struct drbd_resource *resource;
517 unsigned int cpu, min = ~0;
518
519 rcu_read_lock();
520 for_each_resource_rcu(resource, &drbd_resources) {
521 for_each_cpu(cpu, resource->cpu_mask)
522 resources_per_cpu[cpu]++;
523 }
524 rcu_read_unlock();
525 for_each_online_cpu(cpu) {
526 if (resources_per_cpu[cpu] < min) {
527 min = resources_per_cpu[cpu];
528 min_index = cpu;
529 }
530 }
531 kfree(resources_per_cpu);
532 }
533 if (min_index == ~0) {
534 cpumask_setall(*cpu_mask);
535 return;
536 }
537 cpumask_set_cpu(min_index, *cpu_mask);
538}
539
540/**
541 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
 542 * @thi: drbd_thread object
544 *
545 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
546 * prematurely.
547 */
548void drbd_thread_current_set_cpu(struct drbd_thread *thi)
549{
550 struct drbd_resource *resource = thi->resource;
551 struct task_struct *p = current;
552
553 if (!thi->reset_cpu_mask)
554 return;
555 thi->reset_cpu_mask = 0;
556 set_cpus_allowed_ptr(p, resource->cpu_mask);
557}
558#else
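/* uniprocessor build: spreading resource threads over CPUs is meaningless, so this is a no-op */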
559#define drbd_calc_cpu_mask(A) ({})
560#endif
561
562/**
563 * drbd_header_size - size of a packet header
564 *
565 * The header size is a multiple of 8, so any payload following the header is
566 * word aligned on 64-bit architectures. (The bitmap send and receive code
567 * relies on this.)
568 */
569unsigned int drbd_header_size(struct drbd_connection *connection)
570{
571 if (connection->agreed_pro_version >= 100) {
572 BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
573 return sizeof(struct p_header100);
574 } else {
575 BUILD_BUG_ON(sizeof(struct p_header80) !=
576 sizeof(struct p_header95));
577 BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header80), 8));
578 return sizeof(struct p_header80);
579 }
580}
581
582static unsigned int prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
583{
584 h->magic = cpu_to_be32(DRBD_MAGIC);
585 h->command = cpu_to_be16(cmd);
586 h->length = cpu_to_be16(size);
587 return sizeof(struct p_header80);
588}
589
590static unsigned int prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
591{
592 h->magic = cpu_to_be16(DRBD_MAGIC_BIG);
593 h->command = cpu_to_be16(cmd);
594 h->length = cpu_to_be32(size);
595 return sizeof(struct p_header95);
596}
597
598static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cmd,
599 int size, int vnr)
600{
601 h->magic = cpu_to_be32(DRBD_MAGIC_100);
602 h->volume = cpu_to_be16(vnr);
603 h->command = cpu_to_be16(cmd);
604 h->length = cpu_to_be32(size);
605 h->pad = 0;
606 return sizeof(struct p_header100);
607}
608
609static unsigned int prepare_header(struct drbd_connection *connection, int vnr,
610 void *buffer, enum drbd_packet cmd, int size)
611{
612 if (connection->agreed_pro_version >= 100)
613 return prepare_header100(buffer, cmd, size, vnr);
614 else if (connection->agreed_pro_version >= 95 &&
615 size > DRBD_MAX_SIZE_H80_PACKET)
616 return prepare_header95(buffer, cmd, size);
617 else
618 return prepare_header80(buffer, cmd, size);
619}
620
621static void *__conn_prepare_command(struct drbd_connection *connection,
622 struct drbd_socket *sock)
623{
624 if (!sock->socket)
625 return NULL;
626 return sock->sbuf + drbd_header_size(connection);
627}
628
629void *conn_prepare_command(struct drbd_connection *connection, struct drbd_socket *sock)
630{
631 void *p;
632
633 mutex_lock(&sock->mutex);
634 p = __conn_prepare_command(connection, sock);
635 if (!p)
636 mutex_unlock(&sock->mutex);
637
638 return p;
639}
640
641void *drbd_prepare_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock)
642{
643 return conn_prepare_command(peer_device->connection, sock);
644}
645
646static int __send_command(struct drbd_connection *connection, int vnr,
647 struct drbd_socket *sock, enum drbd_packet cmd,
648 unsigned int header_size, void *data,
649 unsigned int size)
650{
651 int msg_flags;
652 int err;
653
654 /*
655 * Called with @data == NULL and the size of the data blocks in @size
656 * for commands that send data blocks. For those commands, omit the
657 * MSG_MORE flag: this will increase the likelihood that data blocks
658 * which are page aligned on the sender will end up page aligned on the
659 * receiver.
660 */
661 msg_flags = data ? MSG_MORE : 0;
662
663 header_size += prepare_header(connection, vnr, sock->sbuf, cmd,
664 header_size + size);
665 err = drbd_send_all(connection, sock->socket, sock->sbuf, header_size,
666 msg_flags);
667 if (data && !err)
668 err = drbd_send_all(connection, sock->socket, data, size, 0);
669 /* DRBD protocol "pings" are latency critical.
670 * This is supposed to trigger tcp_push_pending_frames() */
671 if (!err && (cmd == P_PING || cmd == P_PING_ACK))
672 drbd_tcp_nodelay(sock->socket);
673
674 return err;
675}
676
677static int __conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
678 enum drbd_packet cmd, unsigned int header_size,
679 void *data, unsigned int size)
680{
681 return __send_command(connection, 0, sock, cmd, header_size, data, size);
682}
683
684int conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
685 enum drbd_packet cmd, unsigned int header_size,
686 void *data, unsigned int size)
687{
688 int err;
689
690 err = __conn_send_command(connection, sock, cmd, header_size, data, size);
691 mutex_unlock(&sock->mutex);
692 return err;
693}
694
695int drbd_send_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock,
696 enum drbd_packet cmd, unsigned int header_size,
697 void *data, unsigned int size)
698{
699 int err;
700
701 err = __send_command(peer_device->connection, peer_device->device->vnr,
702 sock, cmd, header_size, data, size);
703 mutex_unlock(&sock->mutex);
704 return err;
705}
706
707int drbd_send_ping(struct drbd_connection *connection)
708{
709 struct drbd_socket *sock;
710
711 sock = &connection->meta;
712 if (!conn_prepare_command(connection, sock))
713 return -EIO;
714 return conn_send_command(connection, sock, P_PING, 0, NULL, 0);
715}
716
717int drbd_send_ping_ack(struct drbd_connection *connection)
718{
719 struct drbd_socket *sock;
720
721 sock = &connection->meta;
722 if (!conn_prepare_command(connection, sock))
723 return -EIO;
724 return conn_send_command(connection, sock, P_PING_ACK, 0, NULL, 0);
725}
726
727int drbd_send_sync_param(struct drbd_peer_device *peer_device)
728{
729 struct drbd_socket *sock;
730 struct p_rs_param_95 *p;
731 int size;
732 const int apv = peer_device->connection->agreed_pro_version;
733 enum drbd_packet cmd;
734 struct net_conf *nc;
735 struct disk_conf *dc;
736
737 sock = &peer_device->connection->data;
738 p = drbd_prepare_command(peer_device, sock);
739 if (!p)
740 return -EIO;
741
742 rcu_read_lock();
743 nc = rcu_dereference(peer_device->connection->net_conf);
744
745 size = apv <= 87 ? sizeof(struct p_rs_param)
746 : apv == 88 ? sizeof(struct p_rs_param)
747 + strlen(nc->verify_alg) + 1
748 : apv <= 94 ? sizeof(struct p_rs_param_89)
749 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
750
751 cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
752
753 /* initialize verify_alg and csums_alg */
754 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
755
756 if (get_ldev(peer_device->device)) {
757 dc = rcu_dereference(peer_device->device->ldev->disk_conf);
758 p->resync_rate = cpu_to_be32(dc->resync_rate);
759 p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead);
760 p->c_delay_target = cpu_to_be32(dc->c_delay_target);
761 p->c_fill_target = cpu_to_be32(dc->c_fill_target);
762 p->c_max_rate = cpu_to_be32(dc->c_max_rate);
763 put_ldev(peer_device->device);
764 } else {
765 p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF);
766 p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
767 p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
768 p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
769 p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
770 }
771
772 if (apv >= 88)
773 strcpy(p->verify_alg, nc->verify_alg);
774 if (apv >= 89)
775 strcpy(p->csums_alg, nc->csums_alg);
776 rcu_read_unlock();
777
778 return drbd_send_command(peer_device, sock, cmd, size, NULL, 0);
779}
780
781int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd)
782{
783 struct drbd_socket *sock;
784 struct p_protocol *p;
785 struct net_conf *nc;
786 int size, cf;
787
788 sock = &connection->data;
789 p = __conn_prepare_command(connection, sock);
790 if (!p)
791 return -EIO;
792
793 rcu_read_lock();
794 nc = rcu_dereference(connection->net_conf);
795
796 if (nc->tentative && connection->agreed_pro_version < 92) {
797 rcu_read_unlock();
798 mutex_unlock(&sock->mutex);
799 drbd_err(connection, "--dry-run is not supported by peer");
800 return -EOPNOTSUPP;
801 }
802
803 size = sizeof(*p);
804 if (connection->agreed_pro_version >= 87)
805 size += strlen(nc->integrity_alg) + 1;
806
807 p->protocol = cpu_to_be32(nc->wire_protocol);
808 p->after_sb_0p = cpu_to_be32(nc->after_sb_0p);
809 p->after_sb_1p = cpu_to_be32(nc->after_sb_1p);
810 p->after_sb_2p = cpu_to_be32(nc->after_sb_2p);
811 p->two_primaries = cpu_to_be32(nc->two_primaries);
812 cf = 0;
813 if (nc->discard_my_data)
814 cf |= CF_DISCARD_MY_DATA;
815 if (nc->tentative)
816 cf |= CF_DRY_RUN;
817 p->conn_flags = cpu_to_be32(cf);
818
819 if (connection->agreed_pro_version >= 87)
820 strcpy(p->integrity_alg, nc->integrity_alg);
821 rcu_read_unlock();
822
823 return __conn_send_command(connection, sock, cmd, size, NULL, 0);
824}
825
826int drbd_send_protocol(struct drbd_connection *connection)
827{
828 int err;
829
830 mutex_lock(&connection->data.mutex);
831 err = __drbd_send_protocol(connection, P_PROTOCOL);
832 mutex_unlock(&connection->data.mutex);
833
834 return err;
835}
836
837static int _drbd_send_uuids(struct drbd_peer_device *peer_device, u64 uuid_flags)
838{
839 struct drbd_device *device = peer_device->device;
840 struct drbd_socket *sock;
841 struct p_uuids *p;
842 int i;
843
844 if (!get_ldev_if_state(device, D_NEGOTIATING))
845 return 0;
846
847 sock = &peer_device->connection->data;
848 p = drbd_prepare_command(peer_device, sock);
849 if (!p) {
850 put_ldev(device);
851 return -EIO;
852 }
853 spin_lock_irq(&device->ldev->md.uuid_lock);
854 for (i = UI_CURRENT; i < UI_SIZE; i++)
855 p->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
856 spin_unlock_irq(&device->ldev->md.uuid_lock);
857
858 device->comm_bm_set = drbd_bm_total_weight(device);
859 p->uuid[UI_SIZE] = cpu_to_be64(device->comm_bm_set);
860 rcu_read_lock();
861 uuid_flags |= rcu_dereference(peer_device->connection->net_conf)->discard_my_data ? 1 : 0;
862 rcu_read_unlock();
863 uuid_flags |= test_bit(CRASHED_PRIMARY, &device->flags) ? 2 : 0;
864 uuid_flags |= device->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
865 p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
866
867 put_ldev(device);
868 return drbd_send_command(peer_device, sock, P_UUIDS, sizeof(*p), NULL, 0);
869}
870
871int drbd_send_uuids(struct drbd_peer_device *peer_device)
872{
873 return _drbd_send_uuids(peer_device, 0);
874}
875
876int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *peer_device)
877{
878 return _drbd_send_uuids(peer_device, 8);
879}
880
881void drbd_print_uuids(struct drbd_device *device, const char *text)
882{
883 if (get_ldev_if_state(device, D_NEGOTIATING)) {
884 u64 *uuid = device->ldev->md.uuid;
885 drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX\n",
886 text,
887 (unsigned long long)uuid[UI_CURRENT],
888 (unsigned long long)uuid[UI_BITMAP],
889 (unsigned long long)uuid[UI_HISTORY_START],
890 (unsigned long long)uuid[UI_HISTORY_END]);
891 put_ldev(device);
892 } else {
893 drbd_info(device, "%s effective data uuid: %016llX\n",
894 text,
895 (unsigned long long)device->ed_uuid);
896 }
897}
898
899void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *peer_device)
900{
901 struct drbd_device *device = peer_device->device;
902 struct drbd_socket *sock;
903 struct p_rs_uuid *p;
904 u64 uuid;
905
906 D_ASSERT(device, device->state.disk == D_UP_TO_DATE);
907
908 uuid = device->ldev->md.uuid[UI_BITMAP];
909 if (uuid && uuid != UUID_JUST_CREATED)
910 uuid = uuid + UUID_NEW_BM_OFFSET;
911 else
912 get_random_bytes(&uuid, sizeof(u64));
913 drbd_uuid_set(device, UI_BITMAP, uuid);
914 drbd_print_uuids(device, "updated sync UUID");
915 drbd_md_sync(device);
916
917 sock = &peer_device->connection->data;
918 p = drbd_prepare_command(peer_device, sock);
919 if (p) {
920 p->uuid = cpu_to_be64(uuid);
921 drbd_send_command(peer_device, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
922 }
923}
924
925/* communicated if (agreed_features & DRBD_FF_WSAME) */
926static void
927assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p,
928 struct request_queue *q)
929{
930 if (q) {
931 p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q));
932 p->qlim->logical_block_size = cpu_to_be32(queue_logical_block_size(q));
933 p->qlim->alignment_offset = cpu_to_be32(queue_alignment_offset(q));
934 p->qlim->io_min = cpu_to_be32(queue_io_min(q));
935 p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
936 p->qlim->discard_enabled = blk_queue_discard(q);
937 p->qlim->write_same_capable = !!q->limits.max_write_same_sectors;
938 } else {
939 q = device->rq_queue;
940 p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q));
941 p->qlim->logical_block_size = cpu_to_be32(queue_logical_block_size(q));
942 p->qlim->alignment_offset = 0;
943 p->qlim->io_min = cpu_to_be32(queue_io_min(q));
944 p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
945 p->qlim->discard_enabled = 0;
946 p->qlim->write_same_capable = 0;
947 }
948}
949
950int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enum dds_flags flags)
951{
952 struct drbd_device *device = peer_device->device;
953 struct drbd_socket *sock;
954 struct p_sizes *p;
955 sector_t d_size, u_size;
956 int q_order_type;
957 unsigned int max_bio_size;
958 unsigned int packet_size;
959
960 sock = &peer_device->connection->data;
961 p = drbd_prepare_command(peer_device, sock);
962 if (!p)
963 return -EIO;
964
965 packet_size = sizeof(*p);
966 if (peer_device->connection->agreed_features & DRBD_FF_WSAME)
967 packet_size += sizeof(p->qlim[0]);
968
969 memset(p, 0, packet_size);
970 if (get_ldev_if_state(device, D_NEGOTIATING)) {
971 struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev);
972 d_size = drbd_get_max_capacity(device->ldev);
973 rcu_read_lock();
974 u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
975 rcu_read_unlock();
976 q_order_type = drbd_queue_order_type(device);
977 max_bio_size = queue_max_hw_sectors(q) << 9;
978 max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
979 assign_p_sizes_qlim(device, p, q);
980 put_ldev(device);
981 } else {
982 d_size = 0;
983 u_size = 0;
984 q_order_type = QUEUE_ORDERED_NONE;
985 max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
986 assign_p_sizes_qlim(device, p, NULL);
987 }
988
989 if (peer_device->connection->agreed_pro_version <= 94)
990 max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
991 else if (peer_device->connection->agreed_pro_version < 100)
992 max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE_P95);
993
994 p->d_size = cpu_to_be64(d_size);
995 p->u_size = cpu_to_be64(u_size);
996 p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(device->this_bdev));
997 p->max_bio_size = cpu_to_be32(max_bio_size);
998 p->queue_order_type = cpu_to_be16(q_order_type);
999 p->dds_flags = cpu_to_be16(flags);
1000
1001 return drbd_send_command(peer_device, sock, P_SIZES, packet_size, NULL, 0);
1002}
1003
1004/**
1005 * drbd_send_current_state() - Sends the drbd state to the peer
1006 * @peer_device: DRBD peer device.
1007 */
1008int drbd_send_current_state(struct drbd_peer_device *peer_device)
1009{
1010 struct drbd_socket *sock;
1011 struct p_state *p;
1012
1013 sock = &peer_device->connection->data;
1014 p = drbd_prepare_command(peer_device, sock);
1015 if (!p)
1016 return -EIO;
1017 p->state = cpu_to_be32(peer_device->device->state.i); /* Within the send mutex */
1018 return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
1019}
1020
1021/**
1022 * drbd_send_state() - After a state change, sends the new state to the peer
1023 * @peer_device: DRBD peer device.
1024 * @state: the state to send, not necessarily the current state.
1025 *
1026 * Each state change queues an "after_state_ch" work, which will eventually
1027 * send the resulting new state to the peer. If more state changes happen
1028 * between queuing and processing of the after_state_ch work, we still
1029 * want to send each intermediary state in the order it occurred.
1030 */
1031int drbd_send_state(struct drbd_peer_device *peer_device, union drbd_state state)
1032{
1033 struct drbd_socket *sock;
1034 struct p_state *p;
1035
1036 sock = &peer_device->connection->data;
1037 p = drbd_prepare_command(peer_device, sock);
1038 if (!p)
1039 return -EIO;
1040 p->state = cpu_to_be32(state.i); /* Within the send mutex */
1041 return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
1042}
1043
1044int drbd_send_state_req(struct drbd_peer_device *peer_device, union drbd_state mask, union drbd_state val)
1045{
1046 struct drbd_socket *sock;
1047 struct p_req_state *p;
1048
1049 sock = &peer_device->connection->data;
1050 p = drbd_prepare_command(peer_device, sock);
1051 if (!p)
1052 return -EIO;
1053 p->mask = cpu_to_be32(mask.i);
1054 p->val = cpu_to_be32(val.i);
1055 return drbd_send_command(peer_device, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
1056}
1057
1058int conn_send_state_req(struct drbd_connection *connection, union drbd_state mask, union drbd_state val)
1059{
1060 enum drbd_packet cmd;
1061 struct drbd_socket *sock;
1062 struct p_req_state *p;
1063
1064 cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
1065 sock = &connection->data;
1066 p = conn_prepare_command(connection, sock);
1067 if (!p)
1068 return -EIO;
1069 p->mask = cpu_to_be32(mask.i);
1070 p->val = cpu_to_be32(val.i);
1071 return conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
1072}
1073
1074void drbd_send_sr_reply(struct drbd_peer_device *peer_device, enum drbd_state_rv retcode)
1075{
1076 struct drbd_socket *sock;
1077 struct p_req_state_reply *p;
1078
1079 sock = &peer_device->connection->meta;
1080 p = drbd_prepare_command(peer_device, sock);
1081 if (p) {
1082 p->retcode = cpu_to_be32(retcode);
1083 drbd_send_command(peer_device, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
1084 }
1085}
1086
1087void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode)
1088{
1089 struct drbd_socket *sock;
1090 struct p_req_state_reply *p;
1091 enum drbd_packet cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;
1092
1093 sock = &connection->meta;
1094 p = conn_prepare_command(connection, sock);
1095 if (p) {
1096 p->retcode = cpu_to_be32(retcode);
1097 conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
1098 }
1099}
1100
1101static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
1102{
1103 BUG_ON(code & ~0xf);
1104 p->encoding = (p->encoding & ~0xf) | code;
1105}
1106
1107static void dcbp_set_start(struct p_compressed_bm *p, int set)
1108{
1109 p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
1110}
1111
1112static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
1113{
1114 BUG_ON(n & ~0x7);
1115 p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
1116}
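/* Layout of p->encoding as written by the helpers above: bits 0-3 hold the
 * bitmap encoding code, bits 4-6 the number of pad bits in the last byte,
 * and bit 7 the value of the first run (set/unset). */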
1117
1118static int fill_bitmap_rle_bits(struct drbd_device *device,
1119 struct p_compressed_bm *p,
1120 unsigned int size,
1121 struct bm_xfer_ctx *c)
1122{
1123 struct bitstream bs;
1124 unsigned long plain_bits;
1125 unsigned long tmp;
1126 unsigned long rl;
1127 unsigned len;
1128 unsigned toggle;
1129 int bits, use_rle;
1130
1131 /* may we use this feature? */
1132 rcu_read_lock();
1133 use_rle = rcu_dereference(first_peer_device(device)->connection->net_conf)->use_rle;
1134 rcu_read_unlock();
1135 if (!use_rle || first_peer_device(device)->connection->agreed_pro_version < 90)
1136 return 0;
1137
1138 if (c->bit_offset >= c->bm_bits)
1139 return 0; /* nothing to do. */
1140
 1141 /* use at most this many bytes */
1142 bitstream_init(&bs, p->code, size, 0);
1143 memset(p->code, 0, size);
1144 /* plain bits covered in this code string */
1145 plain_bits = 0;
1146
1147 /* p->encoding & 0x80 stores whether the first run length is set.
1148 * bit offset is implicit.
1149 * start with toggle == 2 to be able to tell the first iteration */
1150 toggle = 2;
1151
 1152 /* see how many plain bits we can stuff into one packet
1153 * using RLE and VLI. */
1154 do {
1155 tmp = (toggle == 0) ? _drbd_bm_find_next_zero(device, c->bit_offset)
1156 : _drbd_bm_find_next(device, c->bit_offset);
1157 if (tmp == -1UL)
1158 tmp = c->bm_bits;
1159 rl = tmp - c->bit_offset;
1160
1161 if (toggle == 2) { /* first iteration */
1162 if (rl == 0) {
1163 /* the first checked bit was set,
1164 * store start value, */
1165 dcbp_set_start(p, 1);
1166 /* but skip encoding of zero run length */
1167 toggle = !toggle;
1168 continue;
1169 }
1170 dcbp_set_start(p, 0);
1171 }
1172
1173 /* paranoia: catch zero runlength.
1174 * can only happen if bitmap is modified while we scan it. */
1175 if (rl == 0) {
1176 drbd_err(device, "unexpected zero runlength while encoding bitmap "
1177 "t:%u bo:%lu\n", toggle, c->bit_offset);
1178 return -1;
1179 }
1180
1181 bits = vli_encode_bits(&bs, rl);
1182 if (bits == -ENOBUFS) /* buffer full */
1183 break;
1184 if (bits <= 0) {
1185 drbd_err(device, "error while encoding bitmap: %d\n", bits);
1186 return 0;
1187 }
1188
1189 toggle = !toggle;
1190 plain_bits += rl;
1191 c->bit_offset = tmp;
1192 } while (c->bit_offset < c->bm_bits);
1193
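	/* bytes used in the code buffer; a partially filled last byte counts as one */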
1194 len = bs.cur.b - p->code + !!bs.cur.bit;
1195
1196 if (plain_bits < (len << 3)) {
1197 /* incompressible with this method.
1198 * we need to rewind both word and bit position. */
1199 c->bit_offset -= plain_bits;
1200 bm_xfer_ctx_bit_to_word_offset(c);
1201 c->bit_offset = c->word_offset * BITS_PER_LONG;
1202 return 0;
1203 }
1204
1205 /* RLE + VLI was able to compress it just fine.
1206 * update c->word_offset. */
1207 bm_xfer_ctx_bit_to_word_offset(c);
1208
1209 /* store pad_bits */
1210 dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
1211
1212 return len;
1213}
1214
1215/**
1216 * send_bitmap_rle_or_plain
1217 *
1218 * Return 0 when done, 1 when another iteration is needed, and a negative error
1219 * code upon failure.
1220 */
1221static int
1222send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
1223{
1224 struct drbd_socket *sock = &first_peer_device(device)->connection->data;
1225 unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
1226 struct p_compressed_bm *p = sock->sbuf + header_size;
1227 int len, err;
1228
1229 len = fill_bitmap_rle_bits(device, p,
1230 DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);
1231 if (len < 0)
1232 return -EIO;
1233
1234 if (len) {
1235 dcbp_set_code(p, RLE_VLI_Bits);
1236 err = __send_command(first_peer_device(device)->connection, device->vnr, sock,
1237 P_COMPRESSED_BITMAP, sizeof(*p) + len,
1238 NULL, 0);
1239 c->packets[0]++;
1240 c->bytes[0] += header_size + sizeof(*p) + len;
1241
1242 if (c->bit_offset >= c->bm_bits)
1243 len = 0; /* DONE */
1244 } else {
1245 /* was not compressible.
1246 * send a buffer full of plain text bits instead. */
1247 unsigned int data_size;
1248 unsigned long num_words;
1249 unsigned long *p = sock->sbuf + header_size;
1250
1251 data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
1252 num_words = min_t(size_t, data_size / sizeof(*p),
1253 c->bm_words - c->word_offset);
1254 len = num_words * sizeof(*p);
1255 if (len)
1256 drbd_bm_get_lel(device, c->word_offset, num_words, p);
1257 err = __send_command(first_peer_device(device)->connection, device->vnr, sock, P_BITMAP, len, NULL, 0);
1258 c->word_offset += num_words;
1259 c->bit_offset = c->word_offset * BITS_PER_LONG;
1260
1261 c->packets[1]++;
1262 c->bytes[1] += header_size + len;
1263
1264 if (c->bit_offset > c->bm_bits)
1265 c->bit_offset = c->bm_bits;
1266 }
1267 if (!err) {
1268 if (len == 0) {
1269 INFO_bm_xfer_stats(device, "send", c);
1270 return 0;
1271 } else
1272 return 1;
1273 }
1274 return -EIO;
1275}
1276
1277/* See the comment at receive_bitmap() */
1278static int _drbd_send_bitmap(struct drbd_device *device)
1279{
1280 struct bm_xfer_ctx c;
1281 int err;
1282
1283 if (!expect(device->bitmap))
1284 return false;
1285
1286 if (get_ldev(device)) {
1287 if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC)) {
1288 drbd_info(device, "Writing the whole bitmap, MDF_FullSync was set.\n");
1289 drbd_bm_set_all(device);
1290 if (drbd_bm_write(device)) {
1291 /* write_bm did fail! Leave full sync flag set in Meta P_DATA
1292 * but otherwise process as per normal - need to tell other
1293 * side that a full resync is required! */
1294 drbd_err(device, "Failed to write bitmap to disk!\n");
1295 } else {
1296 drbd_md_clear_flag(device, MDF_FULL_SYNC);
1297 drbd_md_sync(device);
1298 }
1299 }
1300 put_ldev(device);
1301 }
1302
1303 c = (struct bm_xfer_ctx) {
1304 .bm_bits = drbd_bm_bits(device),
1305 .bm_words = drbd_bm_words(device),
1306 };
1307
1308 do {
1309 err = send_bitmap_rle_or_plain(device, &c);
1310 } while (err > 0);
1311
1312 return err == 0;
1313}
1314
1315int drbd_send_bitmap(struct drbd_device *device)
1316{
1317 struct drbd_socket *sock = &first_peer_device(device)->connection->data;
1318 int err = -1;
1319
1320 mutex_lock(&sock->mutex);
1321 if (sock->socket)
1322 err = !_drbd_send_bitmap(device);
1323 mutex_unlock(&sock->mutex);
1324 return err;
1325}
1326
1327void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr, u32 set_size)
1328{
1329 struct drbd_socket *sock;
1330 struct p_barrier_ack *p;
1331
1332 if (connection->cstate < C_WF_REPORT_PARAMS)
1333 return;
1334
1335 sock = &connection->meta;
1336 p = conn_prepare_command(connection, sock);
1337 if (!p)
1338 return;
1339 p->barrier = barrier_nr;
1340 p->set_size = cpu_to_be32(set_size);
1341 conn_send_command(connection, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
1342}
1343
1344/**
1345 * _drbd_send_ack() - Sends an ack packet
 1346 * @peer_device: DRBD peer device.
 1347 * @cmd: Packet command code.
 1348 * @sector: sector, needs to be in big endian byte order
 1349 * @blksize: size in bytes, needs to be in big endian byte order
1350 * @block_id: Id, big endian byte order
1351 */
1352static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
1353 u64 sector, u32 blksize, u64 block_id)
1354{
1355 struct drbd_socket *sock;
1356 struct p_block_ack *p;
1357
1358 if (peer_device->device->state.conn < C_CONNECTED)
1359 return -EIO;
1360
1361 sock = &peer_device->connection->meta;
1362 p = drbd_prepare_command(peer_device, sock);
1363 if (!p)
1364 return -EIO;
1365 p->sector = sector;
1366 p->block_id = block_id;
1367 p->blksize = blksize;
1368 p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
1369 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
1370}
1371
1372/* dp->sector and dp->block_id already/still in network byte order,
1373 * data_size is payload size according to dp->head,
1374 * and may need to be corrected for digest size. */
1375void drbd_send_ack_dp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
1376 struct p_data *dp, int data_size)
1377{
1378 if (peer_device->connection->peer_integrity_tfm)
1379 data_size -= crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm);
1380 _drbd_send_ack(peer_device, cmd, dp->sector, cpu_to_be32(data_size),
1381 dp->block_id);
1382}
1383
1384void drbd_send_ack_rp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
1385 struct p_block_req *rp)
1386{
1387 _drbd_send_ack(peer_device, cmd, rp->sector, rp->blksize, rp->block_id);
1388}
1389
1390/**
1391 * drbd_send_ack() - Sends an ack packet
 1392 * @peer_device: DRBD peer device
1393 * @cmd: packet command code
1394 * @peer_req: peer request
1395 */
1396int drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
1397 struct drbd_peer_request *peer_req)
1398{
1399 return _drbd_send_ack(peer_device, cmd,
1400 cpu_to_be64(peer_req->i.sector),
1401 cpu_to_be32(peer_req->i.size),
1402 peer_req->block_id);
1403}
1404
1405/* This function misuses the block_id field to signal if the blocks
 1406 * are in sync or not. */
1407int drbd_send_ack_ex(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
1408 sector_t sector, int blksize, u64 block_id)
1409{
1410 return _drbd_send_ack(peer_device, cmd,
1411 cpu_to_be64(sector),
1412 cpu_to_be32(blksize),
1413 cpu_to_be64(block_id));
1414}
1415
1416int drbd_send_rs_deallocated(struct drbd_peer_device *peer_device,
1417 struct drbd_peer_request *peer_req)
1418{
1419 struct drbd_socket *sock;
1420 struct p_block_desc *p;
1421
1422 sock = &peer_device->connection->data;
1423 p = drbd_prepare_command(peer_device, sock);
1424 if (!p)
1425 return -EIO;
1426 p->sector = cpu_to_be64(peer_req->i.sector);
1427 p->blksize = cpu_to_be32(peer_req->i.size);
1428 p->pad = 0;
1429 return drbd_send_command(peer_device, sock, P_RS_DEALLOCATED, sizeof(*p), NULL, 0);
1430}
1431
1432int drbd_send_drequest(struct drbd_peer_device *peer_device, int cmd,
1433 sector_t sector, int size, u64 block_id)
1434{
1435 struct drbd_socket *sock;
1436 struct p_block_req *p;
1437
1438 sock = &peer_device->connection->data;
1439 p = drbd_prepare_command(peer_device, sock);
1440 if (!p)
1441 return -EIO;
1442 p->sector = cpu_to_be64(sector);
1443 p->block_id = block_id;
1444 p->blksize = cpu_to_be32(size);
1445 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
1446}
1447
1448int drbd_send_drequest_csum(struct drbd_peer_device *peer_device, sector_t sector, int size,
1449 void *digest, int digest_size, enum drbd_packet cmd)
1450{
1451 struct drbd_socket *sock;
1452 struct p_block_req *p;
1453
1454 /* FIXME: Put the digest into the preallocated socket buffer. */
1455
1456 sock = &peer_device->connection->data;
1457 p = drbd_prepare_command(peer_device, sock);
1458 if (!p)
1459 return -EIO;
1460 p->sector = cpu_to_be64(sector);
1461 p->block_id = ID_SYNCER /* unused */;
1462 p->blksize = cpu_to_be32(size);
1463 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), digest, digest_size);
1464}
1465
1466int drbd_send_ov_request(struct drbd_peer_device *peer_device, sector_t sector, int size)
1467{
1468 struct drbd_socket *sock;
1469 struct p_block_req *p;
1470
1471 sock = &peer_device->connection->data;
1472 p = drbd_prepare_command(peer_device, sock);
1473 if (!p)
1474 return -EIO;
1475 p->sector = cpu_to_be64(sector);
1476 p->block_id = ID_SYNCER /* unused */;
1477 p->blksize = cpu_to_be32(size);
1478 return drbd_send_command(peer_device, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
1479}
1480
1481/* called on sndtimeo
1482 * returns false if we should retry,
1483 * true if we think connection is dead
1484 */
1485static int we_should_drop_the_connection(struct drbd_connection *connection, struct socket *sock)
1486{
1487 int drop_it;
1488 /* long elapsed = (long)(jiffies - device->last_received); */
1489
1490 drop_it = connection->meta.socket == sock
1491 || !connection->ack_receiver.task
1492 || get_t_state(&connection->ack_receiver) != RUNNING
1493 || connection->cstate < C_WF_REPORT_PARAMS;
1494
1495 if (drop_it)
1496 return true;
1497
1498 drop_it = !--connection->ko_count;
1499 if (!drop_it) {
1500 drbd_err(connection, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
1501 current->comm, current->pid, connection->ko_count);
1502 request_ping(connection);
1503 }
1504
1505 return drop_it; /* && (device->state == R_PRIMARY) */;
1506}
1507
1508static void drbd_update_congested(struct drbd_connection *connection)
1509{
1510 struct sock *sk = connection->data.socket->sk;
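	/* consider the connection congested once the send queue exceeds 80% of the socket send buffer */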
1511 if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
1512 set_bit(NET_CONGESTED, &connection->flags);
1513}
1514
1515/* The idea of sendpage seems to be to put some kind of reference
1516 * to the page into the skb, and to hand it over to the NIC. In
1517 * this process get_page() gets called.
1518 *
1519 * As soon as the page was really sent over the network put_page()
1520 * gets called by some part of the network layer. [ NIC driver? ]
1521 *
1522 * [ get_page() / put_page() increment/decrement the count. If count
1523 * reaches 0 the page will be freed. ]
1524 *
1525 * This works nicely with pages from FSs.
1526 * But this means that in protocol A we might signal IO completion too early!
1527 *
1528 * In order not to corrupt data during a resync we must make sure
 1529 * that we do not reuse our own buffer pages (EEs) too early, therefore
1530 * we have the net_ee list.
1531 *
1532 * XFS seems to have problems, still, it submits pages with page_count == 0!
1533 * As a workaround, we disable sendpage on pages
1534 * with page_count == 0 or PageSlab.
1535 */
1536static int _drbd_no_send_page(struct drbd_peer_device *peer_device, struct page *page,
1537 int offset, size_t size, unsigned msg_flags)
1538{
1539 struct socket *socket;
1540 void *addr;
1541 int err;
1542
1543 socket = peer_device->connection->data.socket;
1544 addr = kmap(page) + offset;
1545 err = drbd_send_all(peer_device->connection, socket, addr, size, msg_flags);
1546 kunmap(page);
1547 if (!err)
1548 peer_device->device->send_cnt += size >> 9;
1549 return err;
1550}
1551
1552static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *page,
1553 int offset, size_t size, unsigned msg_flags)
1554{
1555 struct socket *socket = peer_device->connection->data.socket;
1556 int len = size;
1557 int err = -EIO;
1558
1559 /* e.g. XFS meta- & log-data is in slab pages, which have a
1560 * page_count of 0 and/or have PageSlab() set.
1561 * we cannot use send_page for those, as that does get_page();
1562 * put_page(); and would cause either a VM_BUG directly, or
1563 * __page_cache_release a page that would actually still be referenced
1564 * by someone, leading to some obscure delayed Oops somewhere else. */
1565 if (drbd_disable_sendpage || (page_count(page) < 1) || PageSlab(page))
1566 return _drbd_no_send_page(peer_device, page, offset, size, msg_flags);
1567
1568 msg_flags |= MSG_NOSIGNAL;
1569 drbd_update_congested(peer_device->connection);
1570 do {
1571 int sent;
1572
1573 sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
1574 if (sent <= 0) {
1575 if (sent == -EAGAIN) {
1576 if (we_should_drop_the_connection(peer_device->connection, socket))
1577 break;
1578 continue;
1579 }
1580 drbd_warn(peer_device->device, "%s: size=%d len=%d sent=%d\n",
1581 __func__, (int)size, len, sent);
1582 if (sent < 0)
1583 err = sent;
1584 break;
1585 }
1586 len -= sent;
1587 offset += sent;
1588 } while (len > 0 /* THINK && device->cstate >= C_CONNECTED*/);
1589 clear_bit(NET_CONGESTED, &peer_device->connection->flags);
1590
1591 if (len == 0) {
1592 err = 0;
1593 peer_device->device->send_cnt += size >> 9;
1594 }
1595 return err;
1596}
1597
1598static int _drbd_send_bio(struct drbd_peer_device *peer_device, struct bio *bio)
1599{
1600 struct bio_vec bvec;
1601 struct bvec_iter iter;
1602
1603 /* hint all but last page with MSG_MORE */
1604 bio_for_each_segment(bvec, bio, iter) {
1605 int err;
1606
1607 err = _drbd_no_send_page(peer_device, bvec.bv_page,
1608 bvec.bv_offset, bvec.bv_len,
1609 bio_iter_last(bvec, iter)
1610 ? 0 : MSG_MORE);
1611 if (err)
1612 return err;
1613 /* REQ_OP_WRITE_SAME has only one segment */
1614 if (bio_op(bio) == REQ_OP_WRITE_SAME)
1615 break;
1616 }
1617 return 0;
1618}
1619
1620static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *bio)
1621{
1622 struct bio_vec bvec;
1623 struct bvec_iter iter;
1624
1625 /* hint all but last page with MSG_MORE */
1626 bio_for_each_segment(bvec, bio, iter) {
1627 int err;
1628
1629 err = _drbd_send_page(peer_device, bvec.bv_page,
1630 bvec.bv_offset, bvec.bv_len,
1631 bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
1632 if (err)
1633 return err;
1634 /* REQ_OP_WRITE_SAME has only one segment */
1635 if (bio_op(bio) == REQ_OP_WRITE_SAME)
1636 break;
1637 }
1638 return 0;
1639}
1640
1641static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
1642 struct drbd_peer_request *peer_req)
1643{
1644 struct page *page = peer_req->pages;
1645 unsigned len = peer_req->i.size;
1646 int err;
1647
1648 /* hint all but last page with MSG_MORE */
1649 page_chain_for_each(page) {
1650 unsigned l = min_t(unsigned, len, PAGE_SIZE);
1651
1652 err = _drbd_send_page(peer_device, page, 0, l,
1653 page_chain_next(page) ? MSG_MORE : 0);
1654 if (err)
1655 return err;
1656 len -= l;
1657 }
1658 return 0;
1659}
1660
1661static u32 bio_flags_to_wire(struct drbd_connection *connection,
1662 struct bio *bio)
1663{
1664 if (connection->agreed_pro_version >= 95)
1665 return (bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0) |
1666 (bio->bi_opf & REQ_FUA ? DP_FUA : 0) |
1667 (bio->bi_opf & REQ_PREFLUSH ? DP_FLUSH : 0) |
1668 (bio_op(bio) == REQ_OP_WRITE_SAME ? DP_WSAME : 0) |
1669 (bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0) |
1670 (bio_op(bio) == REQ_OP_WRITE_ZEROES ? DP_DISCARD : 0);
1671 else
1672 return bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0;
1673}
1674
1675/* Used to send write or TRIM aka REQ_DISCARD requests
1676 * R_PRIMARY -> Peer (P_DATA, P_TRIM)
1677 */
1678int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *req)
1679{
1680 struct drbd_device *device = peer_device->device;
1681 struct drbd_socket *sock;
1682 struct p_data *p;
1683 struct p_wsame *wsame = NULL;
1684 void *digest_out;
1685 unsigned int dp_flags = 0;
1686 int digest_size;
1687 int err;
1688
1689 sock = &peer_device->connection->data;
1690 p = drbd_prepare_command(peer_device, sock);
1691 digest_size = peer_device->connection->integrity_tfm ?
1692 crypto_ahash_digestsize(peer_device->connection->integrity_tfm) : 0;
1693
1694 if (!p)
1695 return -EIO;
1696 p->sector = cpu_to_be64(req->i.sector);
1697 p->block_id = (unsigned long)req;
1698 p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
1699 dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio);
1700 if (device->state.conn >= C_SYNC_SOURCE &&
1701 device->state.conn <= C_PAUSED_SYNC_T)
1702 dp_flags |= DP_MAY_SET_IN_SYNC;
1703 if (peer_device->connection->agreed_pro_version >= 100) {
1704 if (req->rq_state & RQ_EXP_RECEIVE_ACK)
1705 dp_flags |= DP_SEND_RECEIVE_ACK;
1706 /* During resync, request an explicit write ack,
1707 * even in protocol != C */
1708 if (req->rq_state & RQ_EXP_WRITE_ACK
1709 || (dp_flags & DP_MAY_SET_IN_SYNC))
1710 dp_flags |= DP_SEND_WRITE_ACK;
1711 }
1712 p->dp_flags = cpu_to_be32(dp_flags);
1713
1714 if (dp_flags & DP_DISCARD) {
1715 struct p_trim *t = (struct p_trim*)p;
1716 t->size = cpu_to_be32(req->i.size);
1717 err = __send_command(peer_device->connection, device->vnr, sock, P_TRIM, sizeof(*t), NULL, 0);
1718 goto out;
1719 }
1720 if (dp_flags & DP_WSAME) {
1721 /* this will only work if DRBD_FF_WSAME is set AND the
1722 * handshake agreed that all nodes and backend devices are
1723 * WRITE_SAME capable and agree on logical_block_size */
1724 wsame = (struct p_wsame*)p;
1725 digest_out = wsame + 1;
1726 wsame->size = cpu_to_be32(req->i.size);
1727 } else
1728 digest_out = p + 1;
1729
1730 /* our digest is still only over the payload.
1731 * TRIM does not carry any payload. */
1732 if (digest_size)
1733 drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest_out);
1734 if (wsame) {
1735 err =
1736 __send_command(peer_device->connection, device->vnr, sock, P_WSAME,
1737 sizeof(*wsame) + digest_size, NULL,
1738 bio_iovec(req->master_bio).bv_len);
1739 } else
1740 err =
1741 __send_command(peer_device->connection, device->vnr, sock, P_DATA,
1742 sizeof(*p) + digest_size, NULL, req->i.size);
1743 if (!err) {
1744 /* For protocol A, we have to memcpy the payload into
1745 * socket buffers, as we may complete right away
1746 * as soon as we handed it over to tcp, at which point the data
1747 * pages may become invalid.
1748 *
1749 * If data integrity is enabled, we copy it as well, so that even
1750 * if the bio pages are still being modified, the data on the wire
1751 * does not change; thus, if the digest checks out ok after sending
1752 * on this side but does not match on the receiving side, we know
1753 * the corruption happened elsewhere.
1754 */
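/* In other words (sketch of the branch below): protocol A requests (neither
 * RQ_EXP_RECEIVE_ACK nor RQ_EXP_WRITE_ACK set) and integrity-checked requests
 * take _drbd_send_bio(), which copies the pages into the socket buffer;
 * everything else goes zero-copy via _drbd_send_zc_bio(). */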
1755 if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || digest_size)
1756 err = _drbd_send_bio(peer_device, req->master_bio);
1757 else
1758 err = _drbd_send_zc_bio(peer_device, req->master_bio);
1759
1760 /* double check digest, sometimes buffers have been modified in flight. */
1761 if (digest_size > 0 && digest_size <= 64) {
1762 /* 64 byte, 512 bit, is the largest digest size
1763 * currently supported in kernel crypto. */
1764 unsigned char digest[64];
1765 drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest);
1766 if (memcmp(p + 1, digest, digest_size)) {
1767 drbd_warn(device,
1768 "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
1769 (unsigned long long)req->i.sector, req->i.size);
1770 }
1771 } /* else if (digest_size > 64) {
1772 ... Be noisy about digest too large ...
1773 } */
1774 }
1775out:
1776 mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
1777
1778 return err;
1779}
1780
1781/* answer packet, used to send data back for read requests:
1782 * Peer -> (diskless) R_PRIMARY (P_DATA_REPLY)
1783 * C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
1784 */
1785int drbd_send_block(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
1786 struct drbd_peer_request *peer_req)
1787{
1788 struct drbd_device *device = peer_device->device;
1789 struct drbd_socket *sock;
1790 struct p_data *p;
1791 int err;
1792 int digest_size;
1793
1794 sock = &peer_device->connection->data;
1795 p = drbd_prepare_command(peer_device, sock);
1796
1797 digest_size = peer_device->connection->integrity_tfm ?
1798 crypto_ahash_digestsize(peer_device->connection->integrity_tfm) : 0;
1799
1800 if (!p)
1801 return -EIO;
1802 p->sector = cpu_to_be64(peer_req->i.sector);
1803 p->block_id = peer_req->block_id;
1804 p->seq_num = 0; /* unused */
1805 p->dp_flags = 0;
1806 if (digest_size)
1807 drbd_csum_ee(peer_device->connection->integrity_tfm, peer_req, p + 1);
1808 err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*p) + digest_size, NULL, peer_req->i.size);
1809 if (!err)
1810 err = _drbd_send_zc_ee(peer_device, peer_req);
1811 mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
1812
1813 return err;
1814}
1815
1816int drbd_send_out_of_sync(struct drbd_peer_device *peer_device, struct drbd_request *req)
1817{
1818 struct drbd_socket *sock;
1819 struct p_block_desc *p;
1820
1821 sock = &peer_device->connection->data;
1822 p = drbd_prepare_command(peer_device, sock);
1823 if (!p)
1824 return -EIO;
1825 p->sector = cpu_to_be64(req->i.sector);
1826 p->blksize = cpu_to_be32(req->i.size);
1827 return drbd_send_command(peer_device, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
1828}
1829
1830/*
1831 drbd_send distinguishes two cases:
1832
1833 Packets sent via the data socket "sock"
1834 and packets sent via the meta data socket "msock"
1835
1836 sock msock
1837 -----------------+-------------------------+------------------------------
1838 timeout conf.timeout / 2 conf.timeout / 2
1839 timeout action send a ping via msock Abort communication
1840 and close all sockets
1841*/
1842
1843/*
1844 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
1845 */
1846int drbd_send(struct drbd_connection *connection, struct socket *sock,
1847 void *buf, size_t size, unsigned msg_flags)
1848{
1849 struct kvec iov = {.iov_base = buf, .iov_len = size};
1850 struct msghdr msg;
1851 int rv, sent = 0;
1852
1853 if (!sock)
1854 return -EBADR;
1855
1856 /* THINK if (signal_pending) return ... ? */
1857
1858 msg.msg_name = NULL;
1859 msg.msg_namelen = 0;
1860 msg.msg_control = NULL;
1861 msg.msg_controllen = 0;
1862 msg.msg_flags = msg_flags | MSG_NOSIGNAL;
1863
1864 iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, size);
1865
1866 if (sock == connection->data.socket) {
1867 rcu_read_lock();
1868 connection->ko_count = rcu_dereference(connection->net_conf)->ko_count;
1869 rcu_read_unlock();
1870 drbd_update_congested(connection);
1871 }
1872 do {
1873 rv = sock_sendmsg(sock, &msg);
1874 if (rv == -EAGAIN) {
1875 if (we_should_drop_the_connection(connection, sock))
1876 break;
1877 else
1878 continue;
1879 }
1880 if (rv == -EINTR) {
1881 flush_signals(current);
1882 rv = 0;
1883 }
1884 if (rv < 0)
1885 break;
1886 sent += rv;
1887 } while (sent < size);
1888
1889 if (sock == connection->data.socket)
1890 clear_bit(NET_CONGESTED, &connection->flags);
1891
1892 if (rv <= 0) {
1893 if (rv != -EAGAIN) {
1894 drbd_err(connection, "%s_sendmsg returned %d\n",
1895 sock == connection->meta.socket ? "msock" : "sock",
1896 rv);
1897 conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);
1898 } else
1899 conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD);
1900 }
1901
1902 return sent;
1903}
1904
1905/**
1906 * drbd_send_all - Send an entire buffer
1907 *
1908 * Returns 0 upon success and a negative error value otherwise.
1909 */
1910int drbd_send_all(struct drbd_connection *connection, struct socket *sock, void *buffer,
1911 size_t size, unsigned msg_flags)
1912{
1913 int err;
1914
1915 err = drbd_send(connection, sock, buffer, size, msg_flags);
1916 if (err < 0)
1917 return err;
1918 if (err != size)
1919 return -EIO;
1920 return 0;
1921}
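/* Usage sketch (illustrative, not taken from this file): callers that treat a
 * short send as an error can simply do
 *
 *	err = drbd_send_all(connection, sock, buf, len, 0);
 *	if (err)
 *		return err;
 *
 * where buf/len stand for any fully prepared on-wire buffer; err is either the
 * negative value returned by drbd_send() or -EIO for a short send. */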
1922
1923static int drbd_open(struct block_device *bdev, fmode_t mode)
1924{
1925 struct drbd_device *device = bdev->bd_disk->private_data;
1926 unsigned long flags;
1927 int rv = 0;
1928
1929 mutex_lock(&drbd_main_mutex);
1930 spin_lock_irqsave(&device->resource->req_lock, flags);
1931 /* to have a stable device->state.role
1932 * and no race with updating open_cnt */
1933
1934 if (device->state.role != R_PRIMARY) {
1935 if (mode & FMODE_WRITE)
1936 rv = -EROFS;
1937 else if (!drbd_allow_oos)
1938 rv = -EMEDIUMTYPE;
1939 }
1940
1941 if (!rv)
1942 device->open_cnt++;
1943 spin_unlock_irqrestore(&device->resource->req_lock, flags);
1944 mutex_unlock(&drbd_main_mutex);
1945
1946 return rv;
1947}
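/* Open policy implemented above: a node that is not Primary refuses writable
 * opens with -EROFS and refuses even read-only opens with -EMEDIUMTYPE unless
 * drbd_allow_oos is enabled; otherwise open_cnt is simply incremented under
 * the resource's req_lock. */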
1948
1949static void drbd_release(struct gendisk *gd, fmode_t mode)
1950{
1951 struct drbd_device *device = gd->private_data;
1952 mutex_lock(&drbd_main_mutex);
1953 device->open_cnt--;
1954 mutex_unlock(&drbd_main_mutex);
1955}
1956
1957/* need to hold resource->req_lock */
1958void drbd_queue_unplug(struct drbd_device *device)
1959{
1960 if (device->state.pdsk >= D_INCONSISTENT && device->state.conn >= C_CONNECTED) {
1961 D_ASSERT(device, device->state.role == R_PRIMARY);
1962 if (test_and_clear_bit(UNPLUG_REMOTE, &device->flags)) {
1963 drbd_queue_work_if_unqueued(
1964 &first_peer_device(device)->connection->sender_work,
1965 &device->unplug_work);
1966 }
1967 }
1968}
1969
1970static void drbd_set_defaults(struct drbd_device *device)
1971{
1972 /* Beware! The actual layout differs
1973 * between big endian and little endian */
1974 device->state = (union drbd_dev_state) {
1975 { .role = R_SECONDARY,
1976 .peer = R_UNKNOWN,
1977 .conn = C_STANDALONE,
1978 .disk = D_DISKLESS,
1979 .pdsk = D_UNKNOWN,
1980 } };
1981}
1982
1983void drbd_init_set_defaults(struct drbd_device *device)
1984{
1985 /* the memset(,0,) did most of this.
1986 * note: only assignments, no allocation in here */
1987
1988 drbd_set_defaults(device);
1989
1990 atomic_set(&device->ap_bio_cnt, 0);
1991 atomic_set(&device->ap_actlog_cnt, 0);
1992 atomic_set(&device->ap_pending_cnt, 0);
1993 atomic_set(&device->rs_pending_cnt, 0);
1994 atomic_set(&device->unacked_cnt, 0);
1995 atomic_set(&device->local_cnt, 0);
1996 atomic_set(&device->pp_in_use_by_net, 0);
1997 atomic_set(&device->rs_sect_in, 0);
1998 atomic_set(&device->rs_sect_ev, 0);
1999 atomic_set(&device->ap_in_flight, 0);
2000 atomic_set(&device->md_io.in_use, 0);
2001
2002 mutex_init(&device->own_state_mutex);
2003 device->state_mutex = &device->own_state_mutex;
2004
2005 spin_lock_init(&device->al_lock);
2006 spin_lock_init(&device->peer_seq_lock);
2007
2008 INIT_LIST_HEAD(&device->active_ee);
2009 INIT_LIST_HEAD(&device->sync_ee);
2010 INIT_LIST_HEAD(&device->done_ee);
2011 INIT_LIST_HEAD(&device->read_ee);
2012 INIT_LIST_HEAD(&device->net_ee);
2013 INIT_LIST_HEAD(&device->resync_reads);
2014 INIT_LIST_HEAD(&device->resync_work.list);
2015 INIT_LIST_HEAD(&device->unplug_work.list);
2016 INIT_LIST_HEAD(&device->bm_io_work.w.list);
2017 INIT_LIST_HEAD(&device->pending_master_completion[0]);
2018 INIT_LIST_HEAD(&device->pending_master_completion[1]);
2019 INIT_LIST_HEAD(&device->pending_completion[0]);
2020 INIT_LIST_HEAD(&device->pending_completion[1]);
2021
2022 device->resync_work.cb = w_resync_timer;
2023 device->unplug_work.cb = w_send_write_hint;
2024 device->bm_io_work.w.cb = w_bitmap_io;
2025
2026 setup_timer(&device->resync_timer, resync_timer_fn,
2027 (unsigned long)device);
2028 setup_timer(&device->md_sync_timer, md_sync_timer_fn,
2029 (unsigned long)device);
2030 setup_timer(&device->start_resync_timer, start_resync_timer_fn,
2031 (unsigned long)device);
2032 setup_timer(&device->request_timer, request_timer_fn,
2033 (unsigned long)device);
2034
2035 init_waitqueue_head(&device->misc_wait);
2036 init_waitqueue_head(&device->state_wait);
2037 init_waitqueue_head(&device->ee_wait);
2038 init_waitqueue_head(&device->al_wait);
2039 init_waitqueue_head(&device->seq_wait);
2040
2041 device->resync_wenr = LC_FREE;
2042 device->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
2043 device->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
2044}
2045
2046void drbd_device_cleanup(struct drbd_device *device)
2047{
2048 int i;
2049 if (first_peer_device(device)->connection->receiver.t_state != NONE)
2050 drbd_err(device, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
2051 first_peer_device(device)->connection->receiver.t_state);
2052
2053 device->al_writ_cnt =
2054 device->bm_writ_cnt =
2055 device->read_cnt =
2056 device->recv_cnt =
2057 device->send_cnt =
2058 device->writ_cnt =
2059 device->p_size =
2060 device->rs_start =
2061 device->rs_total =
2062 device->rs_failed = 0;
2063 device->rs_last_events = 0;
2064 device->rs_last_sect_ev = 0;
2065 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2066 device->rs_mark_left[i] = 0;
2067 device->rs_mark_time[i] = 0;
2068 }
2069 D_ASSERT(device, first_peer_device(device)->connection->net_conf == NULL);
2070
2071 drbd_set_my_capacity(device, 0);
2072 if (device->bitmap) {
2073 /* maybe never allocated. */
2074 drbd_bm_resize(device, 0, 1);
2075 drbd_bm_cleanup(device);
2076 }
2077
2078 drbd_backing_dev_free(device, device->ldev);
2079 device->ldev = NULL;
2080
2081 clear_bit(AL_SUSPENDED, &device->flags);
2082
2083 D_ASSERT(device, list_empty(&device->active_ee));
2084 D_ASSERT(device, list_empty(&device->sync_ee));
2085 D_ASSERT(device, list_empty(&device->done_ee));
2086 D_ASSERT(device, list_empty(&device->read_ee));
2087 D_ASSERT(device, list_empty(&device->net_ee));
2088 D_ASSERT(device, list_empty(&device->resync_reads));
2089 D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
2090 D_ASSERT(device, list_empty(&device->resync_work.list));
2091 D_ASSERT(device, list_empty(&device->unplug_work.list));
2092
2093 drbd_set_defaults(device);
2094}
2095
2096
2097static void drbd_destroy_mempools(void)
2098{
2099 struct page *page;
2100
2101 while (drbd_pp_pool) {
2102 page = drbd_pp_pool;
2103 drbd_pp_pool = (struct page *)page_private(page);
2104 __free_page(page);
2105 drbd_pp_vacant--;
2106 }
2107
2108 /* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */
2109
2110 if (drbd_io_bio_set)
2111 bioset_free(drbd_io_bio_set);
2112 if (drbd_md_io_bio_set)
2113 bioset_free(drbd_md_io_bio_set);
2114 if (drbd_md_io_page_pool)
2115 mempool_destroy(drbd_md_io_page_pool);
2116 if (drbd_ee_mempool)
2117 mempool_destroy(drbd_ee_mempool);
2118 if (drbd_request_mempool)
2119 mempool_destroy(drbd_request_mempool);
2120 if (drbd_ee_cache)
2121 kmem_cache_destroy(drbd_ee_cache);
2122 if (drbd_request_cache)
2123 kmem_cache_destroy(drbd_request_cache);
2124 if (drbd_bm_ext_cache)
2125 kmem_cache_destroy(drbd_bm_ext_cache);
2126 if (drbd_al_ext_cache)
2127 kmem_cache_destroy(drbd_al_ext_cache);
2128
2129 drbd_io_bio_set = NULL;
2130 drbd_md_io_bio_set = NULL;
2131 drbd_md_io_page_pool = NULL;
2132 drbd_ee_mempool = NULL;
2133 drbd_request_mempool = NULL;
2134 drbd_ee_cache = NULL;
2135 drbd_request_cache = NULL;
2136 drbd_bm_ext_cache = NULL;
2137 drbd_al_ext_cache = NULL;
2138
2139 return;
2140}
2141
2142static int drbd_create_mempools(void)
2143{
2144 struct page *page;
2145 const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count;
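	/* For scale (assuming the usual DRBD_MAX_BIO_SIZE of 1 MiB and 4 KiB
	 * pages): this reserves 256 pool pages per configured minor. */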
2146 int i;
2147
2148 /* prepare our caches and mempools */
2149 drbd_request_mempool = NULL;
2150 drbd_ee_cache = NULL;
2151 drbd_request_cache = NULL;
2152 drbd_bm_ext_cache = NULL;
2153 drbd_al_ext_cache = NULL;
2154 drbd_pp_pool = NULL;
2155 drbd_md_io_page_pool = NULL;
2156 drbd_md_io_bio_set = NULL;
2157 drbd_io_bio_set = NULL;
2158
2159 /* caches */
2160 drbd_request_cache = kmem_cache_create(
2161 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
2162 if (drbd_request_cache == NULL)
2163 goto Enomem;
2164
2165 drbd_ee_cache = kmem_cache_create(
2166 "drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL);
2167 if (drbd_ee_cache == NULL)
2168 goto Enomem;
2169
2170 drbd_bm_ext_cache = kmem_cache_create(
2171 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
2172 if (drbd_bm_ext_cache == NULL)
2173 goto Enomem;
2174
2175 drbd_al_ext_cache = kmem_cache_create(
2176 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
2177 if (drbd_al_ext_cache == NULL)
2178 goto Enomem;
2179
2180 /* mempools */
2181 drbd_io_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0);
2182 if (drbd_io_bio_set == NULL)
2183 goto Enomem;
2184
2185 drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0,
2186 BIOSET_NEED_BVECS);
2187 if (drbd_md_io_bio_set == NULL)
2188 goto Enomem;
2189
2190 drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
2191 if (drbd_md_io_page_pool == NULL)
2192 goto Enomem;
2193
2194 drbd_request_mempool = mempool_create_slab_pool(number,
2195 drbd_request_cache);
2196 if (drbd_request_mempool == NULL)
2197 goto Enomem;
2198
2199 drbd_ee_mempool = mempool_create_slab_pool(number, drbd_ee_cache);
2200 if (drbd_ee_mempool == NULL)
2201 goto Enomem;
2202
2203 /* drbd's page pool */
2204 spin_lock_init(&drbd_pp_lock);
2205
2206 for (i = 0; i < number; i++) {
2207 page = alloc_page(GFP_HIGHUSER);
2208 if (!page)
2209 goto Enomem;
2210 set_page_private(page, (unsigned long)drbd_pp_pool);
2211 drbd_pp_pool = page;
2212 }
2213 drbd_pp_vacant = number;
2214
2215 return 0;
2216
2217Enomem:
2218 drbd_destroy_mempools(); /* in case we allocated some */
2219 return -ENOMEM;
2220}
2221
2222static void drbd_release_all_peer_reqs(struct drbd_device *device)
2223{
2224 int rr;
2225
2226 rr = drbd_free_peer_reqs(device, &device->active_ee);
2227 if (rr)
2228 drbd_err(device, "%d EEs in active list found!\n", rr);
2229
2230 rr = drbd_free_peer_reqs(device, &device->sync_ee);
2231 if (rr)
2232 drbd_err(device, "%d EEs in sync list found!\n", rr);
2233
2234 rr = drbd_free_peer_reqs(device, &device->read_ee);
2235 if (rr)
2236 drbd_err(device, "%d EEs in read list found!\n", rr);
2237
2238 rr = drbd_free_peer_reqs(device, &device->done_ee);
2239 if (rr)
2240 drbd_err(device, "%d EEs in done list found!\n", rr);
2241
2242 rr = drbd_free_peer_reqs(device, &device->net_ee);
2243 if (rr)
2244 drbd_err(device, "%d EEs in net list found!\n", rr);
2245}
2246
2247/* caution. no locking. */
2248void drbd_destroy_device(struct kref *kref)
2249{
2250 struct drbd_device *device = container_of(kref, struct drbd_device, kref);
2251 struct drbd_resource *resource = device->resource;
2252 struct drbd_peer_device *peer_device, *tmp_peer_device;
2253
2254 del_timer_sync(&device->request_timer);
2255
2256 /* paranoia asserts */
2257 D_ASSERT(device, device->open_cnt == 0);
2258 /* end paranoia asserts */
2259
2260 /* cleanup stuff that may have been allocated during
2261 * device (re-)configuration or state changes */
2262
2263 if (device->this_bdev)
2264 bdput(device->this_bdev);
2265
2266 drbd_backing_dev_free(device, device->ldev);
2267 device->ldev = NULL;
2268
2269 drbd_release_all_peer_reqs(device);
2270
2271 lc_destroy(device->act_log);
2272 lc_destroy(device->resync);
2273
2274 kfree(device->p_uuid);
2275 /* device->p_uuid = NULL; */
2276
2277 if (device->bitmap) /* should no longer be there. */
2278 drbd_bm_cleanup(device);
2279 __free_page(device->md_io.page);
2280 put_disk(device->vdisk);
2281 blk_cleanup_queue(device->rq_queue);
2282 kfree(device->rs_plan_s);
2283
2284 /* not for_each_connection(connection, resource):
2285 * those may have been cleaned up and disassociated already.
2286 */
2287 for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
2288 kref_put(&peer_device->connection->kref, drbd_destroy_connection);
2289 kfree(peer_device);
2290 }
2291 memset(device, 0xfd, sizeof(*device));
2292 kfree(device);
2293 kref_put(&resource->kref, drbd_destroy_resource);
2294}
2295
2296/* One global retry thread, if we need to push back some bio and have it
2297 * reinserted through our make request function.
2298 */
2299static struct retry_worker {
2300 struct workqueue_struct *wq;
2301 struct work_struct worker;
2302
2303 spinlock_t lock;
2304 struct list_head writes;
2305} retry;
2306
2307static void do_retry(struct work_struct *ws)
2308{
2309 struct retry_worker *retry = container_of(ws, struct retry_worker, worker);
2310 LIST_HEAD(writes);
2311 struct drbd_request *req, *tmp;
2312
2313 spin_lock_irq(&retry->lock);
2314 list_splice_init(&retry->writes, &writes);
2315 spin_unlock_irq(&retry->lock);
2316
2317 list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
2318 struct drbd_device *device = req->device;
2319 struct bio *bio = req->master_bio;
2320 unsigned long start_jif = req->start_jif;
2321 bool expected;
2322
2323 expected =
2324 expect(atomic_read(&req->completion_ref) == 0) &&
2325 expect(req->rq_state & RQ_POSTPONED) &&
2326 expect((req->rq_state & RQ_LOCAL_PENDING) == 0 ||
2327 (req->rq_state & RQ_LOCAL_ABORTED) != 0);
2328
2329 if (!expected)
2330 drbd_err(device, "req=%p completion_ref=%d rq_state=%x\n",
2331 req, atomic_read(&req->completion_ref),
2332 req->rq_state);
2333
2334 /* We still need to put one kref associated with the
2335 * "completion_ref" going zero in the code path that queued it
2336 * here. The request object may still be referenced by a
2337 * frozen local req->private_bio, in case we force-detached.
2338 */
2339 kref_put(&req->kref, drbd_req_destroy);
2340
2341 /* A single suspended or otherwise blocking device may stall
2342 * all others as well. Fortunately, this code path is to
2343 * recover from a situation that "should not happen":
2344 * concurrent writes in multi-primary setup.
2345 * In a "normal" lifecycle, this workqueue is supposed to be
2346 * destroyed without ever doing anything.
2347 * If it turns out to be an issue anyway, we can do per
2348 * resource (replication group) or per device (minor) retry
2349 * workqueues instead.
2350 */
2351
2352 /* We are not just doing generic_make_request(),
2353 * as we want to keep the start_time information. */
2354 inc_ap_bio(device);
2355 __drbd_make_request(device, bio, start_jif);
2356 }
2357}
2358
2359/* called via drbd_req_put_completion_ref(),
2360 * holds resource->req_lock */
2361void drbd_restart_request(struct drbd_request *req)
2362{
2363 unsigned long flags;
2364 spin_lock_irqsave(&retry.lock, flags);
2365 list_move_tail(&req->tl_requests, &retry.writes);
2366 spin_unlock_irqrestore(&retry.lock, flags);
2367
2368 /* Drop the extra reference that would otherwise
2369 * have been dropped by complete_master_bio.
2370 * do_retry() needs to grab a new one. */
2371 dec_ap_bio(req->device);
2372
2373 queue_work(retry.wq, &retry.worker);
2374}
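/* Retry flow in short (derived from the two functions above): the request is
 * parked on retry.writes and its ap_bio reference dropped here; do_retry()
 * later splices the list off under retry.lock, drops the kref associated with
 * the completion_ref, takes a fresh ap_bio reference and resubmits the
 * preserved master bio through __drbd_make_request() with its original
 * start_jif. */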
2375
2376void drbd_destroy_resource(struct kref *kref)
2377{
2378 struct drbd_resource *resource =
2379 container_of(kref, struct drbd_resource, kref);
2380
2381 idr_destroy(&resource->devices);
2382 free_cpumask_var(resource->cpu_mask);
2383 kfree(resource->name);
2384 memset(resource, 0xf2, sizeof(*resource));
2385 kfree(resource);
2386}
2387
2388void drbd_free_resource(struct drbd_resource *resource)
2389{
2390 struct drbd_connection *connection, *tmp;
2391
2392 for_each_connection_safe(connection, tmp, resource) {
2393 list_del(&connection->connections);
2394 drbd_debugfs_connection_cleanup(connection);
2395 kref_put(&connection->kref, drbd_destroy_connection);
2396 }
2397 drbd_debugfs_resource_cleanup(resource);
2398 kref_put(&resource->kref, drbd_destroy_resource);
2399}
2400
2401static void drbd_cleanup(void)
2402{
2403 unsigned int i;
2404 struct drbd_device *device;
2405 struct drbd_resource *resource, *tmp;
2406
2407 /* first remove proc,
2408 * drbdsetup uses its presence to detect
2409 * whether DRBD is loaded.
2410 * If we got stuck in proc removal,
2411 * but have netlink already deregistered,
2412 * some drbdsetup commands may wait forever
2413 * for an answer.
2414 */
2415 if (drbd_proc)
2416 remove_proc_entry("drbd", NULL);
2417
2418 if (retry.wq)
2419 destroy_workqueue(retry.wq);
2420
2421 drbd_genl_unregister();
2422
2423 idr_for_each_entry(&drbd_devices, device, i)
2424 drbd_delete_device(device);
2425
2426 /* not _rcu, since there is no other updater anymore; genl is already unregistered */
2427 for_each_resource_safe(resource, tmp, &drbd_resources) {
2428 list_del(&resource->resources);
2429 drbd_free_resource(resource);
2430 }
2431
2432 drbd_debugfs_cleanup();
2433
2434 drbd_destroy_mempools();
2435 unregister_blkdev(DRBD_MAJOR, "drbd");
2436
2437 idr_destroy(&drbd_devices);
2438
2439 pr_info("module cleanup done.\n");
2440}
2441
2442/**
2443 * drbd_congested() - Callback for the flusher thread
2444 * @congested_data: User data
2445 * @bdi_bits: Bits the BDI flusher thread is currently interested in
2446 *
2447 * Returns 1<<WB_async_congested and/or 1<<WB_sync_congested if we are congested.
2448 */
2449static int drbd_congested(void *congested_data, int bdi_bits)
2450{
2451 struct drbd_device *device = congested_data;
2452 struct request_queue *q;
2453 char reason = '-';
2454 int r = 0;
2455
2456 if (!may_inc_ap_bio(device)) {
2457 /* DRBD has frozen IO */
2458 r = bdi_bits;
2459 reason = 'd';
2460 goto out;
2461 }
2462
2463 if (test_bit(CALLBACK_PENDING, &first_peer_device(device)->connection->flags)) {
2464 r |= (1 << WB_async_congested);
2465 /* Without good local data, we would need to read from remote,
2466 * and that would need the worker thread as well, which is
2467 * currently blocked waiting for that usermode helper to
2468 * finish.
2469 */
2470 if (!get_ldev_if_state(device, D_UP_TO_DATE))
2471 r |= (1 << WB_sync_congested);
2472 else
2473 put_ldev(device);
2474 r &= bdi_bits;
2475 reason = 'c';
2476 goto out;
2477 }
2478
2479 if (get_ldev(device)) {
2480 q = bdev_get_queue(device->ldev->backing_bdev);
2481 r = bdi_congested(q->backing_dev_info, bdi_bits);
2482 put_ldev(device);
2483 if (r)
2484 reason = 'b';
2485 }
2486
2487 if (bdi_bits & (1 << WB_async_congested) &&
2488 test_bit(NET_CONGESTED, &first_peer_device(device)->connection->flags)) {
2489 r |= (1 << WB_async_congested);
2490 reason = reason == 'b' ? 'a' : 'n';
2491 }
2492
2493out:
2494 device->congestion_reason = reason;
2495 return r;
2496}
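/* The single character stored in congestion_reason above decodes as:
 * 'd' = IO frozen by DRBD, 'c' = usermode helper callback pending,
 * 'b' = backing device congested, 'n' = network congested,
 * 'a' = both backing device and network congested, '-' = not congested. */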
2497
2498static void drbd_init_workqueue(struct drbd_work_queue* wq)
2499{
2500 spin_lock_init(&wq->q_lock);
2501 INIT_LIST_HEAD(&wq->q);
2502 init_waitqueue_head(&wq->q_wait);
2503}
2504
2505struct completion_work {
2506 struct drbd_work w;
2507 struct completion done;
2508};
2509
2510static int w_complete(struct drbd_work *w, int cancel)
2511{
2512 struct completion_work *completion_work =
2513 container_of(w, struct completion_work, w);
2514
2515 complete(&completion_work->done);
2516 return 0;
2517}
2518
2519void drbd_flush_workqueue(struct drbd_work_queue *work_queue)
2520{
2521 struct completion_work completion_work;
2522
2523 completion_work.w.cb = w_complete;
2524 init_completion(&completion_work.done);
2525 drbd_queue_work(work_queue, &completion_work.w);
2526 wait_for_completion(&completion_work.done);
2527}
2528
2529struct drbd_resource *drbd_find_resource(const char *name)
2530{
2531 struct drbd_resource *resource;
2532
2533 if (!name || !name[0])
2534 return NULL;
2535
2536 rcu_read_lock();
2537 for_each_resource_rcu(resource, &drbd_resources) {
2538 if (!strcmp(resource->name, name)) {
2539 kref_get(&resource->kref);
2540 goto found;
2541 }
2542 }
2543 resource = NULL;
2544found:
2545 rcu_read_unlock();
2546 return resource;
2547}
2548
2549struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
2550 void *peer_addr, int peer_addr_len)
2551{
2552 struct drbd_resource *resource;
2553 struct drbd_connection *connection;
2554
2555 rcu_read_lock();
2556 for_each_resource_rcu(resource, &drbd_resources) {
2557 for_each_connection_rcu(connection, resource) {
2558 if (connection->my_addr_len == my_addr_len &&
2559 connection->peer_addr_len == peer_addr_len &&
2560 !memcmp(&connection->my_addr, my_addr, my_addr_len) &&
2561 !memcmp(&connection->peer_addr, peer_addr, peer_addr_len)) {
2562 kref_get(&connection->kref);
2563 goto found;
2564 }
2565 }
2566 }
2567 connection = NULL;
2568found:
2569 rcu_read_unlock();
2570 return connection;
2571}
2572
2573static int drbd_alloc_socket(struct drbd_socket *socket)
2574{
2575 socket->rbuf = (void *) __get_free_page(GFP_KERNEL);
2576 if (!socket->rbuf)
2577 return -ENOMEM;
2578 socket->sbuf = (void *) __get_free_page(GFP_KERNEL);
2579 if (!socket->sbuf)
2580 return -ENOMEM;
2581 return 0;
2582}
2583
2584static void drbd_free_socket(struct drbd_socket *socket)
2585{
2586 free_page((unsigned long) socket->sbuf);
2587 free_page((unsigned long) socket->rbuf);
2588}
2589
2590void conn_free_crypto(struct drbd_connection *connection)
2591{
2592 drbd_free_sock(connection);
2593
2594 crypto_free_ahash(connection->csums_tfm);
2595 crypto_free_ahash(connection->verify_tfm);
2596 crypto_free_shash(connection->cram_hmac_tfm);
2597 crypto_free_ahash(connection->integrity_tfm);
2598 crypto_free_ahash(connection->peer_integrity_tfm);
2599 kfree(connection->int_dig_in);
2600 kfree(connection->int_dig_vv);
2601
2602 connection->csums_tfm = NULL;
2603 connection->verify_tfm = NULL;
2604 connection->cram_hmac_tfm = NULL;
2605 connection->integrity_tfm = NULL;
2606 connection->peer_integrity_tfm = NULL;
2607 connection->int_dig_in = NULL;
2608 connection->int_dig_vv = NULL;
2609}
2610
2611int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts)
2612{
2613 struct drbd_connection *connection;
2614 cpumask_var_t new_cpu_mask;
2615 int err;
2616
2617 if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL))
2618 return -ENOMEM;
2619
2620 /* silently ignore cpu mask on UP kernel */
2621 if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) {
2622 err = bitmap_parse(res_opts->cpu_mask, DRBD_CPU_MASK_SIZE,
2623 cpumask_bits(new_cpu_mask), nr_cpu_ids);
2624 if (err == -EOVERFLOW) {
2625 /* So what. mask it out. */
2626 cpumask_var_t tmp_cpu_mask;
2627 if (zalloc_cpumask_var(&tmp_cpu_mask, GFP_KERNEL)) {
2628 cpumask_setall(tmp_cpu_mask);
2629 cpumask_and(new_cpu_mask, new_cpu_mask, tmp_cpu_mask);
2630 drbd_warn(resource, "Overflow in bitmap_parse(%.12s%s), truncating to %u bits\n",
2631 res_opts->cpu_mask,
2632 strlen(res_opts->cpu_mask) > 12 ? "..." : "",
2633 nr_cpu_ids);
2634 free_cpumask_var(tmp_cpu_mask);
2635 err = 0;
2636 }
2637 }
2638 if (err) {
2639 drbd_warn(resource, "bitmap_parse() failed with %d\n", err);
2640 /* retcode = ERR_CPU_MASK_PARSE; */
2641 goto fail;
2642 }
2643 }
2644 resource->res_opts = *res_opts;
2645 if (cpumask_empty(new_cpu_mask))
2646 drbd_calc_cpu_mask(&new_cpu_mask);
2647 if (!cpumask_equal(resource->cpu_mask, new_cpu_mask)) {
2648 cpumask_copy(resource->cpu_mask, new_cpu_mask);
2649 for_each_connection_rcu(connection, resource) {
2650 connection->receiver.reset_cpu_mask = 1;
2651 connection->ack_receiver.reset_cpu_mask = 1;
2652 connection->worker.reset_cpu_mask = 1;
2653 }
2654 }
2655 err = 0;
2656
2657fail:
2658 free_cpumask_var(new_cpu_mask);
2659 return err;
2660
2661}
2662
2663struct drbd_resource *drbd_create_resource(const char *name)
2664{
2665 struct drbd_resource *resource;
2666
2667 resource = kzalloc(sizeof(struct drbd_resource), GFP_KERNEL);
2668 if (!resource)
2669 goto fail;
2670 resource->name = kstrdup(name, GFP_KERNEL);
2671 if (!resource->name)
2672 goto fail_free_resource;
2673 if (!zalloc_cpumask_var(&resource->cpu_mask, GFP_KERNEL))
2674 goto fail_free_name;
2675 kref_init(&resource->kref);
2676 idr_init(&resource->devices);
2677 INIT_LIST_HEAD(&resource->connections);
2678 resource->write_ordering = WO_BDEV_FLUSH;
2679 list_add_tail_rcu(&resource->resources, &drbd_resources);
2680 mutex_init(&resource->conf_update);
2681 mutex_init(&resource->adm_mutex);
2682 spin_lock_init(&resource->req_lock);
2683 drbd_debugfs_resource_add(resource);
2684 return resource;
2685
2686fail_free_name:
2687 kfree(resource->name);
2688fail_free_resource:
2689 kfree(resource);
2690fail:
2691 return NULL;
2692}
2693
2694/* caller must be under adm_mutex */
2695struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts)
2696{
2697 struct drbd_resource *resource;
2698 struct drbd_connection *connection;
2699
2700 connection = kzalloc(sizeof(struct drbd_connection), GFP_KERNEL);
2701 if (!connection)
2702 return NULL;
2703
2704 if (drbd_alloc_socket(&connection->data))
2705 goto fail;
2706 if (drbd_alloc_socket(&connection->meta))
2707 goto fail;
2708
2709 connection->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
2710 if (!connection->current_epoch)
2711 goto fail;
2712
2713 INIT_LIST_HEAD(&connection->transfer_log);
2714
2715 INIT_LIST_HEAD(&connection->current_epoch->list);
2716 connection->epochs = 1;
2717 spin_lock_init(&connection->epoch_lock);
2718
2719 connection->send.seen_any_write_yet = false;
2720 connection->send.current_epoch_nr = 0;
2721 connection->send.current_epoch_writes = 0;
2722
2723 resource = drbd_create_resource(name);
2724 if (!resource)
2725 goto fail;
2726
2727 connection->cstate = C_STANDALONE;
2728 mutex_init(&connection->cstate_mutex);
2729 init_waitqueue_head(&connection->ping_wait);
2730 idr_init(&connection->peer_devices);
2731
2732 drbd_init_workqueue(&connection->sender_work);
2733 mutex_init(&connection->data.mutex);
2734 mutex_init(&connection->meta.mutex);
2735
2736 drbd_thread_init(resource, &connection->receiver, drbd_receiver, "receiver");
2737 connection->receiver.connection = connection;
2738 drbd_thread_init(resource, &connection->worker, drbd_worker, "worker");
2739 connection->worker.connection = connection;
2740 drbd_thread_init(resource, &connection->ack_receiver, drbd_ack_receiver, "ack_recv");
2741 connection->ack_receiver.connection = connection;
2742
2743 kref_init(&connection->kref);
2744
2745 connection->resource = resource;
2746
2747 if (set_resource_options(resource, res_opts))
2748 goto fail_resource;
2749
2750 kref_get(&resource->kref);
2751 list_add_tail_rcu(&connection->connections, &resource->connections);
2752 drbd_debugfs_connection_add(connection);
2753 return connection;
2754
2755fail_resource:
2756 list_del(&resource->resources);
2757 drbd_free_resource(resource);
2758fail:
2759 kfree(connection->current_epoch);
2760 drbd_free_socket(&connection->meta);
2761 drbd_free_socket(&connection->data);
2762 kfree(connection);
2763 return NULL;
2764}
2765
2766void drbd_destroy_connection(struct kref *kref)
2767{
2768 struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
2769 struct drbd_resource *resource = connection->resource;
2770
2771 if (atomic_read(&connection->current_epoch->epoch_size) != 0)
2772 drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
2773 kfree(connection->current_epoch);
2774
2775 idr_destroy(&connection->peer_devices);
2776
2777 drbd_free_socket(&connection->meta);
2778 drbd_free_socket(&connection->data);
2779 kfree(connection->int_dig_in);
2780 kfree(connection->int_dig_vv);
2781 memset(connection, 0xfc, sizeof(*connection));
2782 kfree(connection);
2783 kref_put(&resource->kref, drbd_destroy_resource);
2784}
2785
2786static int init_submitter(struct drbd_device *device)
2787{
2788 /* opencoded create_singlethread_workqueue(),
2789 * to be able to say "drbd%d", ..., minor */
2790 device->submit.wq =
2791 alloc_ordered_workqueue("drbd%u_submit", WQ_MEM_RECLAIM, device->minor);
2792 if (!device->submit.wq)
2793 return -ENOMEM;
2794
2795 INIT_WORK(&device->submit.worker, do_submit);
2796 INIT_LIST_HEAD(&device->submit.writes);
2797 return 0;
2798}
2799
2800enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor)
2801{
2802 struct drbd_resource *resource = adm_ctx->resource;
2803 struct drbd_connection *connection;
2804 struct drbd_device *device;
2805 struct drbd_peer_device *peer_device, *tmp_peer_device;
2806 struct gendisk *disk;
2807 struct request_queue *q;
2808 int id;
2809 int vnr = adm_ctx->volume;
2810 enum drbd_ret_code err = ERR_NOMEM;
2811
2812 device = minor_to_device(minor);
2813 if (device)
2814 return ERR_MINOR_OR_VOLUME_EXISTS;
2815
2816 /* GFP_KERNEL, we are outside of all write-out paths */
2817 device = kzalloc(sizeof(struct drbd_device), GFP_KERNEL);
2818 if (!device)
2819 return ERR_NOMEM;
2820 kref_init(&device->kref);
2821
2822 kref_get(&resource->kref);
2823 device->resource = resource;
2824 device->minor = minor;
2825 device->vnr = vnr;
2826
2827 drbd_init_set_defaults(device);
2828
2829 q = blk_alloc_queue(GFP_KERNEL);
2830 if (!q)
2831 goto out_no_q;
2832 device->rq_queue = q;
2833 q->queuedata = device;
2834
2835 disk = alloc_disk(1);
2836 if (!disk)
2837 goto out_no_disk;
2838 device->vdisk = disk;
2839
2840 set_disk_ro(disk, true);
2841
2842 disk->queue = q;
2843 disk->major = DRBD_MAJOR;
2844 disk->first_minor = minor;
2845 disk->fops = &drbd_ops;
2846 sprintf(disk->disk_name, "drbd%d", minor);
2847 disk->private_data = device;
2848
2849 device->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
2850 /* we have no partitions. we contain only ourselves. */
2851 device->this_bdev->bd_contains = device->this_bdev;
2852
2853 q->backing_dev_info->congested_fn = drbd_congested;
2854 q->backing_dev_info->congested_data = device;
2855
2856 blk_queue_make_request(q, drbd_make_request);
2857 blk_queue_write_cache(q, true, true);
2858 /* Set max_hw_sectors to a deliberately small value of 8 KiB here;
2859 this triggers a max_bio_size message upon first attach or connect */
2860 blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
2861 q->queue_lock = &resource->req_lock;
2862
2863 device->md_io.page = alloc_page(GFP_KERNEL);
2864 if (!device->md_io.page)
2865 goto out_no_io_page;
2866
2867 if (drbd_bm_init(device))
2868 goto out_no_bitmap;
2869 device->read_requests = RB_ROOT;
2870 device->write_requests = RB_ROOT;
2871
2872 id = idr_alloc(&drbd_devices, device, minor, minor + 1, GFP_KERNEL);
2873 if (id < 0) {
2874 if (id == -ENOSPC)
2875 err = ERR_MINOR_OR_VOLUME_EXISTS;
2876 goto out_no_minor_idr;
2877 }
2878 kref_get(&device->kref);
2879
2880 id = idr_alloc(&resource->devices, device, vnr, vnr + 1, GFP_KERNEL);
2881 if (id < 0) {
2882 if (id == -ENOSPC)
2883 err = ERR_MINOR_OR_VOLUME_EXISTS;
2884 goto out_idr_remove_minor;
2885 }
2886 kref_get(&device->kref);
2887
2888 INIT_LIST_HEAD(&device->peer_devices);
2889 INIT_LIST_HEAD(&device->pending_bitmap_io);
2890 for_each_connection(connection, resource) {
2891 peer_device = kzalloc(sizeof(struct drbd_peer_device), GFP_KERNEL);
2892 if (!peer_device)
2893 goto out_idr_remove_from_resource;
2894 peer_device->connection = connection;
2895 peer_device->device = device;
2896
2897 list_add(&peer_device->peer_devices, &device->peer_devices);
2898 kref_get(&device->kref);
2899
2900 id = idr_alloc(&connection->peer_devices, peer_device, vnr, vnr + 1, GFP_KERNEL);
2901 if (id < 0) {
2902 if (id == -ENOSPC)
2903 err = ERR_INVALID_REQUEST;
2904 goto out_idr_remove_from_resource;
2905 }
2906 kref_get(&connection->kref);
2907 INIT_WORK(&peer_device->send_acks_work, drbd_send_acks_wf);
2908 }
2909
2910 if (init_submitter(device)) {
2911 err = ERR_NOMEM;
2912 goto out_idr_remove_vol;
2913 }
2914
2915 add_disk(disk);
2916
2917 /* inherit the connection state */
2918 device->state.conn = first_connection(resource)->cstate;
2919 if (device->state.conn == C_WF_REPORT_PARAMS) {
2920 for_each_peer_device(peer_device, device)
2921 drbd_connected(peer_device);
2922 }
2923 /* move to create_peer_device() */
2924 for_each_peer_device(peer_device, device)
2925 drbd_debugfs_peer_device_add(peer_device);
2926 drbd_debugfs_device_add(device);
2927 return NO_ERROR;
2928
2929out_idr_remove_vol:
2930 idr_remove(&connection->peer_devices, vnr);
2931out_idr_remove_from_resource:
2932 for_each_connection(connection, resource) {
2933 peer_device = idr_remove(&connection->peer_devices, vnr);
2934 if (peer_device)
2935 kref_put(&connection->kref, drbd_destroy_connection);
2936 }
2937 for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
2938 list_del(&peer_device->peer_devices);
2939 kfree(peer_device);
2940 }
2941 idr_remove(&resource->devices, vnr);
2942out_idr_remove_minor:
2943 idr_remove(&drbd_devices, minor);
2944 synchronize_rcu();
2945out_no_minor_idr:
2946 drbd_bm_cleanup(device);
2947out_no_bitmap:
2948 __free_page(device->md_io.page);
2949out_no_io_page:
2950 put_disk(disk);
2951out_no_disk:
2952 blk_cleanup_queue(q);
2953out_no_q:
2954 kref_put(&resource->kref, drbd_destroy_resource);
2955 kfree(device);
2956 return err;
2957}
2958
2959void drbd_delete_device(struct drbd_device *device)
2960{
2961 struct drbd_resource *resource = device->resource;
2962 struct drbd_connection *connection;
2963 struct drbd_peer_device *peer_device;
2964
2965 /* move to free_peer_device() */
2966 for_each_peer_device(peer_device, device)
2967 drbd_debugfs_peer_device_cleanup(peer_device);
2968 drbd_debugfs_device_cleanup(device);
2969 for_each_connection(connection, resource) {
2970 idr_remove(&connection->peer_devices, device->vnr);
2971 kref_put(&device->kref, drbd_destroy_device);
2972 }
2973 idr_remove(&resource->devices, device->vnr);
2974 kref_put(&device->kref, drbd_destroy_device);
2975 idr_remove(&drbd_devices, device_to_minor(device));
2976 kref_put(&device->kref, drbd_destroy_device);
2977 del_gendisk(device->vdisk);
2978 synchronize_rcu();
2979 kref_put(&device->kref, drbd_destroy_device);
2980}
2981
2982static int __init drbd_init(void)
2983{
2984 int err;
2985
2986 if (drbd_minor_count < DRBD_MINOR_COUNT_MIN || drbd_minor_count > DRBD_MINOR_COUNT_MAX) {
2987 pr_err("invalid minor_count (%d)\n", drbd_minor_count);
2988#ifdef MODULE
2989 return -EINVAL;
2990#else
2991 drbd_minor_count = DRBD_MINOR_COUNT_DEF;
2992#endif
2993 }
2994
2995 err = register_blkdev(DRBD_MAJOR, "drbd");
2996 if (err) {
2997 pr_err("unable to register block device major %d\n",
2998 DRBD_MAJOR);
2999 return err;
3000 }
3001
3002 /*
3003 * allocate all necessary structs
3004 */
3005 init_waitqueue_head(&drbd_pp_wait);
3006
3007 drbd_proc = NULL; /* play safe for drbd_cleanup */
3008 idr_init(&drbd_devices);
3009
3010 mutex_init(&resources_mutex);
3011 INIT_LIST_HEAD(&drbd_resources);
3012
3013 err = drbd_genl_register();
3014 if (err) {
3015 pr_err("unable to register generic netlink family\n");
3016 goto fail;
3017 }
3018
3019 err = drbd_create_mempools();
3020 if (err)
3021 goto fail;
3022
3023 err = -ENOMEM;
3024 drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
3025 if (!drbd_proc) {
3026 pr_err("unable to register proc file\n");
3027 goto fail;
3028 }
3029
3030 retry.wq = create_singlethread_workqueue("drbd-reissue");
3031 if (!retry.wq) {
3032 pr_err("unable to create retry workqueue\n");
3033 goto fail;
3034 }
3035 INIT_WORK(&retry.worker, do_retry);
3036 spin_lock_init(&retry.lock);
3037 INIT_LIST_HEAD(&retry.writes);
3038
3039 if (drbd_debugfs_init())
3040 pr_notice("failed to initialize debugfs -- will not be available\n");
3041
3042 pr_info("initialized. "
3043 "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
3044 API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
3045 pr_info("%s\n", drbd_buildtag());
3046 pr_info("registered as block device major %d\n", DRBD_MAJOR);
3047 return 0; /* Success! */
3048
3049fail:
3050 drbd_cleanup();
3051 if (err == -ENOMEM)
3052 pr_err("ran out of memory\n");
3053 else
3054 pr_err("initialization failure\n");
3055 return err;
3056}
3057
3058static void drbd_free_one_sock(struct drbd_socket *ds)
3059{
3060 struct socket *s;
3061 mutex_lock(&ds->mutex);
3062 s = ds->socket;
3063 ds->socket = NULL;
3064 mutex_unlock(&ds->mutex);
3065 if (s) {
3066 /* so debugfs does not need to mutex_lock() */
3067 synchronize_rcu();
3068 kernel_sock_shutdown(s, SHUT_RDWR);
3069 sock_release(s);
3070 }
3071}
3072
3073void drbd_free_sock(struct drbd_connection *connection)
3074{
3075 if (connection->data.socket)
3076 drbd_free_one_sock(&connection->data);
3077 if (connection->meta.socket)
3078 drbd_free_one_sock(&connection->meta);
3079}
3080
3081/* meta data management */
3082
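/* Note on the loop below: a device kref is taken before rcu_read_unlock() so
 * the device cannot go away while drbd_md_sync() sleeps; the RCU read lock is
 * re-acquired before continuing the idr walk. */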
3083void conn_md_sync(struct drbd_connection *connection)
3084{
3085 struct drbd_peer_device *peer_device;
3086 int vnr;
3087
3088 rcu_read_lock();
3089 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
3090 struct drbd_device *device = peer_device->device;
3091
3092 kref_get(&device->kref);
3093 rcu_read_unlock();
3094 drbd_md_sync(device);
3095 kref_put(&device->kref, drbd_destroy_device);
3096 rcu_read_lock();
3097 }
3098 rcu_read_unlock();
3099}
3100
3101/* aligned 4kByte */
3102struct meta_data_on_disk {
3103 u64 la_size_sect; /* last agreed size. */
3104 u64 uuid[UI_SIZE]; /* UUIDs. */
3105 u64 device_uuid;
3106 u64 reserved_u64_1;
3107 u32 flags; /* MDF */
3108 u32 magic;
3109 u32 md_size_sect;
3110 u32 al_offset; /* offset to this block */
3111 u32 al_nr_extents; /* important for restoring the AL (userspace) */
3112 /* `-- act_log->nr_elements <-- ldev->dc.al_extents */
3113 u32 bm_offset; /* offset to the bitmap, from here */
3114 u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */
3115 u32 la_peer_max_bio_size; /* last peer max_bio_size */
3116
3117 /* see al_tr_number_to_on_disk_sector() */
3118 u32 al_stripes;
3119 u32 al_stripe_size_4k;
3120
3121 u8 reserved_u8[4096 - (7*8 + 10*4)];
3122} __packed;
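/* Layout check for the struct above: the 7 u64 fields (la_size_sect, the four
 * uuid[] slots, device_uuid, reserved_u64_1) take 56 bytes and the 10 u32
 * fields take 40 bytes, i.e. 96 bytes of named fields, so
 * reserved_u8[4096 - (7*8 + 10*4)] pads the structure to exactly 4096 bytes,
 * which drbd_md_sync() enforces with a BUILD_BUG_ON on its size. */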
3123
3124
3125
3126void drbd_md_write(struct drbd_device *device, void *b)
3127{
3128 struct meta_data_on_disk *buffer = b;
3129 sector_t sector;
3130 int i;
3131
3132 memset(buffer, 0, sizeof(*buffer));
3133
3134 buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(device->this_bdev));
3135 for (i = UI_CURRENT; i < UI_SIZE; i++)
3136 buffer->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
3137 buffer->flags = cpu_to_be32(device->ldev->md.flags);
3138 buffer->magic = cpu_to_be32(DRBD_MD_MAGIC_84_UNCLEAN);
3139
3140 buffer->md_size_sect = cpu_to_be32(device->ldev->md.md_size_sect);
3141 buffer->al_offset = cpu_to_be32(device->ldev->md.al_offset);
3142 buffer->al_nr_extents = cpu_to_be32(device->act_log->nr_elements);
3143 buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
3144 buffer->device_uuid = cpu_to_be64(device->ldev->md.device_uuid);
3145
3146 buffer->bm_offset = cpu_to_be32(device->ldev->md.bm_offset);
3147 buffer->la_peer_max_bio_size = cpu_to_be32(device->peer_max_bio_size);
3148
3149 buffer->al_stripes = cpu_to_be32(device->ldev->md.al_stripes);
3150 buffer->al_stripe_size_4k = cpu_to_be32(device->ldev->md.al_stripe_size_4k);
3151
3152 D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset);
3153 sector = device->ldev->md.md_offset;
3154
3155 if (drbd_md_sync_page_io(device, device->ldev, sector, REQ_OP_WRITE)) {
3156 /* this was just a try anyway ... */
3157 drbd_err(device, "meta data update failed!\n");
3158 drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
3159 }
3160}
3161
3162/**
3163 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
3164 * @device: DRBD device.
3165 */
3166void drbd_md_sync(struct drbd_device *device)
3167{
3168 struct meta_data_on_disk *buffer;
3169
3170 /* Don't accidentally change the DRBD meta data layout. */
3171 BUILD_BUG_ON(UI_SIZE != 4);
3172 BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096);
3173
3174 del_timer(&device->md_sync_timer);
3175 /* timer may be rearmed by drbd_md_mark_dirty() now. */
3176 if (!test_and_clear_bit(MD_DIRTY, &device->flags))
3177 return;
3178
3179 /* We use here D_FAILED and not D_ATTACHING because we try to write
3180 * metadata even if we detach due to a disk failure! */
3181 if (!get_ldev_if_state(device, D_FAILED))
3182 return;
3183
3184 buffer = drbd_md_get_buffer(device, __func__);
3185 if (!buffer)
3186 goto out;
3187
3188 drbd_md_write(device, buffer);
3189
3190 /* Update device->ldev->md.la_size_sect,
3191 * since we updated it on metadata. */
3192 device->ldev->md.la_size_sect = drbd_get_capacity(device->this_bdev);
3193
3194 drbd_md_put_buffer(device);
3195out:
3196 put_ldev(device);
3197}
3198
3199static int check_activity_log_stripe_size(struct drbd_device *device,
3200 struct meta_data_on_disk *on_disk,
3201 struct drbd_md *in_core)
3202{
3203 u32 al_stripes = be32_to_cpu(on_disk->al_stripes);
3204 u32 al_stripe_size_4k = be32_to_cpu(on_disk->al_stripe_size_4k);
3205 u64 al_size_4k;
3206
3207 /* both not set: default to old fixed size activity log */
3208 if (al_stripes == 0 && al_stripe_size_4k == 0) {
3209 al_stripes = 1;
3210 al_stripe_size_4k = MD_32kB_SECT/8;
3211 }
3212
3213 /* some paranoia plausibility checks */
3214
3215 /* we need both values to be set */
3216 if (al_stripes == 0 || al_stripe_size_4k == 0)
3217 goto err;
3218
3219 al_size_4k = (u64)al_stripes * al_stripe_size_4k;
3220
3221 /* Upper limit of activity log area, to avoid potential overflow
3222 * problems in al_tr_number_to_on_disk_sector(). Right now, more
3223 * than 72 * 4k blocks in total only increases the amount of history,
3224 * so limiting this arbitrarily to 16 GB is not a real limitation ;-) */
3225 if (al_size_4k > (16 * 1024 * 1024/4))
3226 goto err;
3227
3228 /* Lower limit: we need at least 8 transaction slots (32kB)
3229 * to not break existing setups */
3230 if (al_size_4k < MD_32kB_SECT/8)
3231 goto err;
3232
3233 in_core->al_stripe_size_4k = al_stripe_size_4k;
3234 in_core->al_stripes = al_stripes;
3235 in_core->al_size_4k = al_size_4k;
3236
3237 return 0;
3238err:
3239 drbd_err(device, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n",
3240 al_stripes, al_stripe_size_4k);
3241 return -EINVAL;
3242}
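/* Worked example for the limits above (assuming MD_32kB_SECT is 32 KiB
 * expressed in 512-byte sectors, i.e. 64): the legacy default of
 * al_stripes = 1 and al_stripe_size_4k = MD_32kB_SECT/8 = 8 gives
 * al_size_4k = 8, i.e. eight 4 KiB transaction slots = 32 KiB, exactly the
 * lower bound; the upper bound of 16*1024*1024/4 = 4194304 blocks of 4 KiB
 * corresponds to the 16 GB mentioned in the comment. */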
3243
3244static int check_offsets_and_sizes(struct drbd_device *device, struct drbd_backing_dev *bdev)
3245{
3246 sector_t capacity = drbd_get_capacity(bdev->md_bdev);
3247 struct drbd_md *in_core = &bdev->md;
3248 s32 on_disk_al_sect;
3249 s32 on_disk_bm_sect;
3250
3251 /* The on-disk size of the activity log, calculated from offsets, and
3252 * the size of the activity log calculated from the stripe settings,
3253 * should match.
3254 * Though we could relax this a bit: it is ok if the striped activity log
3255 * fits within the available on-disk activity log size.
3256 * Right now, that would break how resize is implemented.
3257 * TODO: make drbd_determine_dev_size() (and the drbdmeta tool) aware
3258 * of possible unused padding space in the on disk layout. */
3259 if (in_core->al_offset < 0) {
3260 if (in_core->bm_offset > in_core->al_offset)
3261 goto err;
3262 on_disk_al_sect = -in_core->al_offset;
3263 on_disk_bm_sect = in_core->al_offset - in_core->bm_offset;
3264 } else {
3265 if (in_core->al_offset != MD_4kB_SECT)
3266 goto err;
3267 if (in_core->bm_offset < in_core->al_offset + in_core->al_size_4k * MD_4kB_SECT)
3268 goto err;
3269
3270 on_disk_al_sect = in_core->bm_offset - MD_4kB_SECT;
3271 on_disk_bm_sect = in_core->md_size_sect - in_core->bm_offset;
3272 }
3273
3274 /* old fixed size meta data is exactly that: fixed. */
3275 if (in_core->meta_dev_idx >= 0) {
3276 if (in_core->md_size_sect != MD_128MB_SECT
3277 || in_core->al_offset != MD_4kB_SECT
3278 || in_core->bm_offset != MD_4kB_SECT + MD_32kB_SECT
3279 || in_core->al_stripes != 1
3280 || in_core->al_stripe_size_4k != MD_32kB_SECT/8)
3281 goto err;
3282 }
3283
3284 if (capacity < in_core->md_size_sect)
3285 goto err;
3286 if (capacity - in_core->md_size_sect < drbd_md_first_sector(bdev))
3287 goto err;
3288
3289 /* should be aligned, and at least 32k */
3290 if ((on_disk_al_sect & 7) || (on_disk_al_sect < MD_32kB_SECT))
3291 goto err;
3292
3293 /* should fit (for now: exactly) into the available on-disk space;
3294 * overflow prevention is in check_activity_log_stripe_size() above. */
3295 if (on_disk_al_sect != in_core->al_size_4k * MD_4kB_SECT)
3296 goto err;
3297
3298 /* again, should be aligned */
3299 if (in_core->bm_offset & 7)
3300 goto err;
3301
3302 /* FIXME check for device grow with flex external meta data? */
3303
3304 /* can the available bitmap space cover the last agreed device size? */
3305 if (on_disk_bm_sect < (in_core->la_size_sect+7)/MD_4kB_SECT/8/512)
3306 goto err;
3307
3308 return 0;
3309
3310err:
3311 drbd_err(device, "meta data offsets don't make sense: idx=%d "
3312 "al_s=%u, al_sz4k=%u, al_offset=%d, bm_offset=%d, "
3313 "md_size_sect=%u, la_size=%llu, md_capacity=%llu\n",
3314 in_core->meta_dev_idx,
3315 in_core->al_stripes, in_core->al_stripe_size_4k,
3316 in_core->al_offset, in_core->bm_offset, in_core->md_size_sect,
3317 (unsigned long long)in_core->la_size_sect,
3318 (unsigned long long)capacity);
3319
3320 return -EINVAL;
3321}
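/* Unit sketch for the final bitmap-coverage check above (assuming MD_4kB_SECT
 * is 8, i.e. 4 KiB expressed in 512-byte sectors, and one bitmap bit per
 * 4 KiB block): la_size_sect / MD_4kB_SECT yields the number of 4 KiB blocks
 * (= bits), dividing by 8 converts bits to bytes, and dividing by 512 yields
 * the 512-byte sectors of on-disk bitmap that on_disk_bm_sect must cover. */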
3322
3323
3324/**
3325 * drbd_md_read() - Reads in the meta data super block
3326 * @device: DRBD device.
3327 * @bdev: Device from which the meta data should be read in.
3328 *
3329 * Return NO_ERROR on success, and an enum drbd_ret_code in case
3330 * something goes wrong.
3331 *
3332 * Called exactly once during drbd_adm_attach(), while still being D_DISKLESS,
3333 * even before @bdev is assigned to @device->ldev.
3334 */
3335int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
3336{
3337 struct meta_data_on_disk *buffer;
3338 u32 magic, flags;
3339 int i, rv = NO_ERROR;
3340
3341 if (device->state.disk != D_DISKLESS)
3342 return ERR_DISK_CONFIGURED;
3343
3344 buffer = drbd_md_get_buffer(device, __func__);
3345 if (!buffer)
3346 return ERR_NOMEM;
3347
3348 /* First, figure out where our meta data superblock is located,
3349 * and read it. */
3350 bdev->md.meta_dev_idx = bdev->disk_conf->meta_dev_idx;
3351 bdev->md.md_offset = drbd_md_ss(bdev);
3352 /* Even for (flexible or indexed) external meta data,
3353 * initially restrict us to the 4k superblock for now.
3354 * Affects the paranoia out-of-range access check in drbd_md_sync_page_io(). */
3355 bdev->md.md_size_sect = 8;
3356
3357 if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset,
3358 REQ_OP_READ)) {
3359 /* NOTE: can't do normal error processing here as this is
3360 called BEFORE disk is attached */
3361 drbd_err(device, "Error while reading metadata.\n");
3362 rv = ERR_IO_MD_DISK;
3363 goto err;
3364 }
3365
3366 magic = be32_to_cpu(buffer->magic);
3367 flags = be32_to_cpu(buffer->flags);
3368 if (magic == DRBD_MD_MAGIC_84_UNCLEAN ||
3369 (magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) {
3370 /* btw: that's Activity Log clean, not "all" clean. */
3371 drbd_err(device, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
3372 rv = ERR_MD_UNCLEAN;
3373 goto err;
3374 }
3375
3376 rv = ERR_MD_INVALID;
3377 if (magic != DRBD_MD_MAGIC_08) {
3378 if (magic == DRBD_MD_MAGIC_07)
3379 drbd_err(device, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
3380 else
3381 drbd_err(device, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
3382 goto err;
3383 }
3384
3385 if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
3386 drbd_err(device, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
3387 be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
3388 goto err;
3389 }
3390
3391
3392 /* convert to in_core endian */
3393 bdev->md.la_size_sect = be64_to_cpu(buffer->la_size_sect);
3394 for (i = UI_CURRENT; i < UI_SIZE; i++)
3395 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
3396 bdev->md.flags = be32_to_cpu(buffer->flags);
3397 bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
3398
3399 bdev->md.md_size_sect = be32_to_cpu(buffer->md_size_sect);
3400 bdev->md.al_offset = be32_to_cpu(buffer->al_offset);
3401 bdev->md.bm_offset = be32_to_cpu(buffer->bm_offset);
3402
3403 if (check_activity_log_stripe_size(device, buffer, &bdev->md))
3404 goto err;
3405 if (check_offsets_and_sizes(device, bdev))
3406 goto err;
3407
3408 if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
3409 drbd_err(device, "unexpected bm_offset: %d (expected %d)\n",
3410 be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
3411 goto err;
3412 }
3413 if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
3414 drbd_err(device, "unexpected md_size: %u (expected %u)\n",
3415 be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
3416 goto err;
3417 }
3418
3419 rv = NO_ERROR;
3420
3421 spin_lock_irq(&device->resource->req_lock);
3422 if (device->state.conn < C_CONNECTED) {
3423 unsigned int peer;
3424 peer = be32_to_cpu(buffer->la_peer_max_bio_size);
3425 peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
3426 device->peer_max_bio_size = peer;
3427 }
3428 spin_unlock_irq(&device->resource->req_lock);
3429
3430 err:
3431 drbd_md_put_buffer(device);
3432
3433 return rv;
3434}
3435
3436/**
3437 * drbd_md_mark_dirty() - Mark meta data super block as dirty
3438 * @device: DRBD device.
3439 *
3440 * Call this function if you change anything that should be written to
3441 * the meta-data super block. This function sets MD_DIRTY and arms a
3442 * timer that ensures drbd_md_sync() gets run within five seconds.
3443 */
3444#ifdef DEBUG
3445void drbd_md_mark_dirty_(struct drbd_device *device, unsigned int line, const char *func)
3446{
3447 if (!test_and_set_bit(MD_DIRTY, &device->flags)) {
3448 mod_timer(&device->md_sync_timer, jiffies + HZ);
3449 device->last_md_mark_dirty.line = line;
3450 device->last_md_mark_dirty.func = func;
3451 }
3452}
3453#else
3454void drbd_md_mark_dirty(struct drbd_device *device)
3455{
3456 if (!test_and_set_bit(MD_DIRTY, &device->flags))
3457 mod_timer(&device->md_sync_timer, jiffies + 5*HZ);
3458}
3459#endif
3460
3461void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local)
3462{
3463 int i;
3464
3465 for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
3466 device->ldev->md.uuid[i+1] = device->ldev->md.uuid[i];
3467}
3468
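/* The least significant bit of the current UUID acts as a role marker:
 * for idx == UI_CURRENT, __drbd_uuid_set() below sets that bit while the node
 * is Primary and clears it otherwise, and forwards the adjusted value to
 * drbd_set_ed_uuid() as well. */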
3469void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
3470{
3471 if (idx == UI_CURRENT) {
3472 if (device->state.role == R_PRIMARY)
3473 val |= 1;
3474 else
3475 val &= ~((u64)1);
3476
3477 drbd_set_ed_uuid(device, val);
3478 }
3479
3480 device->ldev->md.uuid[idx] = val;
3481 drbd_md_mark_dirty(device);
3482}
3483
3484void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
3485{
3486 unsigned long flags;
3487 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
3488 __drbd_uuid_set(device, idx, val);
3489 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
3490}
3491
3492void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
3493{
3494 unsigned long flags;
3495 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
3496 if (device->ldev->md.uuid[idx]) {
3497 drbd_uuid_move_history(device);
3498 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[idx];
3499 }
3500 __drbd_uuid_set(device, idx, val);
3501 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
3502}
3503
3504/**
3505 * drbd_uuid_new_current() - Creates a new current UUID
3506 * @device: DRBD device.
3507 *
3508 * Creates a new current UUID, and rotates the old current UUID into
3509 * the bitmap slot. Causes an incremental resync upon next connect.
3510 */
3511void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local)
3512{
3513 u64 val;
3514 unsigned long long bm_uuid;
3515
3516 get_random_bytes(&val, sizeof(u64));
3517
3518 spin_lock_irq(&device->ldev->md.uuid_lock);
3519 bm_uuid = device->ldev->md.uuid[UI_BITMAP];
3520
3521 if (bm_uuid)
3522 drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);
3523
3524 device->ldev->md.uuid[UI_BITMAP] = device->ldev->md.uuid[UI_CURRENT];
3525 __drbd_uuid_set(device, UI_CURRENT, val);
3526 spin_unlock_irq(&device->ldev->md.uuid_lock);
3527
3528 drbd_print_uuids(device, "new current UUID");
3529 /* get it to stable storage _now_ */
3530 drbd_md_sync(device);
3531}
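
/*
 * Illustrative call site (a sketch only; the condition shown is an
 * assumption, not copied from drbd): rotate in a fresh current UUID when
 * we are Primary but have lost the connection, so the peer can detect the
 * divergence on the next handshake. The caller must hold a local-disk
 * reference (get_ldev()).
 *
 *	if (device->state.role == R_PRIMARY &&
 *	    device->state.conn < C_CONNECTED)
 *		drbd_uuid_new_current(device);
 */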
3532
3533void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
3534{
3535 unsigned long flags;
3536 if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
3537 return;
3538
3539 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
3540 if (val == 0) {
3541 drbd_uuid_move_history(device);
3542 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
3543 device->ldev->md.uuid[UI_BITMAP] = 0;
3544 } else {
3545 unsigned long long bm_uuid = device->ldev->md.uuid[UI_BITMAP];
3546 if (bm_uuid)
3547 drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);
3548
3549 device->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
3550 }
3551 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
3552
3553 drbd_md_mark_dirty(device);
3554}
3555
3556/**
3557 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3558 * @device: DRBD device.
3559 *
3560 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
3561 */
3562int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local)
3563{
3564 int rv = -EIO;
3565
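	/* Persist MDF_FULL_SYNC before touching the bitmap: should we crash
	 * while the bitmap write below is in flight, the flag still forces a
	 * full sync after the next attach. */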
3566 drbd_md_set_flag(device, MDF_FULL_SYNC);
3567 drbd_md_sync(device);
3568 drbd_bm_set_all(device);
3569
3570 rv = drbd_bm_write(device);
3571
3572 if (!rv) {
3573 drbd_md_clear_flag(device, MDF_FULL_SYNC);
3574 drbd_md_sync(device);
3575 }
3576
3577 return rv;
3578}
3579
3580/**
3581 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3582 * @device: DRBD device.
3583 *
3584 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
3585 */
3586int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local)
3587{
3588 drbd_resume_al(device);
3589 drbd_bm_clear_all(device);
3590 return drbd_bm_write(device);
3591}
3592
3593static int w_bitmap_io(struct drbd_work *w, int unused)
3594{
3595 struct drbd_device *device =
3596 container_of(w, struct drbd_device, bm_io_work.w);
3597 struct bm_io_work *work = &device->bm_io_work;
3598 int rv = -EIO;
3599
3600 if (work->flags != BM_LOCKED_CHANGE_ALLOWED) {
3601 int cnt = atomic_read(&device->ap_bio_cnt);
3602 if (cnt)
3603 drbd_err(device, "FIXME: ap_bio_cnt %d, expected 0; queued for '%s'\n",
3604 cnt, work->why);
3605 }
3606
3607 if (get_ldev(device)) {
3608 drbd_bm_lock(device, work->why, work->flags);
3609 rv = work->io_fn(device);
3610 drbd_bm_unlock(device);
3611 put_ldev(device);
3612 }
3613
3614 clear_bit_unlock(BITMAP_IO, &device->flags);
3615 wake_up(&device->misc_wait);
3616
3617 if (work->done)
3618 work->done(device, rv);
3619
3620 clear_bit(BITMAP_IO_QUEUED, &device->flags);
3621 work->why = NULL;
3622 work->flags = 0;
3623
3624 return 0;
3625}
3626
3627/**
3628 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
3629 * @device: DRBD device.
3630 * @io_fn: IO callback to be called when bitmap IO is possible
3631 * @done: callback to be called after the bitmap IO was performed
3632 * @why: Descriptive text of the reason for doing the IO
 * @flags: Bitmap locking flags, see enum bm_flag
3633 *
3634 * While IO on the bitmap is in progress, application IO is frozen; this
3635 * ensures that drbd_set_out_of_sync() cannot be called. This function MAY
3636 * ONLY be called from worker context. It MUST NOT be used while a previous
3637 * such work is still pending!
3638 *
3639 * Its worker function encloses the call of io_fn() by get_ldev() and
3640 * put_ldev().
3641 */
3642void drbd_queue_bitmap_io(struct drbd_device *device,
3643 int (*io_fn)(struct drbd_device *),
3644 void (*done)(struct drbd_device *, int),
3645 char *why, enum bm_flag flags)
3646{
3647 D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);
3648
3649 D_ASSERT(device, !test_bit(BITMAP_IO_QUEUED, &device->flags));
3650 D_ASSERT(device, !test_bit(BITMAP_IO, &device->flags));
3651 D_ASSERT(device, list_empty(&device->bm_io_work.w.list));
3652 if (device->bm_io_work.why)
3653 drbd_err(device, "FIXME going to queue '%s' but '%s' still pending?\n",
3654 why, device->bm_io_work.why);
3655
3656 device->bm_io_work.io_fn = io_fn;
3657 device->bm_io_work.done = done;
3658 device->bm_io_work.why = why;
3659 device->bm_io_work.flags = flags;
3660
3661 spin_lock_irq(&device->resource->req_lock);
3662 set_bit(BITMAP_IO, &device->flags);
3663	/* don't wait for pending application IO if the caller indicates that
3664	 * application IO does not conflict anyway. */
3665 if (flags == BM_LOCKED_CHANGE_ALLOWED || atomic_read(&device->ap_bio_cnt) == 0) {
3666 if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
3667 drbd_queue_work(&first_peer_device(device)->connection->sender_work,
3668 &device->bm_io_work.w);
3669 }
3670 spin_unlock_irq(&device->resource->req_lock);
3671}
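
/*
 * Example of a worker-context caller (a sketch; the "abw_start_sync"
 * completion callback is assumed to exist elsewhere and is only
 * illustrative here):
 *
 *	drbd_queue_bitmap_io(device, &drbd_bmio_set_n_write, &abw_start_sync,
 *			     "set_n_write from StartSync",
 *			     BM_LOCKED_TEST_ALLOWED);
 */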
3672
3673/**
3674 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
3675 * @device: DRBD device.
3676 * @io_fn: IO callback to be called when bitmap IO is possible
3677 * @why: Descriptive text of the reason for doing the IO
 * @flags: Bitmap locking flags, see enum bm_flag
3678 *
3679 * Freezes application IO while the actual IO operation runs. This
3680 * function MUST NOT be called from worker context.
3681 */
3682int drbd_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *),
3683 char *why, enum bm_flag flags)
3684{
3685	/* Only suspend io if some operation is supposed to be locked out */
3686 const bool do_suspend_io = flags & (BM_DONT_CLEAR|BM_DONT_SET|BM_DONT_TEST);
3687 int rv;
3688
3689 D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
3690
3691 if (do_suspend_io)
3692 drbd_suspend_io(device);
3693
3694 drbd_bm_lock(device, why, flags);
3695 rv = io_fn(device);
3696 drbd_bm_unlock(device);
3697
3698 if (do_suspend_io)
3699 drbd_resume_io(device);
3700
3701 return rv;
3702}
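
/*
 * Example of a non-worker caller (a sketch, mirroring how an attach path
 * might write out a fully set bitmap while locking out all modifications):
 *
 *	rv = drbd_bitmap_io(device, &drbd_bmio_set_n_write,
 *			    "set_n_write from attaching", BM_LOCKED_MASK);
 */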
3703
3704void drbd_md_set_flag(struct drbd_device *device, int flag) __must_hold(local)
3705{
3706 if ((device->ldev->md.flags & flag) != flag) {
3707 drbd_md_mark_dirty(device);
3708 device->ldev->md.flags |= flag;
3709 }
3710}
3711
3712void drbd_md_clear_flag(struct drbd_device *device, int flag) __must_hold(local)
3713{
3714 if ((device->ldev->md.flags & flag) != 0) {
3715 drbd_md_mark_dirty(device);
3716 device->ldev->md.flags &= ~flag;
3717 }
3718}

3719int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
3720{
3721 return (bdev->md.flags & flag) != 0;
3722}
3723
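/* Timer armed by drbd_md_mark_dirty(): defer the actual drbd_md_sync() to
 * the device work queue, since the meta-data write must not be issued from
 * (soft)irq/timer context. */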
3724static void md_sync_timer_fn(unsigned long data)
3725{
3726 struct drbd_device *device = (struct drbd_device *) data;
3727 drbd_device_post_work(device, MD_SYNC);
3728}
3729
3730const char *cmdname(enum drbd_packet cmd)
3731{
3732 /* THINK may need to become several global tables
3733 * when we want to support more than
3734 * one PRO_VERSION */
3735 static const char *cmdnames[] = {
3736 [P_DATA] = "Data",
3737 [P_WSAME] = "WriteSame",
3738 [P_TRIM] = "Trim",
3739 [P_DATA_REPLY] = "DataReply",
3740 [P_RS_DATA_REPLY] = "RSDataReply",
3741 [P_BARRIER] = "Barrier",
3742 [P_BITMAP] = "ReportBitMap",
3743 [P_BECOME_SYNC_TARGET] = "BecomeSyncTarget",
3744 [P_BECOME_SYNC_SOURCE] = "BecomeSyncSource",
3745 [P_UNPLUG_REMOTE] = "UnplugRemote",
3746 [P_DATA_REQUEST] = "DataRequest",
3747 [P_RS_DATA_REQUEST] = "RSDataRequest",
3748 [P_SYNC_PARAM] = "SyncParam",
3749 [P_SYNC_PARAM89] = "SyncParam89",
3750 [P_PROTOCOL] = "ReportProtocol",
3751 [P_UUIDS] = "ReportUUIDs",
3752 [P_SIZES] = "ReportSizes",
3753 [P_STATE] = "ReportState",
3754 [P_SYNC_UUID] = "ReportSyncUUID",
3755 [P_AUTH_CHALLENGE] = "AuthChallenge",
3756 [P_AUTH_RESPONSE] = "AuthResponse",
3757 [P_PING] = "Ping",
3758 [P_PING_ACK] = "PingAck",
3759 [P_RECV_ACK] = "RecvAck",
3760 [P_WRITE_ACK] = "WriteAck",
3761 [P_RS_WRITE_ACK] = "RSWriteAck",
3762 [P_SUPERSEDED] = "Superseded",
3763 [P_NEG_ACK] = "NegAck",
3764 [P_NEG_DREPLY] = "NegDReply",
3765 [P_NEG_RS_DREPLY] = "NegRSDReply",
3766 [P_BARRIER_ACK] = "BarrierAck",
3767 [P_STATE_CHG_REQ] = "StateChgRequest",
3768 [P_STATE_CHG_REPLY] = "StateChgReply",
3769 [P_OV_REQUEST] = "OVRequest",
3770 [P_OV_REPLY] = "OVReply",
3771 [P_OV_RESULT] = "OVResult",
3772 [P_CSUM_RS_REQUEST] = "CsumRSRequest",
3773 [P_RS_IS_IN_SYNC] = "CsumRSIsInSync",
3774 [P_COMPRESSED_BITMAP] = "CBitmap",
3775 [P_DELAY_PROBE] = "DelayProbe",
3776 [P_OUT_OF_SYNC] = "OutOfSync",
3777 [P_RETRY_WRITE] = "RetryWrite",
3778 [P_RS_CANCEL] = "RSCancel",
3779 [P_CONN_ST_CHG_REQ] = "conn_st_chg_req",
3780 [P_CONN_ST_CHG_REPLY] = "conn_st_chg_reply",
3782 [P_PROTOCOL_UPDATE] = "protocol_update",
3783 [P_RS_THIN_REQ] = "rs_thin_req",
3784 [P_RS_DEALLOCATED] = "rs_deallocated",
3785
3786 /* enum drbd_packet, but not commands - obsoleted flags:
3787 * P_MAY_IGNORE
3788 * P_MAX_OPT_CMD
3789 */
3790 };
3791
3792 /* too big for the array: 0xfffX */
3793 if (cmd == P_INITIAL_META)
3794 return "InitialMeta";
3795 if (cmd == P_INITIAL_DATA)
3796 return "InitialData";
3797 if (cmd == P_CONNECTION_FEATURES)
3798 return "ConnectionFeatures";
3799	if (cmd >= ARRAY_SIZE(cmdnames))
3800		return "Unknown";
	/* entries for gaps in enum drbd_packet are NULL */
	if (!cmdnames[cmd])
		return "Unknown";
3801	return cmdnames[cmd];
3802}
3803
3804/**
3805 * drbd_wait_misc - wait for a request to make progress
3806 * @device: device associated with the request
3807 * @i: the struct drbd_interval embedded in struct drbd_request or
3808 * struct drbd_peer_request
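 *
 * Called with the resource's req_lock held; the lock is dropped while
 * sleeping and re-acquired before returning.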
3809 */
3810int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i)
3811{
3812 struct net_conf *nc;
3813 DEFINE_WAIT(wait);
3814 long timeout;
3815
3816 rcu_read_lock();
3817 nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
3818 if (!nc) {
3819 rcu_read_unlock();
3820 return -ETIMEDOUT;
3821 }
3822 timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT;
3823 rcu_read_unlock();
3824
3825 /* Indicate to wake up device->misc_wait on progress. */
3826 i->waiting = true;
3827 prepare_to_wait(&device->misc_wait, &wait, TASK_INTERRUPTIBLE);
3828 spin_unlock_irq(&device->resource->req_lock);
3829 timeout = schedule_timeout(timeout);
3830 finish_wait(&device->misc_wait, &wait);
3831 spin_lock_irq(&device->resource->req_lock);
3832 if (!timeout || device->state.conn < C_CONNECTED)
3833 return -ETIMEDOUT;
3834 if (signal_pending(current))
3835 return -ERESTARTSYS;
3836 return 0;
3837}
3838
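/* Grab resources_mutex and then every resource's req_lock, with IRQs
 * disabled across the whole set. spin_lock_nested() with an increasing
 * subclass keeps lockdep from treating the repeated req_lock lock class
 * as a self-deadlock. */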
3839void lock_all_resources(void)
3840{
3841 struct drbd_resource *resource;
3842 int __maybe_unused i = 0;
3843
3844 mutex_lock(&resources_mutex);
3845 local_irq_disable();
3846 for_each_resource(resource, &drbd_resources)
3847 spin_lock_nested(&resource->req_lock, i++);
3848}
3849
3850void unlock_all_resources(void)
3851{
3852 struct drbd_resource *resource;
3853
3854 for_each_resource(resource, &drbd_resources)
3855 spin_unlock(&resource->req_lock);
3856 local_irq_enable();
3857 mutex_unlock(&resources_mutex);
3858}
3859
3860#ifdef CONFIG_DRBD_FAULT_INJECTION
3861/* Fault insertion support including random number generator shamelessly
3862 * stolen from kernel/rcutorture.c */
3863struct fault_random_state {
3864 unsigned long state;
3865 unsigned long count;
3866};
3867
3868#define FAULT_RANDOM_MULT 39916801 /* prime */
3869#define FAULT_RANDOM_ADD 479001701 /* prime */
3870#define FAULT_RANDOM_REFRESH 10000
3871
3872/*
3873 * Crude but fast random-number generator. Uses a linear congruential
3874 * generator, with occasional help from get_random_bytes().
3875 */
3876static unsigned long
3877_drbd_fault_random(struct fault_random_state *rsp)
3878{
3879 long refresh;
3880
3881 if (!rsp->count--) {
3882 get_random_bytes(&refresh, sizeof(refresh));
3883 rsp->state += refresh;
3884 rsp->count = FAULT_RANDOM_REFRESH;
3885 }
3886 rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
3887 return swahw32(rsp->state);
3888}
3889
3890static char *
3891_drbd_fault_str(unsigned int type) {
3892 static char *_faults[] = {
3893 [DRBD_FAULT_MD_WR] = "Meta-data write",
3894 [DRBD_FAULT_MD_RD] = "Meta-data read",
3895 [DRBD_FAULT_RS_WR] = "Resync write",
3896 [DRBD_FAULT_RS_RD] = "Resync read",
3897 [DRBD_FAULT_DT_WR] = "Data write",
3898 [DRBD_FAULT_DT_RD] = "Data read",
3899 [DRBD_FAULT_DT_RA] = "Data read ahead",
3900 [DRBD_FAULT_BM_ALLOC] = "BM allocation",
3901 [DRBD_FAULT_AL_EE] = "EE allocation",
3902 [DRBD_FAULT_RECEIVE] = "receive data corruption",
3903 };
3904
3905 return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
3906}
3907
3908unsigned int
3909_drbd_insert_fault(struct drbd_device *device, unsigned int type)
3910{
3911 static struct fault_random_state rrs = {0, 0};
3912
3913 unsigned int ret = (
3914 (drbd_fault_devs == 0 ||
3915 ((1 << device_to_minor(device)) & drbd_fault_devs) != 0) &&
3916 (((_drbd_fault_random(&rrs) % 100) + 1) <= drbd_fault_rate));
3917
3918 if (ret) {
3919 drbd_fault_count++;
3920
3921 if (__ratelimit(&drbd_ratelimit_state))
3922 drbd_warn(device, "***Simulating %s failure\n",
3923 _drbd_fault_str(type));
3924 }
3925
3926 return ret;
3927}
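
/*
 * Illustrative configuration (a sketch; assumes DRBD_FAULT_DT_WR is bit 4,
 * matching the order of the table above): inject a roughly 1% failure rate
 * into data writes, restricted to minor 0.
 *
 *	modprobe drbd enable_faults=16 fault_rate=1 fault_devs=1
 */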
3928#endif
3929
3930const char *drbd_buildtag(void)
3931{
3932	/* When DRBD is built from external sources, this holds a reference
3933	   to the git hash of the source code. */
3934
3935 static char buildtag[38] = "\0uilt-in";
3936
3937 if (buildtag[0] == 0) {
3938#ifdef MODULE
3939 sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
3940#else
3941 buildtag[0] = 'b';
3942#endif
3943 }
3944
3945 return buildtag;
3946}
3947
3948module_init(drbd_init)
3949module_exit(drbd_cleanup)
3950
3951EXPORT_SYMBOL(drbd_conn_str);
3952EXPORT_SYMBOL(drbd_role_str);
3953EXPORT_SYMBOL(drbd_disk_str);
3954EXPORT_SYMBOL(drbd_set_st_err_str);