// SPDX-License-Identifier: GPL-2.0-only
/*
   drbd.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.


 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/drbd.h>
#include <linux/uaccess.h>
#include <asm/types.h>
#include <net/sock.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
#include "drbd_vli.h"
#include "drbd_debugfs.h"

static DEFINE_MUTEX(drbd_main_mutex);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static void drbd_release(struct gendisk *gd, fmode_t mode);
static void md_sync_timer_fn(struct timer_list *t);
static int w_bitmap_io(struct drbd_work *w, int unused);

MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
	      "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
		 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);

#include <linux/moduleparam.h>
/* thanks to these macros, if compiled into the kernel (not-module),
 * these become boot parameters (e.g., drbd.minor_count) */

#ifdef CONFIG_DRBD_FAULT_INJECTION
int drbd_enable_faults;
int drbd_fault_rate;
static int drbd_fault_count;
static int drbd_fault_devs;
/* bitmap of enabled faults */
module_param_named(enable_faults, drbd_enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param_named(fault_rate, drbd_fault_rate, int, 0664);
/* count of faults inserted */
module_param_named(fault_count, drbd_fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param_named(fault_devs, drbd_fault_devs, int, 0644);
#endif

/* module parameters we can keep static */
static bool drbd_allow_oos; /* allow_open_on_secondary */
static bool drbd_disable_sendpage;
MODULE_PARM_DESC(allow_oos, "DONT USE!");
module_param_named(allow_oos, drbd_allow_oos, bool, 0);
module_param_named(disable_sendpage, drbd_disable_sendpage, bool, 0644);

/* module parameters we share */
int drbd_proc_details; /* Detail level in proc drbd */
module_param_named(proc_details, drbd_proc_details, int, 0644);
/* module parameters shared with defaults */
unsigned int drbd_minor_count = DRBD_MINOR_COUNT_DEF;
/* Module parameter for setting the user mode helper program
 * to run. Default is /sbin/drbdadm */
char drbd_usermode_helper[80] = "/sbin/drbdadm";
module_param_named(minor_count, drbd_minor_count, uint, 0444);
module_param_string(usermode_helper, drbd_usermode_helper, sizeof(drbd_usermode_helper), 0644);

/* in 2.6.x, our device mapping and config info contains our virtual gendisks
 * as member "struct gendisk *vdisk;"
 */
struct idr drbd_devices;
struct list_head drbd_resources;
struct mutex resources_mutex;

struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;	/* peer requests */
struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
mempool_t drbd_request_mempool;
mempool_t drbd_ee_mempool;
mempool_t drbd_md_io_page_pool;
struct bio_set drbd_md_io_bio_set;
struct bio_set drbd_io_bio_set;

/* I do not use a standard mempool, because:
   1) I want to hand out the pre-allocated objects first.
   2) I want to be able to interrupt sleeping allocation with a signal.
   Note: This is a single linked list, the next pointer is the private
	 member of struct page.
 */
struct page *drbd_pp_pool;
DEFINE_SPINLOCK(drbd_pp_lock);
int drbd_pp_vacant;
wait_queue_head_t drbd_pp_wait;
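
/*
 * Illustrative sketch (not part of the driver): how the pool above is
 * consumed as a single linked list, chaining pages through the page
 * private field.  The helper name pool_pop_page() is hypothetical;
 * the real consumers live in drbd_receiver.c.
 *
 *	static struct page *pool_pop_page(void)
 *	{
 *		struct page *page;
 *
 *		spin_lock(&drbd_pp_lock);
 *		page = drbd_pp_pool;
 *		if (page) {
 *			drbd_pp_pool = (struct page *)page_private(page);
 *			set_page_private(page, 0);
 *			drbd_pp_vacant--;
 *		}
 *		spin_unlock(&drbd_pp_lock);
 *		return page;
 *	}
 */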

DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);

static const struct block_device_operations drbd_ops = {
	.owner		= THIS_MODULE,
	.submit_bio	= drbd_submit_bio,
	.open		= drbd_open,
	.release	= drbd_release,
};

#ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When this is a real function sparse works.
 */
int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
{
	int io_allowed;

	atomic_inc(&device->local_cnt);
	io_allowed = (device->state.disk >= mins);
	if (!io_allowed) {
		if (atomic_dec_and_test(&device->local_cnt))
			wake_up(&device->misc_wait);
	}
	return io_allowed;
}

#endif

/**
 * tl_release() - mark as BARRIER_ACKED all requests in the corresponding transfer log epoch
 * @connection:	DRBD connection.
 * @barrier_nr:	Expected identifier of the DRBD write barrier packet.
 * @set_size:	Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * epoch of not yet barrier-acked requests, this function will cause a
 * termination of the connection.
 */
void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
		unsigned int set_size)
{
	struct drbd_request *r;
	struct drbd_request *req = NULL, *tmp = NULL;
	int expect_epoch = 0;
	int expect_size = 0;

	spin_lock_irq(&connection->resource->req_lock);

	/* find oldest not yet barrier-acked write request,
	 * count writes in its epoch. */
	list_for_each_entry(r, &connection->transfer_log, tl_requests) {
		const unsigned s = r->rq_state;
		if (!req) {
			if (!(s & RQ_WRITE))
				continue;
			if (!(s & RQ_NET_MASK))
				continue;
			if (s & RQ_NET_DONE)
				continue;
			req = r;
			expect_epoch = req->epoch;
			expect_size++;
		} else {
			if (r->epoch != expect_epoch)
				break;
			if (!(s & RQ_WRITE))
				continue;
			/* if (s & RQ_DONE): not expected */
			/* if (!(s & RQ_NET_MASK)): not expected */
			expect_size++;
		}
	}

	/* first some paranoia code */
	if (req == NULL) {
		drbd_err(connection, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
			 barrier_nr);
		goto bail;
	}
	if (expect_epoch != barrier_nr) {
		drbd_err(connection, "BAD! BarrierAck #%u received, expected #%u!\n",
			 barrier_nr, expect_epoch);
		goto bail;
	}

	if (expect_size != set_size) {
		drbd_err(connection, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
			 barrier_nr, set_size, expect_size);
		goto bail;
	}

	/* Clean up list of requests processed during current epoch. */
	/* this extra list walk restart is paranoia,
	 * to catch requests being barrier-acked "unexpectedly".
	 * It usually should find the same req again, or some READ preceding it. */
	list_for_each_entry(req, &connection->transfer_log, tl_requests)
		if (req->epoch == expect_epoch) {
			tmp = req;
			break;
		}
	req = list_prepare_entry(tmp, &connection->transfer_log, tl_requests);
	list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
		struct drbd_peer_device *peer_device;
		if (req->epoch != expect_epoch)
			break;
		peer_device = conn_peer_device(connection, req->device->vnr);
		_req_mod(req, BARRIER_ACKED, peer_device);
	}
	spin_unlock_irq(&connection->resource->req_lock);

	return;

bail:
	spin_unlock_irq(&connection->resource->req_lock);
	conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}
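
/*
 * Worked example (illustrative): if the oldest not yet barrier-acked
 * epoch is #17 and contains three network write requests, the only
 * acceptable barrier ack is one with barrier_nr == 17 and set_size == 3.
 * Any other combination trips one of the paranoia checks above and
 * takes the connection to C_PROTOCOL_ERROR.
 */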


/**
 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 * @connection:	DRBD connection to operate on.
 * @what:	The action/event to perform with all request objects
 *
 * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
 * RESTART_FROZEN_DISK_IO.
 */
/* must hold resource->req_lock */
void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
{
	struct drbd_peer_device *peer_device;
	struct drbd_request *req, *r;

	list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests) {
		peer_device = conn_peer_device(connection, req->device->vnr);
		_req_mod(req, what, peer_device);
	}
}

void tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
{
	spin_lock_irq(&connection->resource->req_lock);
	_tl_restart(connection, what);
	spin_unlock_irq(&connection->resource->req_lock);
}

/**
 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 * @connection:	DRBD connection.
 *
 * This is called after the connection to the peer was lost. The storage covered
 * by the requests on the transfer log gets marked as out of sync. Called from the
 * receiver thread and the worker thread.
 */
void tl_clear(struct drbd_connection *connection)
{
	tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
}

/**
 * tl_abort_disk_io() - Abort disk I/O for all requests for a certain device in the TL
 * @device:	DRBD device.
 */
void tl_abort_disk_io(struct drbd_device *device)
{
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct drbd_request *req, *r;

	spin_lock_irq(&connection->resource->req_lock);
	list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests) {
		if (!(req->rq_state & RQ_LOCAL_PENDING))
			continue;
		if (req->device != device)
			continue;
		_req_mod(req, ABORT_DISK_IO, NULL);
	}
	spin_unlock_irq(&connection->resource->req_lock);
}

static int drbd_thread_setup(void *arg)
{
	struct drbd_thread *thi = (struct drbd_thread *) arg;
	struct drbd_resource *resource = thi->resource;
	unsigned long flags;
	int retval;

	snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
		 thi->name[0],
		 resource->name);

	allow_kernel_signal(DRBD_SIGKILL);
	allow_kernel_signal(SIGXCPU);
restart:
	retval = thi->function(thi);

	spin_lock_irqsave(&thi->t_lock, flags);

	/* if the receiver has been "EXITING", the last thing it did
	 * was set the conn state to "StandAlone",
	 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
	 * and receiver thread will be "started".
	 * drbd_thread_start needs to set "RESTARTING" in that case.
	 * t_state check and assignment needs to be within the same spinlock,
	 * so either thread_start sees EXITING, and can remap to RESTARTING,
	 * or thread_start sees NONE, and can proceed as normal.
	 */

	if (thi->t_state == RESTARTING) {
		drbd_info(resource, "Restarting %s thread\n", thi->name);
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		goto restart;
	}

	thi->task = NULL;
	thi->t_state = NONE;
	smp_mb();
	complete_all(&thi->stop);
	spin_unlock_irqrestore(&thi->t_lock, flags);

	drbd_info(resource, "Terminating %s\n", current->comm);

	/* Release mod reference taken when thread was started */

	if (thi->connection)
		kref_put(&thi->connection->kref, drbd_destroy_connection);
	kref_put(&resource->kref, drbd_destroy_resource);
	module_put(THIS_MODULE);
	return retval;
}

static void drbd_thread_init(struct drbd_resource *resource, struct drbd_thread *thi,
			     int (*func) (struct drbd_thread *), const char *name)
{
	spin_lock_init(&thi->t_lock);
	thi->task = NULL;
	thi->t_state = NONE;
	thi->function = func;
	thi->resource = resource;
	thi->connection = NULL;
	thi->name = name;
}

int drbd_thread_start(struct drbd_thread *thi)
{
	struct drbd_resource *resource = thi->resource;
	struct task_struct *nt;
	unsigned long flags;

	/* is used from state engine doing drbd_thread_stop_nowait,
	 * while holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	switch (thi->t_state) {
	case NONE:
		drbd_info(resource, "Starting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);

		/* Get ref on module for thread - this is released when thread exits */
		if (!try_module_get(THIS_MODULE)) {
			drbd_err(resource, "Failed to get module reference in drbd_thread_start\n");
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return false;
		}

		kref_get(&resource->kref);
		if (thi->connection)
			kref_get(&thi->connection->kref);

		init_completion(&thi->stop);
		thi->reset_cpu_mask = 1;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		flush_signals(current); /* otherwise we may get -ERESTARTNOINTR */

		nt = kthread_create(drbd_thread_setup, (void *) thi,
				    "drbd_%c_%s", thi->name[0], thi->resource->name);

		if (IS_ERR(nt)) {
			drbd_err(resource, "Couldn't start thread\n");

			if (thi->connection)
				kref_put(&thi->connection->kref, drbd_destroy_connection);
			kref_put(&resource->kref, drbd_destroy_resource);
			module_put(THIS_MODULE);
			return false;
		}
		spin_lock_irqsave(&thi->t_lock, flags);
		thi->task = nt;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		wake_up_process(nt);
		break;
	case EXITING:
		thi->t_state = RESTARTING;
		drbd_info(resource, "Restarting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);
		fallthrough;
	case RUNNING:
	case RESTARTING:
	default:
		spin_unlock_irqrestore(&thi->t_lock, flags);
		break;
	}

	return true;
}
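
/*
 * Usage sketch (illustrative, not part of this file): a drbd_thread is
 * initialized once and then started and stopped as the connection state
 * changes.  The "receiver" example below mirrors how conn_create() and
 * the state engine use this API:
 *
 *	drbd_thread_init(resource, &connection->receiver,
 *			 drbd_receiver, "receiver");
 *	drbd_thread_start(&connection->receiver);
 *	...
 *	_drbd_thread_stop(&connection->receiver, false, true);
 *
 * The final "true" makes the stop wait on thi->stop until the thread
 * has really terminated.
 */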


void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
{
	unsigned long flags;

	enum drbd_thread_state ns = restart ? RESTARTING : EXITING;

	/* may be called from state engine, holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	if (thi->t_state == NONE) {
		spin_unlock_irqrestore(&thi->t_lock, flags);
		if (restart)
			drbd_thread_start(thi);
		return;
	}

	if (thi->t_state != ns) {
		if (thi->task == NULL) {
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return;
		}

		thi->t_state = ns;
		smp_mb();
		init_completion(&thi->stop);
		if (thi->task != current)
			send_sig(DRBD_SIGKILL, thi->task, 1);
	}

	spin_unlock_irqrestore(&thi->t_lock, flags);

	if (wait)
		wait_for_completion(&thi->stop);
}

int conn_lowest_minor(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	int vnr = 0, minor = -1;

	rcu_read_lock();
	peer_device = idr_get_next(&connection->peer_devices, &vnr);
	if (peer_device)
		minor = device_to_minor(peer_device->device);
	rcu_read_unlock();

	return minor;
}

#ifdef CONFIG_SMP
/*
 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
 *
 * Forces all threads of a resource onto the same CPU. This is beneficial for
 * DRBD's performance. May be overwritten by user's configuration.
 */
static void drbd_calc_cpu_mask(cpumask_var_t *cpu_mask)
{
	unsigned int *resources_per_cpu, min_index = ~0;

	resources_per_cpu = kcalloc(nr_cpu_ids, sizeof(*resources_per_cpu),
				    GFP_KERNEL);
	if (resources_per_cpu) {
		struct drbd_resource *resource;
		unsigned int cpu, min = ~0;

		rcu_read_lock();
		for_each_resource_rcu(resource, &drbd_resources) {
			for_each_cpu(cpu, resource->cpu_mask)
				resources_per_cpu[cpu]++;
		}
		rcu_read_unlock();
		for_each_online_cpu(cpu) {
			if (resources_per_cpu[cpu] < min) {
				min = resources_per_cpu[cpu];
				min_index = cpu;
			}
		}
		kfree(resources_per_cpu);
	}
	if (min_index == ~0) {
		cpumask_setall(*cpu_mask);
		return;
	}
	cpumask_set_cpu(min_index, *cpu_mask);
}
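
/*
 * Worked example (illustrative): with three online CPUs already hosting
 * 2, 1 and 0 resources respectively, resources_per_cpu = {2, 1, 0}, so
 * min_index becomes CPU 2 and the new resource's threads are pinned
 * there.  If the bookkeeping array cannot be allocated, the mask falls
 * back to "all CPUs".
 */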

/**
 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
 * @thi: drbd_thread object
 *
 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
 * prematurely.
 */
void drbd_thread_current_set_cpu(struct drbd_thread *thi)
{
	struct drbd_resource *resource = thi->resource;
	struct task_struct *p = current;

	if (!thi->reset_cpu_mask)
		return;
	thi->reset_cpu_mask = 0;
	set_cpus_allowed_ptr(p, resource->cpu_mask);
}
#else
#define drbd_calc_cpu_mask(A) ({})
#endif

/*
 * drbd_header_size - size of a packet header
 *
 * The header size is a multiple of 8, so any payload following the header is
 * word aligned on 64-bit architectures. (The bitmap send and receive code
 * relies on this.)
 */
unsigned int drbd_header_size(struct drbd_connection *connection)
{
	if (connection->agreed_pro_version >= 100) {
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
		return sizeof(struct p_header100);
	} else {
		BUILD_BUG_ON(sizeof(struct p_header80) !=
			     sizeof(struct p_header95));
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header80), 8));
		return sizeof(struct p_header80);
	}
}

static unsigned int prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
{
	h->magic = cpu_to_be32(DRBD_MAGIC);
	h->command = cpu_to_be16(cmd);
	h->length = cpu_to_be16(size);
	return sizeof(struct p_header80);
}

static unsigned int prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
{
	h->magic = cpu_to_be16(DRBD_MAGIC_BIG);
	h->command = cpu_to_be16(cmd);
	h->length = cpu_to_be32(size);
	return sizeof(struct p_header95);
}

static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cmd,
				      int size, int vnr)
{
	h->magic = cpu_to_be32(DRBD_MAGIC_100);
	h->volume = cpu_to_be16(vnr);
	h->command = cpu_to_be16(cmd);
	h->length = cpu_to_be32(size);
	h->pad = 0;
	return sizeof(struct p_header100);
}

static unsigned int prepare_header(struct drbd_connection *connection, int vnr,
				   void *buffer, enum drbd_packet cmd, int size)
{
	if (connection->agreed_pro_version >= 100)
		return prepare_header100(buffer, cmd, size, vnr);
	else if (connection->agreed_pro_version >= 95 &&
		 size > DRBD_MAX_SIZE_H80_PACKET)
		return prepare_header95(buffer, cmd, size);
	else
		return prepare_header80(buffer, cmd, size);
}
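
/*
 * Header selection at a glance (sketch derived from the code above):
 *
 *	agreed_pro_version >= 100	-> p_header100 (carries a volume
 *					   number, 32-bit length field)
 *	>= 95 && size > H80 limit	-> p_header95  (32-bit length field)
 *	otherwise			-> p_header80  (16-bit length field)
 *
 * So a pre-95 peer, or any payload small enough for a 16-bit length
 * field, still gets the old header format; only oversized payloads on
 * protocol versions 95..99 need the p_header95 variant.
 */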

static void *__conn_prepare_command(struct drbd_connection *connection,
				    struct drbd_socket *sock)
{
	if (!sock->socket)
		return NULL;
	return sock->sbuf + drbd_header_size(connection);
}

void *conn_prepare_command(struct drbd_connection *connection, struct drbd_socket *sock)
{
	void *p;

	mutex_lock(&sock->mutex);
	p = __conn_prepare_command(connection, sock);
	if (!p)
		mutex_unlock(&sock->mutex);

	return p;
}

void *drbd_prepare_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock)
{
	return conn_prepare_command(peer_device->connection, sock);
}
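
/*
 * Locking convention (sketch): on success, *_prepare_command() returns
 * with sock->mutex held and the returned pointer aimed just past the
 * header inside sock->sbuf; the matching *_send_command() drops the
 * mutex again.  A typical caller therefore looks like:
 *
 *	p = conn_prepare_command(connection, sock);
 *	if (!p)
 *		return -EIO;	(mutex was already released on failure)
 *	p->field = cpu_to_be32(...);
 *	return conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
 *
 * drbd_send_ping() below is a minimal real instance of this pattern.
 */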

static int __send_command(struct drbd_connection *connection, int vnr,
			  struct drbd_socket *sock, enum drbd_packet cmd,
			  unsigned int header_size, void *data,
			  unsigned int size)
{
	int msg_flags;
	int err;

	/*
	 * Called with @data == NULL and the size of the data blocks in @size
	 * for commands that send data blocks. For those commands, omit the
	 * MSG_MORE flag: this will increase the likelihood that data blocks
	 * which are page aligned on the sender will end up page aligned on the
	 * receiver.
	 */
	msg_flags = data ? MSG_MORE : 0;

	header_size += prepare_header(connection, vnr, sock->sbuf, cmd,
				      header_size + size);
	err = drbd_send_all(connection, sock->socket, sock->sbuf, header_size,
			    msg_flags);
	if (data && !err)
		err = drbd_send_all(connection, sock->socket, data, size, 0);
	/* DRBD protocol "pings" are latency critical.
	 * This is supposed to trigger tcp_push_pending_frames() */
	if (!err && (cmd == P_PING || cmd == P_PING_ACK))
		tcp_sock_set_nodelay(sock->socket->sk);

	return err;
}

static int __conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
			       enum drbd_packet cmd, unsigned int header_size,
			       void *data, unsigned int size)
{
	return __send_command(connection, 0, sock, cmd, header_size, data, size);
}

int conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __conn_send_command(connection, sock, cmd, header_size, data, size);
	mutex_unlock(&sock->mutex);
	return err;
}

int drbd_send_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __send_command(peer_device->connection, peer_device->device->vnr,
			     sock, cmd, header_size, data, size);
	mutex_unlock(&sock->mutex);
	return err;
}

int drbd_send_ping(struct drbd_connection *connection)
{
	struct drbd_socket *sock;

	sock = &connection->meta;
	if (!conn_prepare_command(connection, sock))
		return -EIO;
	return conn_send_command(connection, sock, P_PING, 0, NULL, 0);
}

int drbd_send_ping_ack(struct drbd_connection *connection)
{
	struct drbd_socket *sock;

	sock = &connection->meta;
	if (!conn_prepare_command(connection, sock))
		return -EIO;
	return conn_send_command(connection, sock, P_PING_ACK, 0, NULL, 0);
}

int drbd_send_sync_param(struct drbd_peer_device *peer_device)
{
	struct drbd_socket *sock;
	struct p_rs_param_95 *p;
	int size;
	const int apv = peer_device->connection->agreed_pro_version;
	enum drbd_packet cmd;
	struct net_conf *nc;
	struct disk_conf *dc;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;

	rcu_read_lock();
	nc = rcu_dereference(peer_device->connection->net_conf);

	size = apv <= 87 ? sizeof(struct p_rs_param)
		: apv == 88 ? sizeof(struct p_rs_param)
			+ strlen(nc->verify_alg) + 1
		: apv <= 94 ? sizeof(struct p_rs_param_89)
		: /* apv >= 95 */ sizeof(struct p_rs_param_95);

	cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;

	/* initialize verify_alg and csums_alg */
	BUILD_BUG_ON(sizeof(p->algs) != 2 * SHARED_SECRET_MAX);
	memset(&p->algs, 0, sizeof(p->algs));

	if (get_ldev(peer_device->device)) {
		dc = rcu_dereference(peer_device->device->ldev->disk_conf);
		p->resync_rate = cpu_to_be32(dc->resync_rate);
		p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead);
		p->c_delay_target = cpu_to_be32(dc->c_delay_target);
		p->c_fill_target = cpu_to_be32(dc->c_fill_target);
		p->c_max_rate = cpu_to_be32(dc->c_max_rate);
		put_ldev(peer_device->device);
	} else {
		p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF);
		p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
		p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
		p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
		p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
	}

	if (apv >= 88)
		strcpy(p->verify_alg, nc->verify_alg);
	if (apv >= 89)
		strcpy(p->csums_alg, nc->csums_alg);
	rcu_read_unlock();

	return drbd_send_command(peer_device, sock, cmd, size, NULL, 0);
}

int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd)
{
	struct drbd_socket *sock;
	struct p_protocol *p;
	struct net_conf *nc;
	int size, cf;

	sock = &connection->data;
	p = __conn_prepare_command(connection, sock);
	if (!p)
		return -EIO;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);

	if (nc->tentative && connection->agreed_pro_version < 92) {
		rcu_read_unlock();
		drbd_err(connection, "--dry-run is not supported by peer");
		return -EOPNOTSUPP;
	}

	size = sizeof(*p);
	if (connection->agreed_pro_version >= 87)
		size += strlen(nc->integrity_alg) + 1;

	p->protocol = cpu_to_be32(nc->wire_protocol);
	p->after_sb_0p = cpu_to_be32(nc->after_sb_0p);
	p->after_sb_1p = cpu_to_be32(nc->after_sb_1p);
	p->after_sb_2p = cpu_to_be32(nc->after_sb_2p);
	p->two_primaries = cpu_to_be32(nc->two_primaries);
	cf = 0;
	if (nc->discard_my_data)
		cf |= CF_DISCARD_MY_DATA;
	if (nc->tentative)
		cf |= CF_DRY_RUN;
	p->conn_flags = cpu_to_be32(cf);

	if (connection->agreed_pro_version >= 87)
		strcpy(p->integrity_alg, nc->integrity_alg);
	rcu_read_unlock();

	return __conn_send_command(connection, sock, cmd, size, NULL, 0);
}

int drbd_send_protocol(struct drbd_connection *connection)
{
	int err;

	mutex_lock(&connection->data.mutex);
	err = __drbd_send_protocol(connection, P_PROTOCOL);
	mutex_unlock(&connection->data.mutex);

	return err;
}

static int _drbd_send_uuids(struct drbd_peer_device *peer_device, u64 uuid_flags)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_uuids *p;
	int i;

	if (!get_ldev_if_state(device, D_NEGOTIATING))
		return 0;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p) {
		put_ldev(device);
		return -EIO;
	}
	spin_lock_irq(&device->ldev->md.uuid_lock);
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		p->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
	spin_unlock_irq(&device->ldev->md.uuid_lock);

	device->comm_bm_set = drbd_bm_total_weight(device);
	p->uuid[UI_SIZE] = cpu_to_be64(device->comm_bm_set);
	rcu_read_lock();
	uuid_flags |= rcu_dereference(peer_device->connection->net_conf)->discard_my_data ? 1 : 0;
	rcu_read_unlock();
	uuid_flags |= test_bit(CRASHED_PRIMARY, &device->flags) ? 2 : 0;
	uuid_flags |= device->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
	p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);

	put_ldev(device);
	return drbd_send_command(peer_device, sock, P_UUIDS, sizeof(*p), NULL, 0);
}

int drbd_send_uuids(struct drbd_peer_device *peer_device)
{
	return _drbd_send_uuids(peer_device, 0);
}

int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *peer_device)
{
	return _drbd_send_uuids(peer_device, 8);
}

void drbd_print_uuids(struct drbd_device *device, const char *text)
{
	if (get_ldev_if_state(device, D_NEGOTIATING)) {
		u64 *uuid = device->ldev->md.uuid;
		drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX\n",
			  text,
			  (unsigned long long)uuid[UI_CURRENT],
			  (unsigned long long)uuid[UI_BITMAP],
			  (unsigned long long)uuid[UI_HISTORY_START],
			  (unsigned long long)uuid[UI_HISTORY_END]);
		put_ldev(device);
	} else {
		drbd_info(device, "%s effective data uuid: %016llX\n",
			  text,
			  (unsigned long long)device->ed_uuid);
	}
}

void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *peer_device)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_rs_uuid *p;
	u64 uuid;

	D_ASSERT(device, device->state.disk == D_UP_TO_DATE);

	uuid = device->ldev->md.uuid[UI_BITMAP];
	if (uuid && uuid != UUID_JUST_CREATED)
		uuid = uuid + UUID_NEW_BM_OFFSET;
	else
		get_random_bytes(&uuid, sizeof(u64));
	drbd_uuid_set(device, UI_BITMAP, uuid);
	drbd_print_uuids(device, "updated sync UUID");
	drbd_md_sync(device);

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (p) {
		p->uuid = cpu_to_be64(uuid);
		drbd_send_command(peer_device, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
	}
}

int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enum dds_flags flags)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_sizes *p;
	sector_t d_size, u_size;
	int q_order_type;
	unsigned int max_bio_size;
	unsigned int packet_size;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;

	packet_size = sizeof(*p);
	if (peer_device->connection->agreed_features & DRBD_FF_WSAME)
		packet_size += sizeof(p->qlim[0]);

	memset(p, 0, packet_size);
	if (get_ldev_if_state(device, D_NEGOTIATING)) {
		struct block_device *bdev = device->ldev->backing_bdev;
		struct request_queue *q = bdev_get_queue(bdev);

		d_size = drbd_get_max_capacity(device->ldev);
		rcu_read_lock();
		u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
		rcu_read_unlock();
		q_order_type = drbd_queue_order_type(device);
		max_bio_size = queue_max_hw_sectors(q) << 9;
		max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
		p->qlim->physical_block_size =
			cpu_to_be32(bdev_physical_block_size(bdev));
		p->qlim->logical_block_size =
			cpu_to_be32(bdev_logical_block_size(bdev));
		p->qlim->alignment_offset =
			cpu_to_be32(bdev_alignment_offset(bdev));
		p->qlim->io_min = cpu_to_be32(bdev_io_min(bdev));
		p->qlim->io_opt = cpu_to_be32(bdev_io_opt(bdev));
		p->qlim->discard_enabled = !!bdev_max_discard_sectors(bdev);
		put_ldev(device);
	} else {
		struct request_queue *q = device->rq_queue;

		p->qlim->physical_block_size =
			cpu_to_be32(queue_physical_block_size(q));
		p->qlim->logical_block_size =
			cpu_to_be32(queue_logical_block_size(q));
		p->qlim->alignment_offset = 0;
		p->qlim->io_min = cpu_to_be32(queue_io_min(q));
		p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
		p->qlim->discard_enabled = 0;

		d_size = 0;
		u_size = 0;
		q_order_type = QUEUE_ORDERED_NONE;
		max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
	}

	if (peer_device->connection->agreed_pro_version <= 94)
		max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
	else if (peer_device->connection->agreed_pro_version < 100)
		max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE_P95);

	p->d_size = cpu_to_be64(d_size);
	p->u_size = cpu_to_be64(u_size);
	if (trigger_reply)
		p->c_size = 0;
	else
		p->c_size = cpu_to_be64(get_capacity(device->vdisk));
	p->max_bio_size = cpu_to_be32(max_bio_size);
	p->queue_order_type = cpu_to_be16(q_order_type);
	p->dds_flags = cpu_to_be16(flags);

	return drbd_send_command(peer_device, sock, P_SIZES, packet_size, NULL, 0);
}

/**
 * drbd_send_current_state() - Sends the drbd state to the peer
 * @peer_device:	DRBD peer device.
 */
int drbd_send_current_state(struct drbd_peer_device *peer_device)
{
	struct drbd_socket *sock;
	struct p_state *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->state = cpu_to_be32(peer_device->device->state.i); /* Within the send mutex */
	return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
}

/**
 * drbd_send_state() - After a state change, sends the new state to the peer
 * @peer_device:	DRBD peer device.
 * @state:		the state to send, not necessarily the current state.
 *
 * Each state change queues an "after_state_ch" work, which will eventually
 * send the resulting new state to the peer. If more state changes happen
 * between queuing and processing of the after_state_ch work, we still
 * want to send each intermediary state in the order it occurred.
 */
int drbd_send_state(struct drbd_peer_device *peer_device, union drbd_state state)
{
	struct drbd_socket *sock;
	struct p_state *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->state = cpu_to_be32(state.i); /* Within the send mutex */
	return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
}

int drbd_send_state_req(struct drbd_peer_device *peer_device, union drbd_state mask, union drbd_state val)
{
	struct drbd_socket *sock;
	struct p_req_state *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->mask = cpu_to_be32(mask.i);
	p->val = cpu_to_be32(val.i);
	return drbd_send_command(peer_device, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
}

int conn_send_state_req(struct drbd_connection *connection, union drbd_state mask, union drbd_state val)
{
	enum drbd_packet cmd;
	struct drbd_socket *sock;
	struct p_req_state *p;

	cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
	sock = &connection->data;
	p = conn_prepare_command(connection, sock);
	if (!p)
		return -EIO;
	p->mask = cpu_to_be32(mask.i);
	p->val = cpu_to_be32(val.i);
	return conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
}

void drbd_send_sr_reply(struct drbd_peer_device *peer_device, enum drbd_state_rv retcode)
{
	struct drbd_socket *sock;
	struct p_req_state_reply *p;

	sock = &peer_device->connection->meta;
	p = drbd_prepare_command(peer_device, sock);
	if (p) {
		p->retcode = cpu_to_be32(retcode);
		drbd_send_command(peer_device, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
	}
}

void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode)
{
	struct drbd_socket *sock;
	struct p_req_state_reply *p;
	enum drbd_packet cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;

	sock = &connection->meta;
	p = conn_prepare_command(connection, sock);
	if (p) {
		p->retcode = cpu_to_be32(retcode);
		conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
	}
}

static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
{
	BUG_ON(code & ~0xf);
	p->encoding = (p->encoding & ~0xf) | code;
}

static void dcbp_set_start(struct p_compressed_bm *p, int set)
{
	p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
}

static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
{
	BUG_ON(n & ~0x7);
	p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
}
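
/*
 * Worked example (illustrative): the encoding byte packs three fields,
 * bit 7 = start value of the first run, bits 6..4 = number of pad bits,
 * bits 3..0 = bitmap code.  Assuming code RLE_VLI_Bits (2 in the
 * on-wire enum), a first run of set bits, and 3 pad bits, the three
 * helpers above compose 0x80 | (3 << 4) | 2 == 0xb2.
 */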

static int fill_bitmap_rle_bits(struct drbd_device *device,
				struct p_compressed_bm *p,
				unsigned int size,
				struct bm_xfer_ctx *c)
{
	struct bitstream bs;
	unsigned long plain_bits;
	unsigned long tmp;
	unsigned long rl;
	unsigned len;
	unsigned toggle;
	int bits, use_rle;

	/* may we use this feature? */
	rcu_read_lock();
	use_rle = rcu_dereference(first_peer_device(device)->connection->net_conf)->use_rle;
	rcu_read_unlock();
	if (!use_rle || first_peer_device(device)->connection->agreed_pro_version < 90)
		return 0;

	if (c->bit_offset >= c->bm_bits)
		return 0; /* nothing to do. */

	/* use at most this many bytes */
	bitstream_init(&bs, p->code, size, 0);
	memset(p->code, 0, size);
	/* plain bits covered in this code string */
	plain_bits = 0;

	/* p->encoding & 0x80 stores whether the first run length is set.
	 * bit offset is implicit.
	 * start with toggle == 2 to be able to tell the first iteration */
	toggle = 2;

	/* see how many plain bits we can stuff into one packet
	 * using RLE and VLI. */
	do {
		tmp = (toggle == 0) ? _drbd_bm_find_next_zero(device, c->bit_offset)
				    : _drbd_bm_find_next(device, c->bit_offset);
		if (tmp == -1UL)
			tmp = c->bm_bits;
		rl = tmp - c->bit_offset;

		if (toggle == 2) { /* first iteration */
			if (rl == 0) {
				/* the first checked bit was set,
				 * store start value, */
				dcbp_set_start(p, 1);
				/* but skip encoding of zero run length */
				toggle = !toggle;
				continue;
			}
			dcbp_set_start(p, 0);
		}

		/* paranoia: catch zero runlength.
		 * can only happen if bitmap is modified while we scan it. */
		if (rl == 0) {
			drbd_err(device, "unexpected zero runlength while encoding bitmap "
				 "t:%u bo:%lu\n", toggle, c->bit_offset);
			return -1;
		}

		bits = vli_encode_bits(&bs, rl);
		if (bits == -ENOBUFS) /* buffer full */
			break;
		if (bits <= 0) {
			drbd_err(device, "error while encoding bitmap: %d\n", bits);
			return 0;
		}

		toggle = !toggle;
		plain_bits += rl;
		c->bit_offset = tmp;
	} while (c->bit_offset < c->bm_bits);

	len = bs.cur.b - p->code + !!bs.cur.bit;

	if (plain_bits < (len << 3)) {
		/* incompressible with this method.
		 * we need to rewind both word and bit position. */
		c->bit_offset -= plain_bits;
		bm_xfer_ctx_bit_to_word_offset(c);
		c->bit_offset = c->word_offset * BITS_PER_LONG;
		return 0;
	}

	/* RLE + VLI was able to compress it just fine.
	 * update c->word_offset. */
	bm_xfer_ctx_bit_to_word_offset(c);

	/* store pad_bits */
	dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);

	return len;
}
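
/*
 * Worked example (illustrative): a bitmap region that starts
 * 0000 1111 11... is encoded as the run lengths 4, 6, ... with the
 * start bit cleared, since the first run consists of zero bits; only
 * the VLI-coded run lengths go on the wire.  A region whose runs are
 * all length 1 would need more code bits than plain bits, which is
 * exactly the "incompressible" case above (plain_bits < len * 8) that
 * rewinds and falls back to sending plain bitmap words.
 */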

/*
 * send_bitmap_rle_or_plain
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
send_bitmap_rle_or_plain(struct drbd_peer_device *peer_device, struct bm_xfer_ctx *c)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock = &peer_device->connection->data;
	unsigned int header_size = drbd_header_size(peer_device->connection);
	struct p_compressed_bm *p = sock->sbuf + header_size;
	int len, err;

	len = fill_bitmap_rle_bits(device, p,
			DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);
	if (len < 0)
		return -EIO;

	if (len) {
		dcbp_set_code(p, RLE_VLI_Bits);
		err = __send_command(peer_device->connection, device->vnr, sock,
				     P_COMPRESSED_BITMAP, sizeof(*p) + len,
				     NULL, 0);
		c->packets[0]++;
		c->bytes[0] += header_size + sizeof(*p) + len;

		if (c->bit_offset >= c->bm_bits)
			len = 0; /* DONE */
	} else {
		/* was not compressible.
		 * send a buffer full of plain text bits instead. */
		unsigned int data_size;
		unsigned long num_words;
		unsigned long *p = sock->sbuf + header_size;

		data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
		num_words = min_t(size_t, data_size / sizeof(*p),
				  c->bm_words - c->word_offset);
		len = num_words * sizeof(*p);
		if (len)
			drbd_bm_get_lel(device, c->word_offset, num_words, p);
		err = __send_command(peer_device->connection, device->vnr, sock, P_BITMAP,
				     len, NULL, 0);
		c->word_offset += num_words;
		c->bit_offset = c->word_offset * BITS_PER_LONG;

		c->packets[1]++;
		c->bytes[1] += header_size + len;

		if (c->bit_offset > c->bm_bits)
			c->bit_offset = c->bm_bits;
	}
	if (!err) {
		if (len == 0) {
			INFO_bm_xfer_stats(peer_device, "send", c);
			return 0;
		} else
			return 1;
	}
	return -EIO;
}

/* See the comment at receive_bitmap() */
static int _drbd_send_bitmap(struct drbd_device *device,
			     struct drbd_peer_device *peer_device)
{
	struct bm_xfer_ctx c;
	int err;

	if (!expect(device, device->bitmap))
		return false;

	if (get_ldev(device)) {
		if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC)) {
			drbd_info(device, "Writing the whole bitmap, MDF_FullSync was set.\n");
			drbd_bm_set_all(device);
			if (drbd_bm_write(device, peer_device)) {
				/* write_bm did fail! Leave full sync flag set in Meta P_DATA
				 * but otherwise process as per normal - need to tell other
				 * side that a full resync is required! */
				drbd_err(device, "Failed to write bitmap to disk!\n");
			} else {
				drbd_md_clear_flag(device, MDF_FULL_SYNC);
				drbd_md_sync(device);
			}
		}
		put_ldev(device);
	}

	c = (struct bm_xfer_ctx) {
		.bm_bits = drbd_bm_bits(device),
		.bm_words = drbd_bm_words(device),
	};

	do {
		err = send_bitmap_rle_or_plain(peer_device, &c);
	} while (err > 0);

	return err == 0;
}

int drbd_send_bitmap(struct drbd_device *device, struct drbd_peer_device *peer_device)
{
	struct drbd_socket *sock = &peer_device->connection->data;
	int err = -1;

	mutex_lock(&sock->mutex);
	if (sock->socket)
		err = !_drbd_send_bitmap(device, peer_device);
	mutex_unlock(&sock->mutex);
	return err;
}

void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr, u32 set_size)
{
	struct drbd_socket *sock;
	struct p_barrier_ack *p;

	if (connection->cstate < C_WF_REPORT_PARAMS)
		return;

	sock = &connection->meta;
	p = conn_prepare_command(connection, sock);
	if (!p)
		return;
	p->barrier = barrier_nr;
	p->set_size = cpu_to_be32(set_size);
	conn_send_command(connection, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
}

/**
 * _drbd_send_ack() - Sends an ack packet
 * @peer_device:	DRBD peer device.
 * @cmd:		Packet command code.
 * @sector:		sector, needs to be in big endian byte order
 * @blksize:		size in byte, needs to be in big endian byte order
 * @block_id:		Id, big endian byte order
 */
static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
			  u64 sector, u32 blksize, u64 block_id)
{
	struct drbd_socket *sock;
	struct p_block_ack *p;

	if (peer_device->device->state.conn < C_CONNECTED)
		return -EIO;

	sock = &peer_device->connection->meta;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = sector;
	p->block_id = block_id;
	p->blksize = blksize;
	p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
	return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
}

/* dp->sector and dp->block_id already/still in network byte order,
 * data_size is payload size according to dp->head,
 * and may need to be corrected for digest size. */
void drbd_send_ack_dp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		      struct p_data *dp, int data_size)
{
	if (peer_device->connection->peer_integrity_tfm)
		data_size -= crypto_shash_digestsize(peer_device->connection->peer_integrity_tfm);
	_drbd_send_ack(peer_device, cmd, dp->sector, cpu_to_be32(data_size),
		       dp->block_id);
}

void drbd_send_ack_rp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		      struct p_block_req *rp)
{
	_drbd_send_ack(peer_device, cmd, rp->sector, rp->blksize, rp->block_id);
}

/**
 * drbd_send_ack() - Sends an ack packet
 * @peer_device:	DRBD peer device
 * @cmd:		packet command code
 * @peer_req:		peer request
 */
int drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		  struct drbd_peer_request *peer_req)
{
	return _drbd_send_ack(peer_device, cmd,
			      cpu_to_be64(peer_req->i.sector),
			      cpu_to_be32(peer_req->i.size),
			      peer_req->block_id);
}

/* This function misuses the block_id field to signal if the blocks
 * are in sync or not. */
int drbd_send_ack_ex(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		     sector_t sector, int blksize, u64 block_id)
{
	return _drbd_send_ack(peer_device, cmd,
			      cpu_to_be64(sector),
			      cpu_to_be32(blksize),
			      cpu_to_be64(block_id));
}

int drbd_send_rs_deallocated(struct drbd_peer_device *peer_device,
			     struct drbd_peer_request *peer_req)
{
	struct drbd_socket *sock;
	struct p_block_desc *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(peer_req->i.sector);
	p->blksize = cpu_to_be32(peer_req->i.size);
	p->pad = 0;
	return drbd_send_command(peer_device, sock, P_RS_DEALLOCATED, sizeof(*p), NULL, 0);
}

int drbd_send_drequest(struct drbd_peer_device *peer_device, int cmd,
		       sector_t sector, int size, u64 block_id)
{
	struct drbd_socket *sock;
	struct p_block_req *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(sector);
	p->block_id = block_id;
	p->blksize = cpu_to_be32(size);
	return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
}

int drbd_send_drequest_csum(struct drbd_peer_device *peer_device, sector_t sector, int size,
			    void *digest, int digest_size, enum drbd_packet cmd)
{
	struct drbd_socket *sock;
	struct p_block_req *p;

	/* FIXME: Put the digest into the preallocated socket buffer. */

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(sector);
	p->block_id = ID_SYNCER /* unused */;
	p->blksize = cpu_to_be32(size);
	return drbd_send_command(peer_device, sock, cmd, sizeof(*p), digest, digest_size);
}

int drbd_send_ov_request(struct drbd_peer_device *peer_device, sector_t sector, int size)
{
	struct drbd_socket *sock;
	struct p_block_req *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(sector);
	p->block_id = ID_SYNCER /* unused */;
	p->blksize = cpu_to_be32(size);
	return drbd_send_command(peer_device, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
}

/* called on sndtimeo
 * returns false if we should retry,
 * true if we think connection is dead
 */
static int we_should_drop_the_connection(struct drbd_connection *connection, struct socket *sock)
{
	int drop_it;
	/* long elapsed = (long)(jiffies - device->last_received); */

	drop_it =   connection->meta.socket == sock
		|| !connection->ack_receiver.task
		|| get_t_state(&connection->ack_receiver) != RUNNING
		|| connection->cstate < C_WF_REPORT_PARAMS;

	if (drop_it)
		return true;

	drop_it = !--connection->ko_count;
	if (!drop_it) {
		drbd_err(connection, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
			 current->comm, current->pid, connection->ko_count);
		request_ping(connection);
	}

	return drop_it; /* && (device->state == R_PRIMARY) */
}
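
/*
 * Worked example (illustrative, with assumed configuration values): with
 * the net options timeout=60 (i.e. 6 s, in units of 0.1 s) and
 * ko-count=7, each send timeout on the data socket decrements ko_count
 * and requests a ping; only when ko_count reaches zero, after seven
 * consecutive timeouts (roughly 42 s without progress), is the
 * connection declared dead.  ko_count is re-armed from net_conf on
 * every drbd_send() on the data socket (see drbd_send() below).
 */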

static void drbd_update_congested(struct drbd_connection *connection)
{
	struct sock *sk = connection->data.socket->sk;
	if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
		set_bit(NET_CONGESTED, &connection->flags);
}

/* The idea of sendpage seems to be to put some kind of reference
 * to the page into the skb, and to hand it over to the NIC. In
 * this process get_page() gets called.
 *
 * As soon as the page was really sent over the network put_page()
 * gets called by some part of the network layer. [ NIC driver? ]
 *
 * [ get_page() / put_page() increment/decrement the count. If count
 *   reaches 0 the page will be freed. ]
 *
 * This works nicely with pages from FSs.
 * But this means that in protocol A we might signal IO completion too early!
 *
 * In order not to corrupt data during a resync we must make sure
 * that we do not reuse our own buffer pages (EEs) too early, therefore
 * we have the net_ee list.
 *
 * XFS seems to have problems, still, it submits pages with page_count == 0!
 * As a workaround, we disable sendpage on pages
 * with page_count == 0 or PageSlab.
 */
static int _drbd_no_send_page(struct drbd_peer_device *peer_device, struct page *page,
			      int offset, size_t size, unsigned msg_flags)
{
	struct socket *socket;
	void *addr;
	int err;

	socket = peer_device->connection->data.socket;
	addr = kmap(page) + offset;
	err = drbd_send_all(peer_device->connection, socket, addr, size, msg_flags);
	kunmap(page);
	if (!err)
		peer_device->device->send_cnt += size >> 9;
	return err;
}

static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *page,
			   int offset, size_t size, unsigned msg_flags)
{
	struct socket *socket = peer_device->connection->data.socket;
	int len = size;
	int err = -EIO;

	/* e.g. XFS meta- & log-data is in slab pages, which have a
	 * page_count of 0 and/or have PageSlab() set.
	 * we cannot use send_page for those, as that does get_page();
	 * put_page(); and would cause either a VM_BUG directly, or
	 * __page_cache_release a page that would actually still be referenced
	 * by someone, leading to some obscure delayed Oops somewhere else. */
	if (drbd_disable_sendpage || !sendpage_ok(page))
		return _drbd_no_send_page(peer_device, page, offset, size, msg_flags);

	msg_flags |= MSG_NOSIGNAL;
	drbd_update_congested(peer_device->connection);
	do {
		int sent;

		sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
		if (sent <= 0) {
			if (sent == -EAGAIN) {
				if (we_should_drop_the_connection(peer_device->connection, socket))
					break;
				continue;
			}
			drbd_warn(peer_device->device, "%s: size=%d len=%d sent=%d\n",
				  __func__, (int)size, len, sent);
			if (sent < 0)
				err = sent;
			break;
		}
		len -= sent;
		offset += sent;
	} while (len > 0 /* THINK && device->cstate >= C_CONNECTED*/);
	clear_bit(NET_CONGESTED, &peer_device->connection->flags);

	if (len == 0) {
		err = 0;
		peer_device->device->send_cnt += size >> 9;
	}
	return err;
}

static int _drbd_send_bio(struct drbd_peer_device *peer_device, struct bio *bio)
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	/* hint all but last page with MSG_MORE */
	bio_for_each_segment(bvec, bio, iter) {
		int err;

		err = _drbd_no_send_page(peer_device, bvec.bv_page,
					 bvec.bv_offset, bvec.bv_len,
					 bio_iter_last(bvec, iter)
					 ? 0 : MSG_MORE);
		if (err)
			return err;
	}
	return 0;
}

static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *bio)
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	/* hint all but last page with MSG_MORE */
	bio_for_each_segment(bvec, bio, iter) {
		int err;

		err = _drbd_send_page(peer_device, bvec.bv_page,
				      bvec.bv_offset, bvec.bv_len,
				      bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
		if (err)
			return err;
	}
	return 0;
}

static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
			    struct drbd_peer_request *peer_req)
{
	struct page *page = peer_req->pages;
	unsigned len = peer_req->i.size;
	int err;

	/* hint all but last page with MSG_MORE */
	page_chain_for_each(page) {
		unsigned l = min_t(unsigned, len, PAGE_SIZE);

		err = _drbd_send_page(peer_device, page, 0, l,
				      page_chain_next(page) ? MSG_MORE : 0);
		if (err)
			return err;
		len -= l;
	}
	return 0;
}

static u32 bio_flags_to_wire(struct drbd_connection *connection,
			     struct bio *bio)
{
	if (connection->agreed_pro_version >= 95)
		return  (bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0) |
			(bio->bi_opf & REQ_FUA ? DP_FUA : 0) |
			(bio->bi_opf & REQ_PREFLUSH ? DP_FLUSH : 0) |
			(bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0) |
			(bio_op(bio) == REQ_OP_WRITE_ZEROES ?
			  ((connection->agreed_features & DRBD_FF_WZEROES) ?
			   (DP_ZEROES | (!(bio->bi_opf & REQ_NOUNMAP) ? DP_DISCARD : 0))
			   : DP_DISCARD)
			: 0);
	else
		return bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0;
}
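
/*
 * Example mapping (illustrative): on a >= 95 peer, a REQ_SYNC | REQ_FUA
 * write becomes DP_RW_SYNC | DP_FUA on the wire; a REQ_OP_WRITE_ZEROES
 * bio without REQ_NOUNMAP becomes DP_ZEROES | DP_DISCARD when the peer
 * announced DRBD_FF_WZEROES, and plain DP_DISCARD otherwise.  Pre-95
 * peers only ever see DP_RW_SYNC.
 */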

/* Used to send write or TRIM aka REQ_OP_DISCARD requests
 * R_PRIMARY -> Peer	(P_DATA, P_TRIM)
 */
int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_data *p;
	void *digest_out;
	unsigned int dp_flags = 0;
	int digest_size;
	int err;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	digest_size = peer_device->connection->integrity_tfm ?
		      crypto_shash_digestsize(peer_device->connection->integrity_tfm) : 0;

	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(req->i.sector);
	p->block_id = (unsigned long)req;
	p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
	dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio);
	if (device->state.conn >= C_SYNC_SOURCE &&
	    device->state.conn <= C_PAUSED_SYNC_T)
		dp_flags |= DP_MAY_SET_IN_SYNC;
	if (peer_device->connection->agreed_pro_version >= 100) {
		if (req->rq_state & RQ_EXP_RECEIVE_ACK)
			dp_flags |= DP_SEND_RECEIVE_ACK;
		/* During resync, request an explicit write ack,
		 * even in protocol != C */
		if (req->rq_state & RQ_EXP_WRITE_ACK
		|| (dp_flags & DP_MAY_SET_IN_SYNC))
			dp_flags |= DP_SEND_WRITE_ACK;
	}
	p->dp_flags = cpu_to_be32(dp_flags);

	if (dp_flags & (DP_DISCARD|DP_ZEROES)) {
		enum drbd_packet cmd = (dp_flags & DP_ZEROES) ? P_ZEROES : P_TRIM;
		struct p_trim *t = (struct p_trim *)p;
		t->size = cpu_to_be32(req->i.size);
		err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*t), NULL, 0);
		goto out;
	}
	digest_out = p + 1;

	/* our digest is still only over the payload.
	 * TRIM does not carry any payload. */
	if (digest_size)
		drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest_out);
	err = __send_command(peer_device->connection, device->vnr, sock, P_DATA,
			     sizeof(*p) + digest_size, NULL, req->i.size);
	if (!err) {
		/* For protocol A, we have to memcpy the payload into
		 * socket buffers, as we may complete right away
		 * as soon as we handed it over to tcp, at which point the data
		 * pages may become invalid.
		 *
		 * For data-integrity enabled, we copy it as well, so we can be
		 * sure that even if the bio pages may still be modified, it
		 * won't change the data on the wire, thus if the digest checks
		 * out ok after sending on this side, but does not fit on the
		 * receiving side, we sure have detected corruption elsewhere.
		 */
		if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || digest_size)
			err = _drbd_send_bio(peer_device, req->master_bio);
		else
			err = _drbd_send_zc_bio(peer_device, req->master_bio);

		/* double check digest, sometimes buffers have been modified in flight. */
		if (digest_size > 0 && digest_size <= 64) {
			/* 64 byte, 512 bit, is the largest digest size
			 * currently supported in kernel crypto. */
			unsigned char digest[64];
			drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest);
			if (memcmp(p + 1, digest, digest_size)) {
				drbd_warn(device,
					"Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
					(unsigned long long)req->i.sector, req->i.size);
			}
		} /* else if (digest_size > 64) {
		     ... Be noisy about digest too large ...
		} */
	}
out:
	mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */

	return err;
}

/* answer packet, used to send data back for read requests:
 *  Peer          -> (diskless) R_PRIMARY  (P_DATA_REPLY)
 *  C_SYNC_SOURCE -> C_SYNC_TARGET         (P_RS_DATA_REPLY)
 */
int drbd_send_block(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		    struct drbd_peer_request *peer_req)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_data *p;
	int err;
	int digest_size;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);

	digest_size = peer_device->connection->integrity_tfm ?
		      crypto_shash_digestsize(peer_device->connection->integrity_tfm) : 0;

	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(peer_req->i.sector);
	p->block_id = peer_req->block_id;
	p->seq_num = 0;  /* unused */
	p->dp_flags = 0;
	if (digest_size)
		drbd_csum_ee(peer_device->connection->integrity_tfm, peer_req, p + 1);
	err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*p) + digest_size, NULL, peer_req->i.size);
	if (!err)
		err = _drbd_send_zc_ee(peer_device, peer_req);
	mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */

	return err;
}

int drbd_send_out_of_sync(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_socket *sock;
	struct p_block_desc *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(req->i.sector);
	p->blksize = cpu_to_be32(req->i.size);
	return drbd_send_command(peer_device, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
}

/*
  drbd_send distinguishes two cases:

  Packets sent via the data socket "sock"
  and packets sent via the meta data socket "msock"

		    sock                      msock
  -----------------+-------------------------+------------------------------
  timeout           conf.timeout / 2          conf.timeout / 2
  timeout action    send a ping via msock     Abort communication
					      and close all sockets
*/

/*
 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
 */
int drbd_send(struct drbd_connection *connection, struct socket *sock,
	      void *buf, size_t size, unsigned msg_flags)
{
	struct kvec iov = {.iov_base = buf, .iov_len = size};
	struct msghdr msg = {.msg_flags = msg_flags | MSG_NOSIGNAL};
	int rv, sent = 0;

	if (!sock)
		return -EBADR;

	/* THINK  if (signal_pending) return ... ? */

	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, size);

	if (sock == connection->data.socket) {
		rcu_read_lock();
		connection->ko_count = rcu_dereference(connection->net_conf)->ko_count;
		rcu_read_unlock();
		drbd_update_congested(connection);
	}
	do {
		rv = sock_sendmsg(sock, &msg);
		if (rv == -EAGAIN) {
			if (we_should_drop_the_connection(connection, sock))
				break;
			else
				continue;
		}
		if (rv == -EINTR) {
			flush_signals(current);
			rv = 0;
1846 }
1847 if (rv < 0)
1848 break;
1849 sent += rv;
1850 } while (sent < size);
1851
1852 if (sock == connection->data.socket)
1853 clear_bit(NET_CONGESTED, &connection->flags);
1854
1855 if (rv <= 0) {
1856 if (rv != -EAGAIN) {
1857 drbd_err(connection, "%s_sendmsg returned %d\n",
1858 sock == connection->meta.socket ? "msock" : "sock",
1859 rv);
1860 conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);
1861 } else
1862 conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD);
1863 }
1864
1865 return sent;
1866}
1867
1868/*
1869 * drbd_send_all - Send an entire buffer
1870 *
1871 * Returns 0 upon success and a negative error value otherwise.
1872 */
1873int drbd_send_all(struct drbd_connection *connection, struct socket *sock, void *buffer,
1874 size_t size, unsigned msg_flags)
1875{
1876 int err;
1877
1878 err = drbd_send(connection, sock, buffer, size, msg_flags);
1879 if (err < 0)
1880 return err;
1881 if (err != size)
1882 return -EIO;
1883 return 0;
1884}
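/*
 * Illustrative use of drbd_send_all() (a sketch, not an actual call
 * site in this file); it returns 0 on success, a negative error
 * otherwise, and short sends are turned into -EIO:
 *
 *	err = drbd_send_all(connection, connection->data.socket,
 *			    buf, len, 0);
 *	if (err)
 *		return err;
 */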
1885
1886static int drbd_open(struct block_device *bdev, fmode_t mode)
1887{
1888 struct drbd_device *device = bdev->bd_disk->private_data;
1889 unsigned long flags;
1890 int rv = 0;
1891
1892 mutex_lock(&drbd_main_mutex);
1893 spin_lock_irqsave(&device->resource->req_lock, flags);
1894 /* to have a stable device->state.role
1895 * and no race with updating open_cnt */
1896
1897 if (device->state.role != R_PRIMARY) {
1898 if (mode & FMODE_WRITE)
1899 rv = -EROFS;
1900 else if (!drbd_allow_oos)
1901 rv = -EMEDIUMTYPE;
1902 }
1903
1904 if (!rv)
1905 device->open_cnt++;
1906 spin_unlock_irqrestore(&device->resource->req_lock, flags);
1907 mutex_unlock(&drbd_main_mutex);
1908
1909 return rv;
1910}
1911
1912static void drbd_release(struct gendisk *gd, fmode_t mode)
1913{
1914 struct drbd_device *device = gd->private_data;
1915 mutex_lock(&drbd_main_mutex);
1916 device->open_cnt--;
1917 mutex_unlock(&drbd_main_mutex);
1918}
1919
1920/* need to hold resource->req_lock */
1921void drbd_queue_unplug(struct drbd_device *device)
1922{
1923 if (device->state.pdsk >= D_INCONSISTENT && device->state.conn >= C_CONNECTED) {
1924 D_ASSERT(device, device->state.role == R_PRIMARY);
1925 if (test_and_clear_bit(UNPLUG_REMOTE, &device->flags)) {
1926 drbd_queue_work_if_unqueued(
1927 &first_peer_device(device)->connection->sender_work,
1928 &device->unplug_work);
1929 }
1930 }
1931}
1932
1933static void drbd_set_defaults(struct drbd_device *device)
1934{
1935 /* Beware! The actual layout differs
1936 * between big endian and little endian */
1937 device->state = (union drbd_dev_state) {
1938 { .role = R_SECONDARY,
1939 .peer = R_UNKNOWN,
1940 .conn = C_STANDALONE,
1941 .disk = D_DISKLESS,
1942 .pdsk = D_UNKNOWN,
1943 } };
1944}
1945
1946void drbd_init_set_defaults(struct drbd_device *device)
1947{
1948 /* the memset(,0,) done on allocation did most of this.
1949 * note: only assignments, no allocation in here */
1950
1951 drbd_set_defaults(device);
1952
1953 atomic_set(&device->ap_bio_cnt, 0);
1954 atomic_set(&device->ap_actlog_cnt, 0);
1955 atomic_set(&device->ap_pending_cnt, 0);
1956 atomic_set(&device->rs_pending_cnt, 0);
1957 atomic_set(&device->unacked_cnt, 0);
1958 atomic_set(&device->local_cnt, 0);
1959 atomic_set(&device->pp_in_use_by_net, 0);
1960 atomic_set(&device->rs_sect_in, 0);
1961 atomic_set(&device->rs_sect_ev, 0);
1962 atomic_set(&device->ap_in_flight, 0);
1963 atomic_set(&device->md_io.in_use, 0);
1964
1965 mutex_init(&device->own_state_mutex);
1966 device->state_mutex = &device->own_state_mutex;
1967
1968 spin_lock_init(&device->al_lock);
1969 spin_lock_init(&device->peer_seq_lock);
1970
1971 INIT_LIST_HEAD(&device->active_ee);
1972 INIT_LIST_HEAD(&device->sync_ee);
1973 INIT_LIST_HEAD(&device->done_ee);
1974 INIT_LIST_HEAD(&device->read_ee);
1975 INIT_LIST_HEAD(&device->net_ee);
1976 INIT_LIST_HEAD(&device->resync_reads);
1977 INIT_LIST_HEAD(&device->resync_work.list);
1978 INIT_LIST_HEAD(&device->unplug_work.list);
1979 INIT_LIST_HEAD(&device->bm_io_work.w.list);
1980 INIT_LIST_HEAD(&device->pending_master_completion[0]);
1981 INIT_LIST_HEAD(&device->pending_master_completion[1]);
1982 INIT_LIST_HEAD(&device->pending_completion[0]);
1983 INIT_LIST_HEAD(&device->pending_completion[1]);
1984
1985 device->resync_work.cb = w_resync_timer;
1986 device->unplug_work.cb = w_send_write_hint;
1987 device->bm_io_work.w.cb = w_bitmap_io;
1988
1989 timer_setup(&device->resync_timer, resync_timer_fn, 0);
1990 timer_setup(&device->md_sync_timer, md_sync_timer_fn, 0);
1991 timer_setup(&device->start_resync_timer, start_resync_timer_fn, 0);
1992 timer_setup(&device->request_timer, request_timer_fn, 0);
1993
1994 init_waitqueue_head(&device->misc_wait);
1995 init_waitqueue_head(&device->state_wait);
1996 init_waitqueue_head(&device->ee_wait);
1997 init_waitqueue_head(&device->al_wait);
1998 init_waitqueue_head(&device->seq_wait);
1999
2000 device->resync_wenr = LC_FREE;
2001 device->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
2002 device->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
2003}
2004
2005void drbd_set_my_capacity(struct drbd_device *device, sector_t size)
2006{
2007 char ppb[10];
2008
2009 set_capacity_and_notify(device->vdisk, size);
2010
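	/* size is in 512-byte sectors, so size >> 1 is KiB */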
2011 drbd_info(device, "size = %s (%llu KB)\n",
2012 ppsize(ppb, size>>1), (unsigned long long)size>>1);
2013}
2014
2015void drbd_device_cleanup(struct drbd_device *device)
2016{
2017 int i;
2018 if (first_peer_device(device)->connection->receiver.t_state != NONE)
2019 drbd_err(device, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
2020 first_peer_device(device)->connection->receiver.t_state);
2021
2022 device->al_writ_cnt =
2023 device->bm_writ_cnt =
2024 device->read_cnt =
2025 device->recv_cnt =
2026 device->send_cnt =
2027 device->writ_cnt =
2028 device->p_size =
2029 device->rs_start =
2030 device->rs_total =
2031 device->rs_failed = 0;
2032 device->rs_last_events = 0;
2033 device->rs_last_sect_ev = 0;
2034 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2035 device->rs_mark_left[i] = 0;
2036 device->rs_mark_time[i] = 0;
2037 }
2038 D_ASSERT(device, first_peer_device(device)->connection->net_conf == NULL);
2039
2040 set_capacity_and_notify(device->vdisk, 0);
2041 if (device->bitmap) {
2042 /* maybe never allocated. */
2043 drbd_bm_resize(device, 0, 1);
2044 drbd_bm_cleanup(device);
2045 }
2046
2047 drbd_backing_dev_free(device, device->ldev);
2048 device->ldev = NULL;
2049
2050 clear_bit(AL_SUSPENDED, &device->flags);
2051
2052 D_ASSERT(device, list_empty(&device->active_ee));
2053 D_ASSERT(device, list_empty(&device->sync_ee));
2054 D_ASSERT(device, list_empty(&device->done_ee));
2055 D_ASSERT(device, list_empty(&device->read_ee));
2056 D_ASSERT(device, list_empty(&device->net_ee));
2057 D_ASSERT(device, list_empty(&device->resync_reads));
2058 D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
2059 D_ASSERT(device, list_empty(&device->resync_work.list));
2060 D_ASSERT(device, list_empty(&device->unplug_work.list));
2061
2062 drbd_set_defaults(device);
2063}
2064
2065
2066static void drbd_destroy_mempools(void)
2067{
2068 struct page *page;
2069
2070 while (drbd_pp_pool) {
2071 page = drbd_pp_pool;
2072 drbd_pp_pool = (struct page *)page_private(page);
2073 __free_page(page);
2074 drbd_pp_vacant--;
2075 }
2076
2077 /* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */
2078
2079 bioset_exit(&drbd_io_bio_set);
2080 bioset_exit(&drbd_md_io_bio_set);
2081 mempool_exit(&drbd_md_io_page_pool);
2082 mempool_exit(&drbd_ee_mempool);
2083 mempool_exit(&drbd_request_mempool);
2084 kmem_cache_destroy(drbd_ee_cache);
2085 kmem_cache_destroy(drbd_request_cache);
2086 kmem_cache_destroy(drbd_bm_ext_cache);
2087 kmem_cache_destroy(drbd_al_ext_cache);
2088
2089 drbd_ee_cache = NULL;
2090 drbd_request_cache = NULL;
2091 drbd_bm_ext_cache = NULL;
2092 drbd_al_ext_cache = NULL;
2093
2094 return;
2095}
2096
2097static int drbd_create_mempools(void)
2098{
2099 struct page *page;
2100 const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count;
2101 int i, ret;
2102
2103 /* caches */
2104 drbd_request_cache = kmem_cache_create(
2105 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
2106 if (drbd_request_cache == NULL)
2107 goto Enomem;
2108
2109 drbd_ee_cache = kmem_cache_create(
2110 "drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL);
2111 if (drbd_ee_cache == NULL)
2112 goto Enomem;
2113
2114 drbd_bm_ext_cache = kmem_cache_create(
2115 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
2116 if (drbd_bm_ext_cache == NULL)
2117 goto Enomem;
2118
2119 drbd_al_ext_cache = kmem_cache_create(
2120 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
2121 if (drbd_al_ext_cache == NULL)
2122 goto Enomem;
2123
2124 /* mempools */
2125 ret = bioset_init(&drbd_io_bio_set, BIO_POOL_SIZE, 0, 0);
2126 if (ret)
2127 goto Enomem;
2128
2129 ret = bioset_init(&drbd_md_io_bio_set, DRBD_MIN_POOL_PAGES, 0,
2130 BIOSET_NEED_BVECS);
2131 if (ret)
2132 goto Enomem;
2133
2134 ret = mempool_init_page_pool(&drbd_md_io_page_pool, DRBD_MIN_POOL_PAGES, 0);
2135 if (ret)
2136 goto Enomem;
2137
2138 ret = mempool_init_slab_pool(&drbd_request_mempool, number,
2139 drbd_request_cache);
2140 if (ret)
2141 goto Enomem;
2142
2143 ret = mempool_init_slab_pool(&drbd_ee_mempool, number, drbd_ee_cache);
2144 if (ret)
2145 goto Enomem;
2146
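	/* Build the initial drbd_pp_pool: a singly linked list of pages,
	 * chained through each page's private field. */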
2147 for (i = 0; i < number; i++) {
2148 page = alloc_page(GFP_HIGHUSER);
2149 if (!page)
2150 goto Enomem;
2151 set_page_private(page, (unsigned long)drbd_pp_pool);
2152 drbd_pp_pool = page;
2153 }
2154 drbd_pp_vacant = number;
2155
2156 return 0;
2157
2158Enomem:
2159 drbd_destroy_mempools(); /* in case we allocated some */
2160 return -ENOMEM;
2161}
2162
2163static void drbd_release_all_peer_reqs(struct drbd_device *device)
2164{
2165 int rr;
2166
2167 rr = drbd_free_peer_reqs(device, &device->active_ee);
2168 if (rr)
2169 drbd_err(device, "%d EEs in active list found!\n", rr);
2170
2171 rr = drbd_free_peer_reqs(device, &device->sync_ee);
2172 if (rr)
2173 drbd_err(device, "%d EEs in sync list found!\n", rr);
2174
2175 rr = drbd_free_peer_reqs(device, &device->read_ee);
2176 if (rr)
2177 drbd_err(device, "%d EEs in read list found!\n", rr);
2178
2179 rr = drbd_free_peer_reqs(device, &device->done_ee);
2180 if (rr)
2181 drbd_err(device, "%d EEs in done list found!\n", rr);
2182
2183 rr = drbd_free_peer_reqs(device, &device->net_ee);
2184 if (rr)
2185 drbd_err(device, "%d EEs in net list found!\n", rr);
2186}
2187
2188/* caution. no locking. */
2189void drbd_destroy_device(struct kref *kref)
2190{
2191 struct drbd_device *device = container_of(kref, struct drbd_device, kref);
2192 struct drbd_resource *resource = device->resource;
2193 struct drbd_peer_device *peer_device, *tmp_peer_device;
2194
2195 timer_shutdown_sync(&device->request_timer);
2196
2197 /* paranoia asserts */
2198 D_ASSERT(device, device->open_cnt == 0);
2199 /* end paranoia asserts */
2200
2201 /* cleanup stuff that may have been allocated during
2202 * device (re-)configuration or state changes */
2203
2204 drbd_backing_dev_free(device, device->ldev);
2205 device->ldev = NULL;
2206
2207 drbd_release_all_peer_reqs(device);
2208
2209 lc_destroy(device->act_log);
2210 lc_destroy(device->resync);
2211
2212 kfree(device->p_uuid);
2213 /* device->p_uuid = NULL; */
2214
2215 if (device->bitmap) /* should no longer be there. */
2216 drbd_bm_cleanup(device);
2217 __free_page(device->md_io.page);
2218 put_disk(device->vdisk);
2219 kfree(device->rs_plan_s);
2220
2221 /* not for_each_connection(connection, resource):
2222 * those may have been cleaned up and disassociated already.
2223 */
2224 for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
2225 kref_put(&peer_device->connection->kref, drbd_destroy_connection);
2226 kfree(peer_device);
2227 }
2228 if (device->submit.wq)
2229 destroy_workqueue(device->submit.wq);
2230 kfree(device);
2231 kref_put(&resource->kref, drbd_destroy_resource);
2232}
2233
2234/* One global retry thread, if we need to push back some bio and have it
2235 * reinserted through our make request function.
2236 */
2237static struct retry_worker {
2238 struct workqueue_struct *wq;
2239 struct work_struct worker;
2240
2241 spinlock_t lock;
2242 struct list_head writes;
2243} retry;
2244
2245static void do_retry(struct work_struct *ws)
2246{
2247 struct retry_worker *retry = container_of(ws, struct retry_worker, worker);
2248 LIST_HEAD(writes);
2249 struct drbd_request *req, *tmp;
2250
2251 spin_lock_irq(&retry->lock);
2252 list_splice_init(&retry->writes, &writes);
2253 spin_unlock_irq(&retry->lock);
2254
2255 list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
2256 struct drbd_device *device = req->device;
2257 struct bio *bio = req->master_bio;
2258 bool expected;
2259
2260 expected =
2261 expect(device, atomic_read(&req->completion_ref) == 0) &&
2262 expect(device, req->rq_state & RQ_POSTPONED) &&
2263 expect(device, (req->rq_state & RQ_LOCAL_PENDING) == 0 ||
2264 (req->rq_state & RQ_LOCAL_ABORTED) != 0);
2265
2266 if (!expected)
2267 drbd_err(device, "req=%p completion_ref=%d rq_state=%x\n",
2268 req, atomic_read(&req->completion_ref),
2269 req->rq_state);
2270
2271 /* We still need to put one kref associated with the
2272 * "completion_ref" going zero in the code path that queued it
2273 * here. The request object may still be referenced by a
2274 * frozen local req->private_bio, in case we force-detached.
2275 */
2276 kref_put(&req->kref, drbd_req_destroy);
2277
2278 /* A single suspended or otherwise blocking device may stall
2279 * all others as well. Fortunately, this code path exists only to
2280 * recover from a situation that "should not happen":
2281 * concurrent writes in a multi-primary setup.
2282 * In a "normal" lifecycle, this workqueue is supposed to be
2283 * destroyed without ever doing anything.
2284 * If it turns out to be an issue anyway, we can do per
2285 * resource (replication group) or per device (minor) retry
2286 * workqueues instead.
2287 */
2288
2289 /* We are not just doing submit_bio_noacct(),
2290 * as we want to keep the start_time information. */
2291 inc_ap_bio(device);
2292 __drbd_make_request(device, bio);
2293 }
2294}
2295
2296/* called via drbd_req_put_completion_ref(),
2297 * holds resource->req_lock */
2298void drbd_restart_request(struct drbd_request *req)
2299{
2300 unsigned long flags;
2301 spin_lock_irqsave(&retry.lock, flags);
2302 list_move_tail(&req->tl_requests, &retry.writes);
2303 spin_unlock_irqrestore(&retry.lock, flags);
2304
2305 /* Drop the extra reference that would otherwise
2306 * have been dropped by complete_master_bio.
2307 * do_retry() needs to grab a new one. */
2308 dec_ap_bio(req->device);
2309
2310 queue_work(retry.wq, &retry.worker);
2311}
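/*
 * Life cycle of a restarted request, in short: a postponed write drops
 * its last completion_ref, drbd_restart_request() moves it onto
 * retry.writes, and do_retry() above resubmits the master bio via
 * inc_ap_bio()/__drbd_make_request(), keeping the original start time.
 */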
2312
2313void drbd_destroy_resource(struct kref *kref)
2314{
2315 struct drbd_resource *resource =
2316 container_of(kref, struct drbd_resource, kref);
2317
2318 idr_destroy(&resource->devices);
2319 free_cpumask_var(resource->cpu_mask);
2320 kfree(resource->name);
2321 kfree(resource);
2322}
2323
2324void drbd_free_resource(struct drbd_resource *resource)
2325{
2326 struct drbd_connection *connection, *tmp;
2327
2328 for_each_connection_safe(connection, tmp, resource) {
2329 list_del(&connection->connections);
2330 drbd_debugfs_connection_cleanup(connection);
2331 kref_put(&connection->kref, drbd_destroy_connection);
2332 }
2333 drbd_debugfs_resource_cleanup(resource);
2334 kref_put(&resource->kref, drbd_destroy_resource);
2335}
2336
2337static void drbd_cleanup(void)
2338{
2339 unsigned int i;
2340 struct drbd_device *device;
2341 struct drbd_resource *resource, *tmp;
2342
2343 /* first remove proc,
2344 * drbdsetup uses its presence to detect
2345 * whether DRBD is loaded.
2346 * If we were to get stuck in proc removal,
2347 * but have netlink already deregistered,
2348 * some drbdsetup commands may wait forever
2349 * for an answer.
2350 */
2351 if (drbd_proc)
2352 remove_proc_entry("drbd", NULL);
2353
2354 if (retry.wq)
2355 destroy_workqueue(retry.wq);
2356
2357 drbd_genl_unregister();
2358
2359 idr_for_each_entry(&drbd_devices, device, i)
2360 drbd_delete_device(device);
2361
2362 /* not _rcu, since there is no other updater anymore; genl already unregistered */
2363 for_each_resource_safe(resource, tmp, &drbd_resources) {
2364 list_del(&resource->resources);
2365 drbd_free_resource(resource);
2366 }
2367
2368 drbd_debugfs_cleanup();
2369
2370 drbd_destroy_mempools();
2371 unregister_blkdev(DRBD_MAJOR, "drbd");
2372
2373 idr_destroy(&drbd_devices);
2374
2375 pr_info("module cleanup done.\n");
2376}
2377
2378static void drbd_init_workqueue(struct drbd_work_queue* wq)
2379{
2380 spin_lock_init(&wq->q_lock);
2381 INIT_LIST_HEAD(&wq->q);
2382 init_waitqueue_head(&wq->q_wait);
2383}
2384
2385struct completion_work {
2386 struct drbd_work w;
2387 struct completion done;
2388};
2389
2390static int w_complete(struct drbd_work *w, int cancel)
2391{
2392 struct completion_work *completion_work =
2393 container_of(w, struct completion_work, w);
2394
2395 complete(&completion_work->done);
2396 return 0;
2397}
2398
2399void drbd_flush_workqueue(struct drbd_work_queue *work_queue)
2400{
2401 struct completion_work completion_work;
2402
2403 completion_work.w.cb = w_complete;
2404 init_completion(&completion_work.done);
2405 drbd_queue_work(work_queue, &completion_work.w);
2406 wait_for_completion(&completion_work.done);
2407}
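/*
 * This flush is a barrier work item: w_complete() just fires the
 * completion, so once wait_for_completion() returns, every work item
 * queued before it is known to have been processed.
 */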
2408
2409struct drbd_resource *drbd_find_resource(const char *name)
2410{
2411 struct drbd_resource *resource;
2412
2413 if (!name || !name[0])
2414 return NULL;
2415
2416 rcu_read_lock();
2417 for_each_resource_rcu(resource, &drbd_resources) {
2418 if (!strcmp(resource->name, name)) {
2419 kref_get(&resource->kref);
2420 goto found;
2421 }
2422 }
2423 resource = NULL;
2424found:
2425 rcu_read_unlock();
2426 return resource;
2427}
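/* Note: the resource returned above (and the connection returned by
 * conn_get_by_addrs() below) comes with an elevated kref; the caller
 * is expected to drop it again with the matching kref_put(). */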
2428
2429struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
2430 void *peer_addr, int peer_addr_len)
2431{
2432 struct drbd_resource *resource;
2433 struct drbd_connection *connection;
2434
2435 rcu_read_lock();
2436 for_each_resource_rcu(resource, &drbd_resources) {
2437 for_each_connection_rcu(connection, resource) {
2438 if (connection->my_addr_len == my_addr_len &&
2439 connection->peer_addr_len == peer_addr_len &&
2440 !memcmp(&connection->my_addr, my_addr, my_addr_len) &&
2441 !memcmp(&connection->peer_addr, peer_addr, peer_addr_len)) {
2442 kref_get(&connection->kref);
2443 goto found;
2444 }
2445 }
2446 }
2447 connection = NULL;
2448found:
2449 rcu_read_unlock();
2450 return connection;
2451}
2452
2453static int drbd_alloc_socket(struct drbd_socket *socket)
2454{
2455 socket->rbuf = (void *) __get_free_page(GFP_KERNEL);
2456 if (!socket->rbuf)
2457 return -ENOMEM;
2458 socket->sbuf = (void *) __get_free_page(GFP_KERNEL);
2459 if (!socket->sbuf)
2460 return -ENOMEM;
2461 return 0;
2462}
2463
2464static void drbd_free_socket(struct drbd_socket *socket)
2465{
2466 free_page((unsigned long) socket->sbuf);
2467 free_page((unsigned long) socket->rbuf);
2468}
2469
2470void conn_free_crypto(struct drbd_connection *connection)
2471{
2472 drbd_free_sock(connection);
2473
2474 crypto_free_shash(connection->csums_tfm);
2475 crypto_free_shash(connection->verify_tfm);
2476 crypto_free_shash(connection->cram_hmac_tfm);
2477 crypto_free_shash(connection->integrity_tfm);
2478 crypto_free_shash(connection->peer_integrity_tfm);
2479 kfree(connection->int_dig_in);
2480 kfree(connection->int_dig_vv);
2481
2482 connection->csums_tfm = NULL;
2483 connection->verify_tfm = NULL;
2484 connection->cram_hmac_tfm = NULL;
2485 connection->integrity_tfm = NULL;
2486 connection->peer_integrity_tfm = NULL;
2487 connection->int_dig_in = NULL;
2488 connection->int_dig_vv = NULL;
2489}
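/* The unconditional crypto_free_shash()/kfree() calls above work for a
 * partially configured connection as well, since both helpers tolerate
 * NULL pointers. */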
2490
2491int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts)
2492{
2493 struct drbd_connection *connection;
2494 cpumask_var_t new_cpu_mask;
2495 int err;
2496
2497 if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL))
2498 return -ENOMEM;
2499
2500 /* silently ignore cpu mask on UP kernel */
2501 if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) {
2502 err = bitmap_parse(res_opts->cpu_mask, DRBD_CPU_MASK_SIZE,
2503 cpumask_bits(new_cpu_mask), nr_cpu_ids);
2504 if (err == -EOVERFLOW) {
2505 /* So what. mask it out. */
2506 cpumask_var_t tmp_cpu_mask;
2507 if (zalloc_cpumask_var(&tmp_cpu_mask, GFP_KERNEL)) {
2508 cpumask_setall(tmp_cpu_mask);
2509 cpumask_and(new_cpu_mask, new_cpu_mask, tmp_cpu_mask);
2510 drbd_warn(resource, "Overflow in bitmap_parse(%.12s%s), truncating to %u bits\n",
2511 res_opts->cpu_mask,
2512 strlen(res_opts->cpu_mask) > 12 ? "..." : "",
2513 nr_cpu_ids);
2514 free_cpumask_var(tmp_cpu_mask);
2515 err = 0;
2516 }
2517 }
2518 if (err) {
2519 drbd_warn(resource, "bitmap_parse() failed with %d\n", err);
2520 /* retcode = ERR_CPU_MASK_PARSE; */
2521 goto fail;
2522 }
2523 }
2524 resource->res_opts = *res_opts;
2525 if (cpumask_empty(new_cpu_mask))
2526 drbd_calc_cpu_mask(&new_cpu_mask);
2527 if (!cpumask_equal(resource->cpu_mask, new_cpu_mask)) {
2528 cpumask_copy(resource->cpu_mask, new_cpu_mask);
2529 for_each_connection_rcu(connection, resource) {
2530 connection->receiver.reset_cpu_mask = 1;
2531 connection->ack_receiver.reset_cpu_mask = 1;
2532 connection->worker.reset_cpu_mask = 1;
2533 }
2534 }
2535 err = 0;
2536
2537fail:
2538 free_cpumask_var(new_cpu_mask);
2539 return err;
2540
2541}
2542
2543struct drbd_resource *drbd_create_resource(const char *name)
2544{
2545 struct drbd_resource *resource;
2546
2547 resource = kzalloc(sizeof(struct drbd_resource), GFP_KERNEL);
2548 if (!resource)
2549 goto fail;
2550 resource->name = kstrdup(name, GFP_KERNEL);
2551 if (!resource->name)
2552 goto fail_free_resource;
2553 if (!zalloc_cpumask_var(&resource->cpu_mask, GFP_KERNEL))
2554 goto fail_free_name;
2555 kref_init(&resource->kref);
2556 idr_init(&resource->devices);
2557 INIT_LIST_HEAD(&resource->connections);
2558 resource->write_ordering = WO_BDEV_FLUSH;
2559 list_add_tail_rcu(&resource->resources, &drbd_resources);
2560 mutex_init(&resource->conf_update);
2561 mutex_init(&resource->adm_mutex);
2562 spin_lock_init(&resource->req_lock);
2563 drbd_debugfs_resource_add(resource);
2564 return resource;
2565
2566fail_free_name:
2567 kfree(resource->name);
2568fail_free_resource:
2569 kfree(resource);
2570fail:
2571 return NULL;
2572}
2573
2574/* caller must hold adm_mutex */
2575struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts)
2576{
2577 struct drbd_resource *resource;
2578 struct drbd_connection *connection;
2579
2580 connection = kzalloc(sizeof(struct drbd_connection), GFP_KERNEL);
2581 if (!connection)
2582 return NULL;
2583
2584 if (drbd_alloc_socket(&connection->data))
2585 goto fail;
2586 if (drbd_alloc_socket(&connection->meta))
2587 goto fail;
2588
2589 connection->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
2590 if (!connection->current_epoch)
2591 goto fail;
2592
2593 INIT_LIST_HEAD(&connection->transfer_log);
2594
2595 INIT_LIST_HEAD(&connection->current_epoch->list);
2596 connection->epochs = 1;
2597 spin_lock_init(&connection->epoch_lock);
2598
2599 connection->send.seen_any_write_yet = false;
2600 connection->send.current_epoch_nr = 0;
2601 connection->send.current_epoch_writes = 0;
2602
2603 resource = drbd_create_resource(name);
2604 if (!resource)
2605 goto fail;
2606
2607 connection->cstate = C_STANDALONE;
2608 mutex_init(&connection->cstate_mutex);
2609 init_waitqueue_head(&connection->ping_wait);
2610 idr_init(&connection->peer_devices);
2611
2612 drbd_init_workqueue(&connection->sender_work);
2613 mutex_init(&connection->data.mutex);
2614 mutex_init(&connection->meta.mutex);
2615
2616 drbd_thread_init(resource, &connection->receiver, drbd_receiver, "receiver");
2617 connection->receiver.connection = connection;
2618 drbd_thread_init(resource, &connection->worker, drbd_worker, "worker");
2619 connection->worker.connection = connection;
2620 drbd_thread_init(resource, &connection->ack_receiver, drbd_ack_receiver, "ack_recv");
2621 connection->ack_receiver.connection = connection;
2622
2623 kref_init(&connection->kref);
2624
2625 connection->resource = resource;
2626
2627 if (set_resource_options(resource, res_opts))
2628 goto fail_resource;
2629
2630 kref_get(&resource->kref);
2631 list_add_tail_rcu(&connection->connections, &resource->connections);
2632 drbd_debugfs_connection_add(connection);
2633 return connection;
2634
2635fail_resource:
2636 list_del(&resource->resources);
2637 drbd_free_resource(resource);
2638fail:
2639 kfree(connection->current_epoch);
2640 drbd_free_socket(&connection->meta);
2641 drbd_free_socket(&connection->data);
2642 kfree(connection);
2643 return NULL;
2644}
2645
2646void drbd_destroy_connection(struct kref *kref)
2647{
2648 struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
2649 struct drbd_resource *resource = connection->resource;
2650
2651 if (atomic_read(&connection->current_epoch->epoch_size) != 0)
2652 drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
2653 kfree(connection->current_epoch);
2654
2655 idr_destroy(&connection->peer_devices);
2656
2657 drbd_free_socket(&connection->meta);
2658 drbd_free_socket(&connection->data);
2659 kfree(connection->int_dig_in);
2660 kfree(connection->int_dig_vv);
2661 kfree(connection);
2662 kref_put(&resource->kref, drbd_destroy_resource);
2663}
2664
2665static int init_submitter(struct drbd_device *device)
2666{
2667 /* open-coded create_singlethread_workqueue(),
2668 * to be able to say "drbd%u_submit", ..., minor */
2669 device->submit.wq =
2670 alloc_ordered_workqueue("drbd%u_submit", WQ_MEM_RECLAIM, device->minor);
2671 if (!device->submit.wq)
2672 return -ENOMEM;
2673
2674 INIT_WORK(&device->submit.worker, do_submit);
2675 INIT_LIST_HEAD(&device->submit.writes);
2676 return 0;
2677}
2678
2679enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor)
2680{
2681 struct drbd_resource *resource = adm_ctx->resource;
2682 struct drbd_connection *connection, *n;
2683 struct drbd_device *device;
2684 struct drbd_peer_device *peer_device, *tmp_peer_device;
2685 struct gendisk *disk;
2686 int id;
2687 int vnr = adm_ctx->volume;
2688 enum drbd_ret_code err = ERR_NOMEM;
2689
2690 device = minor_to_device(minor);
2691 if (device)
2692 return ERR_MINOR_OR_VOLUME_EXISTS;
2693
2694 /* GFP_KERNEL, we are outside of all write-out paths */
2695 device = kzalloc(sizeof(struct drbd_device), GFP_KERNEL);
2696 if (!device)
2697 return ERR_NOMEM;
2698 kref_init(&device->kref);
2699
2700 kref_get(&resource->kref);
2701 device->resource = resource;
2702 device->minor = minor;
2703 device->vnr = vnr;
2704
2705 drbd_init_set_defaults(device);
2706
2707 disk = blk_alloc_disk(NUMA_NO_NODE);
2708 if (!disk)
2709 goto out_no_disk;
2710
2711 device->vdisk = disk;
2712 device->rq_queue = disk->queue;
2713
2714 set_disk_ro(disk, true);
2715
2716 disk->major = DRBD_MAJOR;
2717 disk->first_minor = minor;
2718 disk->minors = 1;
2719 disk->fops = &drbd_ops;
2720 disk->flags |= GENHD_FL_NO_PART;
2721 sprintf(disk->disk_name, "drbd%d", minor);
2722 disk->private_data = device;
2723
2724 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, disk->queue);
2725 blk_queue_write_cache(disk->queue, true, true);
2726 /* Setting max_hw_sectors to the odd value of 8 KiB here
2727 triggers a max_bio_size message upon first attach or connect */
2728 blk_queue_max_hw_sectors(disk->queue, DRBD_MAX_BIO_SIZE_SAFE >> 8);
2729
2730 device->md_io.page = alloc_page(GFP_KERNEL);
2731 if (!device->md_io.page)
2732 goto out_no_io_page;
2733
2734 if (drbd_bm_init(device))
2735 goto out_no_bitmap;
2736 device->read_requests = RB_ROOT;
2737 device->write_requests = RB_ROOT;
2738
2739 id = idr_alloc(&drbd_devices, device, minor, minor + 1, GFP_KERNEL);
2740 if (id < 0) {
2741 if (id == -ENOSPC)
2742 err = ERR_MINOR_OR_VOLUME_EXISTS;
2743 goto out_no_minor_idr;
2744 }
2745 kref_get(&device->kref);
2746
2747 id = idr_alloc(&resource->devices, device, vnr, vnr + 1, GFP_KERNEL);
2748 if (id < 0) {
2749 if (id == -ENOSPC)
2750 err = ERR_MINOR_OR_VOLUME_EXISTS;
2751 goto out_idr_remove_minor;
2752 }
2753 kref_get(&device->kref);
2754
2755 INIT_LIST_HEAD(&device->peer_devices);
2756 INIT_LIST_HEAD(&device->pending_bitmap_io);
2757 for_each_connection(connection, resource) {
2758 peer_device = kzalloc(sizeof(struct drbd_peer_device), GFP_KERNEL);
2759 if (!peer_device)
2760 goto out_idr_remove_from_resource;
2761 peer_device->connection = connection;
2762 peer_device->device = device;
2763
2764 list_add(&peer_device->peer_devices, &device->peer_devices);
2765 kref_get(&device->kref);
2766
2767 id = idr_alloc(&connection->peer_devices, peer_device, vnr, vnr + 1, GFP_KERNEL);
2768 if (id < 0) {
2769 if (id == -ENOSPC)
2770 err = ERR_INVALID_REQUEST;
2771 goto out_idr_remove_from_resource;
2772 }
2773 kref_get(&connection->kref);
2774 INIT_WORK(&peer_device->send_acks_work, drbd_send_acks_wf);
2775 }
2776
2777 if (init_submitter(device)) {
2778 err = ERR_NOMEM;
2779 goto out_idr_remove_from_resource;
2780 }
2781
2782 err = add_disk(disk);
2783 if (err)
2784 goto out_destroy_workqueue;
2785
2786 /* inherit the connection state */
2787 device->state.conn = first_connection(resource)->cstate;
2788 if (device->state.conn == C_WF_REPORT_PARAMS) {
2789 for_each_peer_device(peer_device, device)
2790 drbd_connected(peer_device);
2791 }
2792 /* move to create_peer_device() */
2793 for_each_peer_device(peer_device, device)
2794 drbd_debugfs_peer_device_add(peer_device);
2795 drbd_debugfs_device_add(device);
2796 return NO_ERROR;
2797
2798out_destroy_workqueue:
2799 destroy_workqueue(device->submit.wq);
2800out_idr_remove_from_resource:
2801 for_each_connection_safe(connection, n, resource) {
2802 peer_device = idr_remove(&connection->peer_devices, vnr);
2803 if (peer_device)
2804 kref_put(&connection->kref, drbd_destroy_connection);
2805 }
2806 for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
2807 list_del(&peer_device->peer_devices);
2808 kfree(peer_device);
2809 }
2810 idr_remove(&resource->devices, vnr);
2811out_idr_remove_minor:
2812 idr_remove(&drbd_devices, minor);
2813 synchronize_rcu();
2814out_no_minor_idr:
2815 drbd_bm_cleanup(device);
2816out_no_bitmap:
2817 __free_page(device->md_io.page);
2818out_no_io_page:
2819 put_disk(disk);
2820out_no_disk:
2821 kref_put(&resource->kref, drbd_destroy_resource);
2822 kfree(device);
2823 return err;
2824}
2825
2826void drbd_delete_device(struct drbd_device *device)
2827{
2828 struct drbd_resource *resource = device->resource;
2829 struct drbd_connection *connection;
2830 struct drbd_peer_device *peer_device;
2831
2832 /* move to free_peer_device() */
2833 for_each_peer_device(peer_device, device)
2834 drbd_debugfs_peer_device_cleanup(peer_device);
2835 drbd_debugfs_device_cleanup(device);
2836 for_each_connection(connection, resource) {
2837 idr_remove(&connection->peer_devices, device->vnr);
2838 kref_put(&device->kref, drbd_destroy_device);
2839 }
2840 idr_remove(&resource->devices, device->vnr);
2841 kref_put(&device->kref, drbd_destroy_device);
2842 idr_remove(&drbd_devices, device_to_minor(device));
2843 kref_put(&device->kref, drbd_destroy_device);
2844 del_gendisk(device->vdisk);
2845 synchronize_rcu();
2846 kref_put(&device->kref, drbd_destroy_device);
2847}
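/*
 * Reference accounting above: one kref_put() per peer_devices idr
 * entry, one each for the resource->devices and drbd_devices idrs, and
 * a final one (after synchronize_rcu()) balancing the kref_init() from
 * drbd_create_device().
 */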
2848
2849static int __init drbd_init(void)
2850{
2851 int err;
2852
2853 if (drbd_minor_count < DRBD_MINOR_COUNT_MIN || drbd_minor_count > DRBD_MINOR_COUNT_MAX) {
2854 pr_err("invalid minor_count (%d)\n", drbd_minor_count);
2855#ifdef MODULE
2856 return -EINVAL;
2857#else
2858 drbd_minor_count = DRBD_MINOR_COUNT_DEF;
2859#endif
2860 }
2861
2862 err = register_blkdev(DRBD_MAJOR, "drbd");
2863 if (err) {
2864 pr_err("unable to register block device major %d\n",
2865 DRBD_MAJOR);
2866 return err;
2867 }
2868
2869 /*
2870 * allocate all necessary structs
2871 */
2872 init_waitqueue_head(&drbd_pp_wait);
2873
2874 drbd_proc = NULL; /* play safe for drbd_cleanup */
2875 idr_init(&drbd_devices);
2876
2877 mutex_init(&resources_mutex);
2878 INIT_LIST_HEAD(&drbd_resources);
2879
2880 err = drbd_genl_register();
2881 if (err) {
2882 pr_err("unable to register generic netlink family\n");
2883 goto fail;
2884 }
2885
2886 err = drbd_create_mempools();
2887 if (err)
2888 goto fail;
2889
2890 err = -ENOMEM;
2891 drbd_proc = proc_create_single("drbd", S_IFREG | 0444 , NULL, drbd_seq_show);
2892 if (!drbd_proc) {
2893 pr_err("unable to register proc file\n");
2894 goto fail;
2895 }
2896
2897 retry.wq = create_singlethread_workqueue("drbd-reissue");
2898 if (!retry.wq) {
2899 pr_err("unable to create retry workqueue\n");
2900 goto fail;
2901 }
2902 INIT_WORK(&retry.worker, do_retry);
2903 spin_lock_init(&retry.lock);
2904 INIT_LIST_HEAD(&retry.writes);
2905
2906 drbd_debugfs_init();
2907
2908 pr_info("initialized. "
2909 "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
2910 GENL_MAGIC_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
2911 pr_info("%s\n", drbd_buildtag());
2912 pr_info("registered as block device major %d\n", DRBD_MAJOR);
2913 return 0; /* Success! */
2914
2915fail:
2916 drbd_cleanup();
2917 if (err == -ENOMEM)
2918 pr_err("ran out of memory\n");
2919 else
2920 pr_err("initialization failure\n");
2921 return err;
2922}
2923
2924static void drbd_free_one_sock(struct drbd_socket *ds)
2925{
2926 struct socket *s;
2927 mutex_lock(&ds->mutex);
2928 s = ds->socket;
2929 ds->socket = NULL;
2930 mutex_unlock(&ds->mutex);
2931 if (s) {
2932 /* so debugfs does not need to mutex_lock() */
2933 synchronize_rcu();
2934 kernel_sock_shutdown(s, SHUT_RDWR);
2935 sock_release(s);
2936 }
2937}
2938
2939void drbd_free_sock(struct drbd_connection *connection)
2940{
2941 if (connection->data.socket)
2942 drbd_free_one_sock(&connection->data);
2943 if (connection->meta.socket)
2944 drbd_free_one_sock(&connection->meta);
2945}
2946
2947/* meta data management */
2948
2949void conn_md_sync(struct drbd_connection *connection)
2950{
2951 struct drbd_peer_device *peer_device;
2952 int vnr;
2953
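	/* Dropping rcu_read_lock() inside the idr walk below is safe:
	 * the kref pins the device while drbd_md_sync() may sleep. */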
2954 rcu_read_lock();
2955 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
2956 struct drbd_device *device = peer_device->device;
2957
2958 kref_get(&device->kref);
2959 rcu_read_unlock();
2960 drbd_md_sync(device);
2961 kref_put(&device->kref, drbd_destroy_device);
2962 rcu_read_lock();
2963 }
2964 rcu_read_unlock();
2965}
2966
2967/* one aligned 4 KiB block */
2968struct meta_data_on_disk {
2969 u64 la_size_sect; /* last agreed size. */
2970 u64 uuid[UI_SIZE]; /* UUIDs. */
2971 u64 device_uuid;
2972 u64 reserved_u64_1;
2973 u32 flags; /* MDF */
2974 u32 magic;
2975 u32 md_size_sect;
2976 u32 al_offset; /* offset to this block */
2977 u32 al_nr_extents; /* important for restoring the AL (userspace) */
2978 /* `-- act_log->nr_elements <-- ldev->dc.al_extents */
2979 u32 bm_offset; /* offset to the bitmap, from here */
2980 u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */
2981 u32 la_peer_max_bio_size; /* last peer max_bio_size */
2982
2983 /* see al_tr_number_to_on_disk_sector() */
2984 u32 al_stripes;
2985 u32 al_stripe_size_4k;
2986
2987 u8 reserved_u8[4096 - (7*8 + 10*4)];
2988} __packed;
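/* reserved_u8[] above pads the structure to exactly 4096 bytes:
 * 7 u64 fields (la_size_sect, uuid[UI_SIZE] with UI_SIZE == 4,
 * device_uuid, reserved_u64_1) and 10 u32 fields take 96 bytes,
 * leaving 4000 reserved; drbd_md_sync() asserts the total size. */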
2989
2990
2991
2992void drbd_md_write(struct drbd_device *device, void *b)
2993{
2994 struct meta_data_on_disk *buffer = b;
2995 sector_t sector;
2996 int i;
2997
2998 memset(buffer, 0, sizeof(*buffer));
2999
3000 buffer->la_size_sect = cpu_to_be64(get_capacity(device->vdisk));
3001 for (i = UI_CURRENT; i < UI_SIZE; i++)
3002 buffer->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
3003 buffer->flags = cpu_to_be32(device->ldev->md.flags);
3004 buffer->magic = cpu_to_be32(DRBD_MD_MAGIC_84_UNCLEAN);
3005
3006 buffer->md_size_sect = cpu_to_be32(device->ldev->md.md_size_sect);
3007 buffer->al_offset = cpu_to_be32(device->ldev->md.al_offset);
3008 buffer->al_nr_extents = cpu_to_be32(device->act_log->nr_elements);
3009 buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
3010 buffer->device_uuid = cpu_to_be64(device->ldev->md.device_uuid);
3011
3012 buffer->bm_offset = cpu_to_be32(device->ldev->md.bm_offset);
3013 buffer->la_peer_max_bio_size = cpu_to_be32(device->peer_max_bio_size);
3014
3015 buffer->al_stripes = cpu_to_be32(device->ldev->md.al_stripes);
3016 buffer->al_stripe_size_4k = cpu_to_be32(device->ldev->md.al_stripe_size_4k);
3017
3018 D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset);
3019 sector = device->ldev->md.md_offset;
3020
3021 if (drbd_md_sync_page_io(device, device->ldev, sector, REQ_OP_WRITE)) {
3022 /* this was a try anyways ... */
3023 drbd_err(device, "meta data update failed!\n");
3024 drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
3025 }
3026}
3027
3028/**
3029 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
3030 * @device: DRBD device.
3031 */
3032void drbd_md_sync(struct drbd_device *device)
3033{
3034 struct meta_data_on_disk *buffer;
3035
3036 /* Don't accidentally change the DRBD meta data layout. */
3037 BUILD_BUG_ON(UI_SIZE != 4);
3038 BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096);
3039
3040 del_timer(&device->md_sync_timer);
3041 /* timer may be rearmed by drbd_md_mark_dirty() now. */
3042 if (!test_and_clear_bit(MD_DIRTY, &device->flags))
3043 return;
3044
3045 /* We use D_FAILED here, and not D_ATTACHING, because we try to write
3046 * metadata even if we detach due to a disk failure! */
3047 if (!get_ldev_if_state(device, D_FAILED))
3048 return;
3049
3050 buffer = drbd_md_get_buffer(device, __func__);
3051 if (!buffer)
3052 goto out;
3053
3054 drbd_md_write(device, buffer);
3055
3056 /* Update device->ldev->md.la_size_sect,
3057 * since we updated it on metadata. */
3058 device->ldev->md.la_size_sect = get_capacity(device->vdisk);
3059
3060 drbd_md_put_buffer(device);
3061out:
3062 put_ldev(device);
3063}
3064
3065static int check_activity_log_stripe_size(struct drbd_device *device,
3066 struct meta_data_on_disk *on_disk,
3067 struct drbd_md *in_core)
3068{
3069 u32 al_stripes = be32_to_cpu(on_disk->al_stripes);
3070 u32 al_stripe_size_4k = be32_to_cpu(on_disk->al_stripe_size_4k);
3071 u64 al_size_4k;
3072
3073 /* both not set: default to old fixed size activity log */
3074 if (al_stripes == 0 && al_stripe_size_4k == 0) {
3075 al_stripes = 1;
3076 al_stripe_size_4k = MD_32kB_SECT/8;
3077 }
3078
3079 /* some paranoia plausibility checks */
3080
3081 /* we need both values to be set */
3082 if (al_stripes == 0 || al_stripe_size_4k == 0)
3083 goto err;
3084
3085 al_size_4k = (u64)al_stripes * al_stripe_size_4k;
3086
3087 /* Upper limit of activity log area, to avoid potential overflow
3088 * problems in al_tr_number_to_on_disk_sector(). Right now, more
3089 * than 72 * 4k blocks total only increases the amount of history;
3090 * limiting this arbitrarily to 16 GB is not a real limitation ;-) */
3091 if (al_size_4k > (16 * 1024 * 1024/4))
3092 goto err;
3093
3094 /* Lower limit: we need at least 8 transaction slots (32kB)
3095 * to not break existing setups */
3096 if (al_size_4k < MD_32kB_SECT/8)
3097 goto err;
3098
3099 in_core->al_stripe_size_4k = al_stripe_size_4k;
3100 in_core->al_stripes = al_stripes;
3101 in_core->al_size_4k = al_size_4k;
3102
3103 return 0;
3104err:
3105 drbd_err(device, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n",
3106 al_stripes, al_stripe_size_4k);
3107 return -EINVAL;
3108}
3109
3110static int check_offsets_and_sizes(struct drbd_device *device, struct drbd_backing_dev *bdev)
3111{
3112 sector_t capacity = drbd_get_capacity(bdev->md_bdev);
3113 struct drbd_md *in_core = &bdev->md;
3114 s32 on_disk_al_sect;
3115 s32 on_disk_bm_sect;
3116
3117 /* The on-disk size of the activity log, calculated from offsets, and
3118 * the size of the activity log calculated from the stripe settings,
3119 * should match.
3120 * Though we could relax this a bit: it is ok if the striped activity log
3121 * fits in the available on-disk activity log size.
3122 * Right now, that would break how resize is implemented.
3123 * TODO: make drbd_determine_dev_size() (and the drbdmeta tool) aware
3124 * of possible unused padding space in the on disk layout. */
3125 if (in_core->al_offset < 0) {
3126 if (in_core->bm_offset > in_core->al_offset)
3127 goto err;
3128 on_disk_al_sect = -in_core->al_offset;
3129 on_disk_bm_sect = in_core->al_offset - in_core->bm_offset;
3130 } else {
3131 if (in_core->al_offset != MD_4kB_SECT)
3132 goto err;
3133 if (in_core->bm_offset < in_core->al_offset + in_core->al_size_4k * MD_4kB_SECT)
3134 goto err;
3135
3136 on_disk_al_sect = in_core->bm_offset - MD_4kB_SECT;
3137 on_disk_bm_sect = in_core->md_size_sect - in_core->bm_offset;
3138 }
3139
3140 /* old fixed size meta data is exactly that: fixed. */
3141 if (in_core->meta_dev_idx >= 0) {
3142 if (in_core->md_size_sect != MD_128MB_SECT
3143 || in_core->al_offset != MD_4kB_SECT
3144 || in_core->bm_offset != MD_4kB_SECT + MD_32kB_SECT
3145 || in_core->al_stripes != 1
3146 || in_core->al_stripe_size_4k != MD_32kB_SECT/8)
3147 goto err;
3148 }
3149
3150 if (capacity < in_core->md_size_sect)
3151 goto err;
3152 if (capacity - in_core->md_size_sect < drbd_md_first_sector(bdev))
3153 goto err;
3154
3155 /* should be aligned, and at least 32k */
3156 if ((on_disk_al_sect & 7) || (on_disk_al_sect < MD_32kB_SECT))
3157 goto err;
3158
3159 /* should fit (for now: exactly) into the available on-disk space;
3160 * overflow prevention is in check_activity_log_stripe_size() above. */
3161 if (on_disk_al_sect != in_core->al_size_4k * MD_4kB_SECT)
3162 goto err;
3163
3164 /* again, should be aligned */
3165 if (in_core->bm_offset & 7)
3166 goto err;
3167
3168 /* FIXME check for device grow with flex external meta data? */
3169
3170 /* can the available bitmap space cover the last agreed device size? */
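	/* la_size_sect is in 512-byte sectors; /MD_4kB_SECT (= 8) yields
	 * 4 KiB blocks == bitmap bits, /8 bytes, /512 bitmap sectors. */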
3171 if (on_disk_bm_sect < (in_core->la_size_sect+7)/MD_4kB_SECT/8/512)
3172 goto err;
3173
3174 return 0;
3175
3176err:
3177 drbd_err(device, "meta data offsets don't make sense: idx=%d "
3178 "al_s=%u, al_sz4k=%u, al_offset=%d, bm_offset=%d, "
3179 "md_size_sect=%u, la_size=%llu, md_capacity=%llu\n",
3180 in_core->meta_dev_idx,
3181 in_core->al_stripes, in_core->al_stripe_size_4k,
3182 in_core->al_offset, in_core->bm_offset, in_core->md_size_sect,
3183 (unsigned long long)in_core->la_size_sect,
3184 (unsigned long long)capacity);
3185
3186 return -EINVAL;
3187}
3188
3189
3190/**
3191 * drbd_md_read() - Reads in the meta data super block
3192 * @device: DRBD device.
3193 * @bdev: Device from which the meta data should be read in.
3194 *
3195 * Returns NO_ERROR on success, or another enum drbd_ret_code value in
3196 * case something goes wrong.
3197 *
3198 * Called exactly once during drbd_adm_attach(), while still being D_DISKLESS,
3199 * even before @bdev is assigned to @device->ldev.
3200 */
3201int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
3202{
3203 struct meta_data_on_disk *buffer;
3204 u32 magic, flags;
3205 int i, rv = NO_ERROR;
3206
3207 if (device->state.disk != D_DISKLESS)
3208 return ERR_DISK_CONFIGURED;
3209
3210 buffer = drbd_md_get_buffer(device, __func__);
3211 if (!buffer)
3212 return ERR_NOMEM;
3213
3214 /* First, figure out where our meta data superblock is located,
3215 * and read it. */
3216 bdev->md.meta_dev_idx = bdev->disk_conf->meta_dev_idx;
3217 bdev->md.md_offset = drbd_md_ss(bdev);
3218 /* Even for (flexible or indexed) external meta data,
3219 * initially restrict ourselves to the 4k superblock for now.
3220 * Affects the paranoia out-of-range access check in drbd_md_sync_page_io(). */
3221 bdev->md.md_size_sect = 8;
3222
3223 if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset,
3224 REQ_OP_READ)) {
3225 /* NOTE: can't do normal error processing here as this is
3226 called BEFORE disk is attached */
3227 drbd_err(device, "Error while reading metadata.\n");
3228 rv = ERR_IO_MD_DISK;
3229 goto err;
3230 }
3231
3232 magic = be32_to_cpu(buffer->magic);
3233 flags = be32_to_cpu(buffer->flags);
3234 if (magic == DRBD_MD_MAGIC_84_UNCLEAN ||
3235 (magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) {
3236 /* btw: that's Activity Log clean, not "all" clean. */
3237 drbd_err(device, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
3238 rv = ERR_MD_UNCLEAN;
3239 goto err;
3240 }
3241
3242 rv = ERR_MD_INVALID;
3243 if (magic != DRBD_MD_MAGIC_08) {
3244 if (magic == DRBD_MD_MAGIC_07)
3245 drbd_err(device, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
3246 else
3247 drbd_err(device, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
3248 goto err;
3249 }
3250
3251 if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
3252 drbd_err(device, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
3253 be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
3254 goto err;
3255 }
3256
3257
3258 /* convert to in_core endian */
3259 bdev->md.la_size_sect = be64_to_cpu(buffer->la_size_sect);
3260 for (i = UI_CURRENT; i < UI_SIZE; i++)
3261 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
3262 bdev->md.flags = be32_to_cpu(buffer->flags);
3263 bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
3264
3265 bdev->md.md_size_sect = be32_to_cpu(buffer->md_size_sect);
3266 bdev->md.al_offset = be32_to_cpu(buffer->al_offset);
3267 bdev->md.bm_offset = be32_to_cpu(buffer->bm_offset);
3268
3269 if (check_activity_log_stripe_size(device, buffer, &bdev->md))
3270 goto err;
3271 if (check_offsets_and_sizes(device, bdev))
3272 goto err;
3273
3274 if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
3275 drbd_err(device, "unexpected bm_offset: %d (expected %d)\n",
3276 be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
3277 goto err;
3278 }
3279 if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
3280 drbd_err(device, "unexpected md_size: %u (expected %u)\n",
3281 be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
3282 goto err;
3283 }
3284
3285 rv = NO_ERROR;
3286
3287 spin_lock_irq(&device->resource->req_lock);
3288 if (device->state.conn < C_CONNECTED) {
3289 unsigned int peer;
3290 peer = be32_to_cpu(buffer->la_peer_max_bio_size);
3291 peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
3292 device->peer_max_bio_size = peer;
3293 }
3294 spin_unlock_irq(&device->resource->req_lock);
3295
3296 err:
3297 drbd_md_put_buffer(device);
3298
3299 return rv;
3300}
3301
3302/**
3303 * drbd_md_mark_dirty() - Mark meta data super block as dirty
3304 * @device: DRBD device.
3305 *
3306 * Call this function if you change anything that should be written to
3307 * the meta-data super block. This function sets MD_DIRTY, and starts a
3308 * timer that ensures that drbd_md_sync() gets called within five seconds.
3309 */
3310void drbd_md_mark_dirty(struct drbd_device *device)
3311{
3312 if (!test_and_set_bit(MD_DIRTY, &device->flags))
3313 mod_timer(&device->md_sync_timer, jiffies + 5*HZ);
3314}
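/*
 * Typical pattern (a sketch; see __drbd_uuid_set() below for a real
 * user): mutate the in-core meta data, then mark it dirty; the timer
 * syncs it within five seconds, or call drbd_md_sync() directly:
 *
 *	device->ldev->md.uuid[idx] = val;
 *	drbd_md_mark_dirty(device);
 */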
3315
3316void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local)
3317{
3318 int i;
3319
3320 for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
3321 device->ldev->md.uuid[i+1] = device->ldev->md.uuid[i];
3322}
3323
3324void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
3325{
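	/* The lowest bit of the current UUID encodes the role at the time
	 * it was set: 1 if generated on a Primary, 0 otherwise. */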
3326 if (idx == UI_CURRENT) {
3327 if (device->state.role == R_PRIMARY)
3328 val |= 1;
3329 else
3330 val &= ~((u64)1);
3331
3332 drbd_set_ed_uuid(device, val);
3333 }
3334
3335 device->ldev->md.uuid[idx] = val;
3336 drbd_md_mark_dirty(device);
3337}
3338
3339void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
3340{
3341 unsigned long flags;
3342 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
3343 __drbd_uuid_set(device, idx, val);
3344 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
3345}
3346
3347void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
3348{
3349 unsigned long flags;
3350 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
3351 if (device->ldev->md.uuid[idx]) {
3352 drbd_uuid_move_history(device);
3353 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[idx];
3354 }
3355 __drbd_uuid_set(device, idx, val);
3356 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
3357}
3358
3359/**
3360 * drbd_uuid_new_current() - Creates a new current UUID
3361 * @device: DRBD device.
3362 *
3363 * Creates a new current UUID, and rotates the old current UUID into
3364 * the bitmap slot. Causes an incremental resync upon next connect.
3365 */
3366void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local)
3367{
3368 u64 val;
3369 unsigned long long bm_uuid;
3370
3371 get_random_bytes(&val, sizeof(u64));
3372
3373 spin_lock_irq(&device->ldev->md.uuid_lock);
3374 bm_uuid = device->ldev->md.uuid[UI_BITMAP];
3375
3376 if (bm_uuid)
3377 drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);
3378
3379 device->ldev->md.uuid[UI_BITMAP] = device->ldev->md.uuid[UI_CURRENT];
3380 __drbd_uuid_set(device, UI_CURRENT, val);
3381 spin_unlock_irq(&device->ldev->md.uuid_lock);
3382
3383 drbd_print_uuids(device, "new current UUID");
3384 /* get it to stable storage _now_ */
3385 drbd_md_sync(device);
3386}
3387
3388void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
3389{
3390 unsigned long flags;
3391 if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
3392 return;
3393
3394 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
3395 if (val == 0) {
3396 drbd_uuid_move_history(device);
3397 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
3398 device->ldev->md.uuid[UI_BITMAP] = 0;
3399 } else {
3400 unsigned long long bm_uuid = device->ldev->md.uuid[UI_BITMAP];
3401 if (bm_uuid)
3402 drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);
3403
3404 device->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
3405 }
3406 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
3407
3408 drbd_md_mark_dirty(device);
3409}
3410
3411/**
3412 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3413 * @device: DRBD device.
3414 *
3415 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
3416 */
3417int drbd_bmio_set_n_write(struct drbd_device *device,
3418 struct drbd_peer_device *peer_device) __must_hold(local)
3419
3420{
3421 int rv = -EIO;
3422
3423 drbd_md_set_flag(device, MDF_FULL_SYNC);
3424 drbd_md_sync(device);
3425 drbd_bm_set_all(device);
3426
3427 rv = drbd_bm_write(device, peer_device);
3428
3429 if (!rv) {
3430 drbd_md_clear_flag(device, MDF_FULL_SYNC);
3431 drbd_md_sync(device);
3432 }
3433
3434 return rv;
3435}
3436
3437/**
3438 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3439 * @device: DRBD device.
3440 *
3441 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
3442 */
3443int drbd_bmio_clear_n_write(struct drbd_device *device,
3444 struct drbd_peer_device *peer_device) __must_hold(local)
3445
3446{
3447 drbd_resume_al(device);
3448 drbd_bm_clear_all(device);
3449 return drbd_bm_write(device, peer_device);
3450}
3451
3452static int w_bitmap_io(struct drbd_work *w, int unused)
3453{
3454 struct drbd_device *device =
3455 container_of(w, struct drbd_device, bm_io_work.w);
3456 struct bm_io_work *work = &device->bm_io_work;
3457 int rv = -EIO;
3458
3459 if (work->flags != BM_LOCKED_CHANGE_ALLOWED) {
3460 int cnt = atomic_read(&device->ap_bio_cnt);
3461 if (cnt)
3462 drbd_err(device, "FIXME: ap_bio_cnt %d, expected 0; queued for '%s'\n",
3463 cnt, work->why);
3464 }
3465
3466 if (get_ldev(device)) {
3467 drbd_bm_lock(device, work->why, work->flags);
3468 rv = work->io_fn(device, work->peer_device);
3469 drbd_bm_unlock(device);
3470 put_ldev(device);
3471 }
3472
3473 clear_bit_unlock(BITMAP_IO, &device->flags);
3474 wake_up(&device->misc_wait);
3475
3476 if (work->done)
3477 work->done(device, rv);
3478
3479 clear_bit(BITMAP_IO_QUEUED, &device->flags);
3480 work->why = NULL;
3481 work->flags = 0;
3482
3483 return 0;
3484}
3485
3486/**
3487 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
3488 * @device: DRBD device.
3489 * @io_fn: IO callback to be called when bitmap IO is possible
3490 * @done: callback to be called after the bitmap IO was performed
3491 * @why: Descriptive text of the reason for doing the IO
3492 * @flags: Bitmap flags
3493 *
3494 * While IO on the bitmap happens we freeze application IO, thus ensuring
3495 * that drbd_set_out_of_sync() cannot be called. This function MAY ONLY be
3496 * called from worker context. It MUST NOT be used while a previous such
3497 * work is still pending!
3498 *
3499 * Its worker function wraps the call to io_fn() in get_ldev() and
3500 * put_ldev().
3501 */
3502void drbd_queue_bitmap_io(struct drbd_device *device,
3503 int (*io_fn)(struct drbd_device *, struct drbd_peer_device *),
3504 void (*done)(struct drbd_device *, int),
3505 char *why, enum bm_flag flags,
3506 struct drbd_peer_device *peer_device)
3507{
3508 D_ASSERT(device, current == peer_device->connection->worker.task);
3509
3510 D_ASSERT(device, !test_bit(BITMAP_IO_QUEUED, &device->flags));
3511 D_ASSERT(device, !test_bit(BITMAP_IO, &device->flags));
3512 D_ASSERT(device, list_empty(&device->bm_io_work.w.list));
3513 if (device->bm_io_work.why)
3514 drbd_err(device, "FIXME going to queue '%s' but '%s' still pending?\n",
3515 why, device->bm_io_work.why);
3516
3517 device->bm_io_work.peer_device = peer_device;
3518 device->bm_io_work.io_fn = io_fn;
3519 device->bm_io_work.done = done;
3520 device->bm_io_work.why = why;
3521 device->bm_io_work.flags = flags;
3522
3523 spin_lock_irq(&device->resource->req_lock);
3524 set_bit(BITMAP_IO, &device->flags);
3525 /* don't wait for pending application IO if the caller indicates that
3526 * application IO does not conflict anyways. */
3527 if (flags == BM_LOCKED_CHANGE_ALLOWED || atomic_read(&device->ap_bio_cnt) == 0) {
3528 if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
3529 drbd_queue_work(&peer_device->connection->sender_work,
3530 &device->bm_io_work.w);
3531 }
3532 spin_unlock_irq(&device->resource->req_lock);
3533}
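
/*
 * Example (editor's sketch, not an actual call site in this file): from
 * worker context, a full sync could be prepared by queueing the bitmap
 * write with a completion callback. The callback name, the "why" string
 * and the flag combination (which locks out all bitmap modifications)
 * are illustrative only:
 *
 *	static void example_bitmap_io_done(struct drbd_device *device, int rv)
 *	{
 *		if (rv)
 *			drbd_err(device, "bitmap IO failed: %d\n", rv);
 *	}
 *
 *	drbd_queue_bitmap_io(device, &drbd_bmio_set_n_write,
 *			     example_bitmap_io_done, "example full sync",
 *			     BM_DONT_CLEAR | BM_DONT_SET | BM_DONT_TEST,
 *			     peer_device);
 */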

/**
 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
 * @device:	DRBD device.
 * @io_fn:	IO callback to be called when bitmap IO is possible
 * @why:	Descriptive text of the reason for doing the IO
 * @flags:	Bitmap flags
 * @peer_device: Peer DRBD device.
 *
 * Freezes application IO while the actual IO operation runs. This
 * function MAY NOT be called from worker context.
 */
int drbd_bitmap_io(struct drbd_device *device,
		int (*io_fn)(struct drbd_device *, struct drbd_peer_device *),
		char *why, enum bm_flag flags,
		struct drbd_peer_device *peer_device)
{
	/* Only suspend IO if some operation is supposed to be locked out */
	const bool do_suspend_io = flags & (BM_DONT_CLEAR|BM_DONT_SET|BM_DONT_TEST);
	int rv;

	D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);

	if (do_suspend_io)
		drbd_suspend_io(device);

	drbd_bm_lock(device, why, flags);
	rv = io_fn(device, peer_device);
	drbd_bm_unlock(device);

	if (do_suspend_io)
		drbd_resume_io(device);

	return rv;
}
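
/*
 * Example (editor's sketch, hypothetical caller): synchronously clearing
 * the whole bitmap from non-worker context, with all bitmap modifications
 * locked out for the duration. The "why" string is illustrative:
 *
 *	int rv = drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
 *				"example clear_n_write",
 *				BM_DONT_CLEAR | BM_DONT_SET | BM_DONT_TEST,
 *				peer_device);
 *	if (rv)
 *		drbd_err(device, "bitmap clear_n_write failed: %d\n", rv);
 */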

void drbd_md_set_flag(struct drbd_device *device, int flag) __must_hold(local)
{
	if ((device->ldev->md.flags & flag) != flag) {
		drbd_md_mark_dirty(device);
		device->ldev->md.flags |= flag;
	}
}

void drbd_md_clear_flag(struct drbd_device *device, int flag) __must_hold(local)
{
	if ((device->ldev->md.flags & flag) != 0) {
		drbd_md_mark_dirty(device);
		device->ldev->md.flags &= ~flag;
	}
}

int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
{
	return (bdev->md.flags & flag) != 0;
}
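
/*
 * Example (editor's sketch): the typical pattern for these helpers, as used
 * by drbd_bmio_set_n_write() above, is to set a flag, persist it with
 * drbd_md_sync(), and clear it again once the guarded operation succeeded:
 *
 *	drbd_md_set_flag(device, MDF_FULL_SYNC);
 *	drbd_md_sync(device);
 *	(operation that must be redone if we crash before it completes)
 *	drbd_md_clear_flag(device, MDF_FULL_SYNC);
 *	drbd_md_sync(device);
 */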

static void md_sync_timer_fn(struct timer_list *t)
{
	struct drbd_device *device = from_timer(device, t, md_sync_timer);
	drbd_device_post_work(device, MD_SYNC);
}

const char *cmdname(enum drbd_packet cmd)
{
	/* THINK may need to become several global tables
	 * when we want to support more than
	 * one PRO_VERSION */
	static const char *cmdnames[] = {
		[P_DATA] = "Data",
		[P_DATA_REPLY] = "DataReply",
		[P_RS_DATA_REPLY] = "RSDataReply",
		[P_BARRIER] = "Barrier",
		[P_BITMAP] = "ReportBitMap",
		[P_BECOME_SYNC_TARGET] = "BecomeSyncTarget",
		[P_BECOME_SYNC_SOURCE] = "BecomeSyncSource",
		[P_UNPLUG_REMOTE] = "UnplugRemote",
		[P_DATA_REQUEST] = "DataRequest",
		[P_RS_DATA_REQUEST] = "RSDataRequest",
		[P_SYNC_PARAM] = "SyncParam",
		[P_PROTOCOL] = "ReportProtocol",
		[P_UUIDS] = "ReportUUIDs",
		[P_SIZES] = "ReportSizes",
		[P_STATE] = "ReportState",
		[P_SYNC_UUID] = "ReportSyncUUID",
		[P_AUTH_CHALLENGE] = "AuthChallenge",
		[P_AUTH_RESPONSE] = "AuthResponse",
		[P_STATE_CHG_REQ] = "StateChgRequest",
		[P_PING] = "Ping",
		[P_PING_ACK] = "PingAck",
		[P_RECV_ACK] = "RecvAck",
		[P_WRITE_ACK] = "WriteAck",
		[P_RS_WRITE_ACK] = "RSWriteAck",
		[P_SUPERSEDED] = "Superseded",
		[P_NEG_ACK] = "NegAck",
		[P_NEG_DREPLY] = "NegDReply",
		[P_NEG_RS_DREPLY] = "NegRSDReply",
		[P_BARRIER_ACK] = "BarrierAck",
		[P_STATE_CHG_REPLY] = "StateChgReply",
		[P_OV_REQUEST] = "OVRequest",
		[P_OV_REPLY] = "OVReply",
		[P_OV_RESULT] = "OVResult",
		[P_CSUM_RS_REQUEST] = "CsumRSRequest",
		[P_RS_IS_IN_SYNC] = "CsumRSIsInSync",
		[P_SYNC_PARAM89] = "SyncParam89",
		[P_COMPRESSED_BITMAP] = "CBitmap",
		[P_DELAY_PROBE] = "DelayProbe",
		[P_OUT_OF_SYNC] = "OutOfSync",
		[P_RS_CANCEL] = "RSCancel",
		[P_CONN_ST_CHG_REQ] = "conn_st_chg_req",
		[P_CONN_ST_CHG_REPLY] = "conn_st_chg_reply",
		[P_PROTOCOL_UPDATE] = "protocol_update",
		[P_TRIM] = "Trim",
		[P_RS_THIN_REQ] = "rs_thin_req",
		[P_RS_DEALLOCATED] = "rs_deallocated",
		[P_WSAME] = "WriteSame",
		[P_ZEROES] = "Zeroes",

		/* enum drbd_packet, but not commands - obsoleted flags:
		 *	P_MAY_IGNORE
		 *	P_MAX_OPT_CMD
		 */
	};

	/* too big for the array: 0xfffX */
	if (cmd == P_INITIAL_META)
		return "InitialMeta";
	if (cmd == P_INITIAL_DATA)
		return "InitialData";
	if (cmd == P_CONNECTION_FEATURES)
		return "ConnectionFeatures";
	if (cmd >= ARRAY_SIZE(cmdnames))
		return "Unknown";
	return cmdnames[cmd];
}
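
/*
 * Example (editor's sketch): cmdname() is meant for human-readable logging
 * of protocol packets, e.g. when an unexpected packet type arrives. The
 * log call below is illustrative only:
 *
 *	drbd_err(device, "unexpected packet type %s (0x%04x)\n",
 *		 cmdname(cmd), cmd);
 */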

/**
 * drbd_wait_misc() - wait for a request to make progress
 * @device:	device associated with the request
 * @i:		the struct drbd_interval embedded in struct drbd_request or
 *		struct drbd_peer_request
 *
 * Must be called with the req_lock held; the lock is dropped while
 * sleeping and re-acquired before returning.
 */
int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i)
{
	struct net_conf *nc;
	DEFINE_WAIT(wait);
	long timeout;

	rcu_read_lock();
	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return -ETIMEDOUT;
	}
	timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT;
	rcu_read_unlock();

	/* Indicate to wake up device->misc_wait on progress. */
	i->waiting = true;
	prepare_to_wait(&device->misc_wait, &wait, TASK_INTERRUPTIBLE);
	spin_unlock_irq(&device->resource->req_lock);
	timeout = schedule_timeout(timeout);
	finish_wait(&device->misc_wait, &wait);
	spin_lock_irq(&device->resource->req_lock);
	if (!timeout || device->state.conn < C_CONNECTED)
		return -ETIMEDOUT;
	if (signal_pending(current))
		return -ERESTARTSYS;
	return 0;
}
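
/*
 * Example (editor's sketch, hypothetical caller): waiting for a conflicting
 * request to complete, retrying until the interval makes progress.
 * interval_is_busy() stands in for whatever conflict predicate the caller
 * actually uses:
 *
 *	spin_lock_irq(&device->resource->req_lock);
 *	while (interval_is_busy(i)) {
 *		int err = drbd_wait_misc(device, i);
 *		if (err) {
 *			spin_unlock_irq(&device->resource->req_lock);
 *			return err;
 *		}
 *	}
 *	spin_unlock_irq(&device->resource->req_lock);
 */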

void lock_all_resources(void)
{
	struct drbd_resource *resource;
	int __maybe_unused i = 0;

	mutex_lock(&resources_mutex);
	local_irq_disable();
	/* Take every resource's req_lock with a distinct lockdep subclass,
	 * so that holding several of them at once does not trigger a
	 * false-positive deadlock report. */
	for_each_resource(resource, &drbd_resources)
		spin_lock_nested(&resource->req_lock, i++);
}

void unlock_all_resources(void)
{
	struct drbd_resource *resource;

	for_each_resource(resource, &drbd_resources)
		spin_unlock(&resource->req_lock);
	local_irq_enable();
	mutex_unlock(&resources_mutex);
}
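
/*
 * Example (editor's sketch): the two functions above form a bracket around
 * operations that must see a consistent view of every resource at once:
 *
 *	lock_all_resources();
 *	(inspect or modify state spanning all resources)
 *	unlock_all_resources();
 */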

#ifdef CONFIG_DRBD_FAULT_INJECTION
/* Fault insertion support including random number generator shamelessly
 * stolen from kernel/rcutorture.c */
struct fault_random_state {
	unsigned long state;
	unsigned long count;
};

#define FAULT_RANDOM_MULT 39916801  /* prime */
#define FAULT_RANDOM_ADD  479001701 /* prime */
#define FAULT_RANDOM_REFRESH 10000

/*
 * Crude but fast random-number generator. Uses a linear congruential
 * generator, with occasional help from get_random_bytes().
 */
static unsigned long
_drbd_fault_random(struct fault_random_state *rsp)
{
	long refresh;

	if (!rsp->count--) {
		get_random_bytes(&refresh, sizeof(refresh));
		rsp->state += refresh;
		rsp->count = FAULT_RANDOM_REFRESH;
	}
	rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
	return swahw32(rsp->state);
}

static char *
_drbd_fault_str(unsigned int type)
{
	static char *_faults[] = {
		[DRBD_FAULT_MD_WR] = "Meta-data write",
		[DRBD_FAULT_MD_RD] = "Meta-data read",
		[DRBD_FAULT_RS_WR] = "Resync write",
		[DRBD_FAULT_RS_RD] = "Resync read",
		[DRBD_FAULT_DT_WR] = "Data write",
		[DRBD_FAULT_DT_RD] = "Data read",
		[DRBD_FAULT_DT_RA] = "Data read ahead",
		[DRBD_FAULT_BM_ALLOC] = "BM allocation",
		[DRBD_FAULT_AL_EE] = "EE allocation",
		[DRBD_FAULT_RECEIVE] = "receive data corruption",
	};

	return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
}

unsigned int
_drbd_insert_fault(struct drbd_device *device, unsigned int type)
{
	static struct fault_random_state rrs = {0, 0};

	/* Insert a fault iff this device is selected by the fault_devs
	 * bitmap (0 means "all devices") and a roll of 1..100 falls
	 * within the configured fault_rate percentage. */
	unsigned int ret = (
		(drbd_fault_devs == 0 ||
			((1 << device_to_minor(device)) & drbd_fault_devs) != 0) &&
		(((_drbd_fault_random(&rrs) % 100) + 1) <= drbd_fault_rate));

	if (ret) {
		drbd_fault_count++;

		if (drbd_ratelimit())
			drbd_warn(device, "***Simulating %s failure\n",
				  _drbd_fault_str(type));
	}

	return ret;
}
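
/*
 * Example (editor's sketch, hypothetical call site): an IO submission path
 * could probe for an injected fault before issuing a request and fail the
 * bio early instead of submitting it:
 *
 *	if (_drbd_insert_fault(device, DRBD_FAULT_DT_WR)) {
 *		bio->bi_status = BLK_STS_IOERR;
 *		bio_endio(bio);
 *		return;
 *	}
 */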
#endif

module_init(drbd_init)
module_exit(drbd_cleanup)

EXPORT_SYMBOL(drbd_conn_str);
EXPORT_SYMBOL(drbd_role_str);
EXPORT_SYMBOL(drbd_disk_str);
EXPORT_SYMBOL(drbd_set_st_err_str);