1/*
2 drbd.c
3
4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10 Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
11 from Logicworks, Inc. for making SDP replication support possible.
12
13 drbd is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
16 any later version.
17
18 drbd is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with drbd; see the file COPYING. If not, write to
25 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26
27 */
28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31#include <linux/module.h>
32#include <linux/jiffies.h>
33#include <linux/drbd.h>
34#include <linux/uaccess.h>
35#include <asm/types.h>
36#include <net/sock.h>
37#include <linux/ctype.h>
38#include <linux/mutex.h>
39#include <linux/fs.h>
40#include <linux/file.h>
41#include <linux/proc_fs.h>
42#include <linux/init.h>
43#include <linux/mm.h>
44#include <linux/memcontrol.h>
45#include <linux/mm_inline.h>
46#include <linux/slab.h>
47#include <linux/random.h>
48#include <linux/reboot.h>
49#include <linux/notifier.h>
50#include <linux/kthread.h>
51#include <linux/workqueue.h>
52#define __KERNEL_SYSCALLS__
53#include <linux/unistd.h>
54#include <linux/vmalloc.h>
55#include <linux/sched/signal.h>
56
57#include <linux/drbd_limits.h>
58#include "drbd_int.h"
59#include "drbd_protocol.h"
60#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
61#include "drbd_vli.h"
62#include "drbd_debugfs.h"
63
64static DEFINE_MUTEX(drbd_main_mutex);
65static int drbd_open(struct block_device *bdev, fmode_t mode);
66static void drbd_release(struct gendisk *gd, fmode_t mode);
67static void md_sync_timer_fn(unsigned long data);
68static int w_bitmap_io(struct drbd_work *w, int unused);
69
70MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
71 "Lars Ellenberg <lars@linbit.com>");
72MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
73MODULE_VERSION(REL_VERSION);
74MODULE_LICENSE("GPL");
75MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
76 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
77MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
78
79#include <linux/moduleparam.h>
80/* allow_open_on_secondary */
81MODULE_PARM_DESC(allow_oos, "DONT USE!");
82/* thanks to these macros, if compiled into the kernel (not-module),
83 * this becomes the boot parameter drbd.minor_count */
84module_param(minor_count, uint, 0444);
85module_param(disable_sendpage, bool, 0644);
86module_param(allow_oos, bool, 0);
87module_param(proc_details, int, 0644);
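/* Illustrative usage only (not part of the driver): when built as a module,
 * these parameters can be set at load time, e.g.
 *	modprobe drbd minor_count=32 disable_sendpage=1
 * when built into the kernel, the same setting becomes a boot parameter:
 *	drbd.minor_count=32
 * (example values, not recommendations) */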
88
89#ifdef CONFIG_DRBD_FAULT_INJECTION
90int enable_faults;
91int fault_rate;
92static int fault_count;
93int fault_devs;
94/* bitmap of enabled faults */
95module_param(enable_faults, int, 0664);
96/* fault rate % value - applies to all enabled faults */
97module_param(fault_rate, int, 0664);
98/* count of faults inserted */
99module_param(fault_count, int, 0664);
100/* bitmap of devices to insert faults on */
101module_param(fault_devs, int, 0644);
102#endif
103
104/* module parameter, defined */
105unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
106bool disable_sendpage;
107bool allow_oos;
108int proc_details; /* Detail level in proc drbd */
109
110/* Module parameter for setting the user mode helper program
111 * to run. Default is /sbin/drbdadm */
112char usermode_helper[80] = "/sbin/drbdadm";
113
114module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);
115
116/* in 2.6.x, our device mapping and config info contains our virtual gendisks
117 * as member "struct gendisk *vdisk;"
118 */
119struct idr drbd_devices;
120struct list_head drbd_resources;
121struct mutex resources_mutex;
122
123struct kmem_cache *drbd_request_cache;
124struct kmem_cache *drbd_ee_cache; /* peer requests */
125struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */
126struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
127mempool_t *drbd_request_mempool;
128mempool_t *drbd_ee_mempool;
129mempool_t *drbd_md_io_page_pool;
130struct bio_set *drbd_md_io_bio_set;
131
132/* I do not use a standard mempool, because:
133 1) I want to hand out the pre-allocated objects first.
134 2) I want to be able to interrupt sleeping allocation with a signal.
135 Note: This is a singly-linked list; the next pointer is the private
136 member of struct page.
137 */
138struct page *drbd_pp_pool;
139spinlock_t drbd_pp_lock;
140int drbd_pp_vacant;
141wait_queue_head_t drbd_pp_wait;
142
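/* Illustrative sketch only (not used by the driver): how a pool like
 * drbd_pp_pool can be maintained as a singly-linked list chained through
 * the private member of struct page.  The real alloc/free helpers live in
 * drbd_receiver.c; the names below are made up.
 */
static inline void example_pp_push(struct page **pool, struct page *page)
{
	set_page_private(page, (unsigned long)*pool);	/* "next" pointer */
	*pool = page;
}

static inline struct page *example_pp_pop(struct page **pool)
{
	struct page *page = *pool;

	if (page) {
		*pool = (struct page *)page_private(page);
		set_page_private(page, 0);
	}
	return page;
}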
143DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
144
145static const struct block_device_operations drbd_ops = {
146 .owner = THIS_MODULE,
147 .open = drbd_open,
148 .release = drbd_release,
149};
150
151struct bio *bio_alloc_drbd(gfp_t gfp_mask)
152{
153 struct bio *bio;
154
155 if (!drbd_md_io_bio_set)
156 return bio_alloc(gfp_mask, 1);
157
158 bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
159 if (!bio)
160 return NULL;
161 return bio;
162}
163
164#ifdef __CHECKER__
165/* When checking with sparse, and this is an inline function, sparse will
166 give tons of false positives. When this is a real function, sparse works.
167 */
168int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
169{
170 int io_allowed;
171
172 atomic_inc(&device->local_cnt);
173 io_allowed = (device->state.disk >= mins);
174 if (!io_allowed) {
175 if (atomic_dec_and_test(&device->local_cnt))
176 wake_up(&device->misc_wait);
177 }
178 return io_allowed;
179}
180
181#endif
182
183/**
184 * tl_release() - mark as BARRIER_ACKED all requests in the corresponding transfer log epoch
185 * @connection: DRBD connection.
186 * @barrier_nr: Expected identifier of the DRBD write barrier packet.
187 * @set_size: Expected number of requests before that barrier.
188 *
189 * In case the passed barrier_nr or set_size does not match the oldest
190 * epoch of not yet barrier-acked requests, this function will cause a
191 * termination of the connection.
192 */
193void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
194 unsigned int set_size)
195{
196 struct drbd_request *r;
197 struct drbd_request *req = NULL;
198 int expect_epoch = 0;
199 int expect_size = 0;
200
201 spin_lock_irq(&connection->resource->req_lock);
202
203 /* find oldest not yet barrier-acked write request,
204 * count writes in its epoch. */
205 list_for_each_entry(r, &connection->transfer_log, tl_requests) {
206 const unsigned s = r->rq_state;
207 if (!req) {
208 if (!(s & RQ_WRITE))
209 continue;
210 if (!(s & RQ_NET_MASK))
211 continue;
212 if (s & RQ_NET_DONE)
213 continue;
214 req = r;
215 expect_epoch = req->epoch;
216 expect_size++;
217 } else {
218 if (r->epoch != expect_epoch)
219 break;
220 if (!(s & RQ_WRITE))
221 continue;
222 /* if (s & RQ_DONE): not expected */
223 /* if (!(s & RQ_NET_MASK)): not expected */
224 expect_size++;
225 }
226 }
227
228 /* first some paranoia code */
229 if (req == NULL) {
230 drbd_err(connection, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
231 barrier_nr);
232 goto bail;
233 }
234 if (expect_epoch != barrier_nr) {
235 drbd_err(connection, "BAD! BarrierAck #%u received, expected #%u!\n",
236 barrier_nr, expect_epoch);
237 goto bail;
238 }
239
240 if (expect_size != set_size) {
241 drbd_err(connection, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
242 barrier_nr, set_size, expect_size);
243 goto bail;
244 }
245
246 /* Clean up list of requests processed during current epoch. */
247 /* this extra list walk restart is paranoia,
248 * to catch requests being barrier-acked "unexpectedly".
249 * It usually should find the same req again, or some READ preceding it. */
250 list_for_each_entry(req, &connection->transfer_log, tl_requests)
251 if (req->epoch == expect_epoch)
252 break;
253 list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
254 if (req->epoch != expect_epoch)
255 break;
256 _req_mod(req, BARRIER_ACKED);
257 }
258 spin_unlock_irq(&connection->resource->req_lock);
259
260 return;
261
262bail:
263 spin_unlock_irq(&connection->resource->req_lock);
264 conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
265}
266
267
268/**
269 * _tl_restart() - Walks the transfer log, and applies an action to all requests
270 * @connection: DRBD connection to operate on.
271 * @what: The action/event to perform with all request objects
272 *
273 * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
274 * RESTART_FROZEN_DISK_IO.
275 */
276/* must hold resource->req_lock */
277void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
278{
279 struct drbd_request *req, *r;
280
281 list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests)
282 _req_mod(req, what);
283}
284
285void tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
286{
287 spin_lock_irq(&connection->resource->req_lock);
288 _tl_restart(connection, what);
289 spin_unlock_irq(&connection->resource->req_lock);
290}
291
292/**
293 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
294 * @connection: DRBD connection.
295 *
296 * This is called after the connection to the peer was lost. The storage covered
297 * by the requests on the transfer log gets marked as out of sync. Called from the
298 * receiver thread and the worker thread.
299 */
300void tl_clear(struct drbd_connection *connection)
301{
302 tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
303}
304
305/**
306 * tl_abort_disk_io() - Abort disk I/O for all requests for a certain device in the TL
307 * @device: DRBD device.
308 */
309void tl_abort_disk_io(struct drbd_device *device)
310{
311 struct drbd_connection *connection = first_peer_device(device)->connection;
312 struct drbd_request *req, *r;
313
314 spin_lock_irq(&connection->resource->req_lock);
315 list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests) {
316 if (!(req->rq_state & RQ_LOCAL_PENDING))
317 continue;
318 if (req->device != device)
319 continue;
320 _req_mod(req, ABORT_DISK_IO);
321 }
322 spin_unlock_irq(&connection->resource->req_lock);
323}
324
325static int drbd_thread_setup(void *arg)
326{
327 struct drbd_thread *thi = (struct drbd_thread *) arg;
328 struct drbd_resource *resource = thi->resource;
329 unsigned long flags;
330 int retval;
331
332 snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
333 thi->name[0],
334 resource->name);
335
336restart:
337 retval = thi->function(thi);
338
339 spin_lock_irqsave(&thi->t_lock, flags);
340
341 /* if the receiver has been "EXITING", the last thing it did
342 * was set the conn state to "StandAlone",
343 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
344 * and receiver thread will be "started".
345 * drbd_thread_start needs to set "RESTARTING" in that case.
346 * t_state check and assignment needs to be within the same spinlock,
347 * so either thread_start sees EXITING, and can remap to RESTARTING,
349 * or thread_start sees NONE, and can proceed as normal.
349 */
350
351 if (thi->t_state == RESTARTING) {
352 drbd_info(resource, "Restarting %s thread\n", thi->name);
353 thi->t_state = RUNNING;
354 spin_unlock_irqrestore(&thi->t_lock, flags);
355 goto restart;
356 }
357
358 thi->task = NULL;
359 thi->t_state = NONE;
360 smp_mb();
361 complete_all(&thi->stop);
362 spin_unlock_irqrestore(&thi->t_lock, flags);
363
364 drbd_info(resource, "Terminating %s\n", current->comm);
365
366 /* Release mod reference taken when thread was started */
367
368 if (thi->connection)
369 kref_put(&thi->connection->kref, drbd_destroy_connection);
370 kref_put(&resource->kref, drbd_destroy_resource);
371 module_put(THIS_MODULE);
372 return retval;
373}
374
375static void drbd_thread_init(struct drbd_resource *resource, struct drbd_thread *thi,
376 int (*func) (struct drbd_thread *), const char *name)
377{
378 spin_lock_init(&thi->t_lock);
379 thi->task = NULL;
380 thi->t_state = NONE;
381 thi->function = func;
382 thi->resource = resource;
383 thi->connection = NULL;
384 thi->name = name;
385}
386
387int drbd_thread_start(struct drbd_thread *thi)
388{
389 struct drbd_resource *resource = thi->resource;
390 struct task_struct *nt;
391 unsigned long flags;
392
393 /* is used from state engine doing drbd_thread_stop_nowait,
394 * while holding the req lock irqsave */
395 spin_lock_irqsave(&thi->t_lock, flags);
396
397 switch (thi->t_state) {
398 case NONE:
399 drbd_info(resource, "Starting %s thread (from %s [%d])\n",
400 thi->name, current->comm, current->pid);
401
402 /* Get ref on module for thread - this is released when thread exits */
403 if (!try_module_get(THIS_MODULE)) {
404 drbd_err(resource, "Failed to get module reference in drbd_thread_start\n");
405 spin_unlock_irqrestore(&thi->t_lock, flags);
406 return false;
407 }
408
409 kref_get(&resource->kref);
410 if (thi->connection)
411 kref_get(&thi->connection->kref);
412
413 init_completion(&thi->stop);
414 thi->reset_cpu_mask = 1;
415 thi->t_state = RUNNING;
416 spin_unlock_irqrestore(&thi->t_lock, flags);
417 flush_signals(current); /* otherw. may get -ERESTARTNOINTR */
418
419 nt = kthread_create(drbd_thread_setup, (void *) thi,
420 "drbd_%c_%s", thi->name[0], thi->resource->name);
421
422 if (IS_ERR(nt)) {
423 drbd_err(resource, "Couldn't start thread\n");
424
425 if (thi->connection)
426 kref_put(&thi->connection->kref, drbd_destroy_connection);
427 kref_put(&resource->kref, drbd_destroy_resource);
428 module_put(THIS_MODULE);
429 return false;
430 }
431 spin_lock_irqsave(&thi->t_lock, flags);
432 thi->task = nt;
433 thi->t_state = RUNNING;
434 spin_unlock_irqrestore(&thi->t_lock, flags);
435 wake_up_process(nt);
436 break;
437 case EXITING:
438 thi->t_state = RESTARTING;
439 drbd_info(resource, "Restarting %s thread (from %s [%d])\n",
440 thi->name, current->comm, current->pid);
441 /* fall through */
442 case RUNNING:
443 case RESTARTING:
444 default:
445 spin_unlock_irqrestore(&thi->t_lock, flags);
446 break;
447 }
448
449 return true;
450}
451
452
453void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
454{
455 unsigned long flags;
456
457 enum drbd_thread_state ns = restart ? RESTARTING : EXITING;
458
459 /* may be called from state engine, holding the req lock irqsave */
460 spin_lock_irqsave(&thi->t_lock, flags);
461
462 if (thi->t_state == NONE) {
463 spin_unlock_irqrestore(&thi->t_lock, flags);
464 if (restart)
465 drbd_thread_start(thi);
466 return;
467 }
468
469 if (thi->t_state != ns) {
470 if (thi->task == NULL) {
471 spin_unlock_irqrestore(&thi->t_lock, flags);
472 return;
473 }
474
475 thi->t_state = ns;
476 smp_mb();
477 init_completion(&thi->stop);
478 if (thi->task != current)
479 force_sig(DRBD_SIGKILL, thi->task);
480 }
481
482 spin_unlock_irqrestore(&thi->t_lock, flags);
483
484 if (wait)
485 wait_for_completion(&thi->stop);
486}
487
488int conn_lowest_minor(struct drbd_connection *connection)
489{
490 struct drbd_peer_device *peer_device;
491 int vnr = 0, minor = -1;
492
493 rcu_read_lock();
494 peer_device = idr_get_next(&connection->peer_devices, &vnr);
495 if (peer_device)
496 minor = device_to_minor(peer_device->device);
497 rcu_read_unlock();
498
499 return minor;
500}
501
502#ifdef CONFIG_SMP
503/**
504 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
505 *
506 * Forces all threads of a resource onto the same CPU. This is beneficial for
507 * DRBD's performance. May be overridden by the user's configuration.
508 */
509static void drbd_calc_cpu_mask(cpumask_var_t *cpu_mask)
510{
511 unsigned int *resources_per_cpu, min_index = ~0;
512
513 resources_per_cpu = kzalloc(nr_cpu_ids * sizeof(*resources_per_cpu), GFP_KERNEL);
514 if (resources_per_cpu) {
515 struct drbd_resource *resource;
516 unsigned int cpu, min = ~0;
517
518 rcu_read_lock();
519 for_each_resource_rcu(resource, &drbd_resources) {
520 for_each_cpu(cpu, resource->cpu_mask)
521 resources_per_cpu[cpu]++;
522 }
523 rcu_read_unlock();
524 for_each_online_cpu(cpu) {
525 if (resources_per_cpu[cpu] < min) {
526 min = resources_per_cpu[cpu];
527 min_index = cpu;
528 }
529 }
530 kfree(resources_per_cpu);
531 }
532 if (min_index == ~0) {
533 cpumask_setall(*cpu_mask);
534 return;
535 }
536 cpumask_set_cpu(min_index, *cpu_mask);
537}
538
539/**
540 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
542 * @thi: drbd_thread object
543 *
544 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
545 * prematurely.
546 */
547void drbd_thread_current_set_cpu(struct drbd_thread *thi)
548{
549 struct drbd_resource *resource = thi->resource;
550 struct task_struct *p = current;
551
552 if (!thi->reset_cpu_mask)
553 return;
554 thi->reset_cpu_mask = 0;
555 set_cpus_allowed_ptr(p, resource->cpu_mask);
556}
557#else
558#define drbd_calc_cpu_mask(A) ({})
559#endif
560
561/**
562 * drbd_header_size - size of a packet header
563 *
564 * The header size is a multiple of 8, so any payload following the header is
565 * word aligned on 64-bit architectures. (The bitmap send and receive code
566 * relies on this.)
567 */
568unsigned int drbd_header_size(struct drbd_connection *connection)
569{
570 if (connection->agreed_pro_version >= 100) {
571 BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
572 return sizeof(struct p_header100);
573 } else {
574 BUILD_BUG_ON(sizeof(struct p_header80) !=
575 sizeof(struct p_header95));
576 BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header80), 8));
577 return sizeof(struct p_header80);
578 }
579}
580
581static unsigned int prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
582{
583 h->magic = cpu_to_be32(DRBD_MAGIC);
584 h->command = cpu_to_be16(cmd);
585 h->length = cpu_to_be16(size);
586 return sizeof(struct p_header80);
587}
588
589static unsigned int prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
590{
591 h->magic = cpu_to_be16(DRBD_MAGIC_BIG);
592 h->command = cpu_to_be16(cmd);
593 h->length = cpu_to_be32(size);
594 return sizeof(struct p_header95);
595}
596
597static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cmd,
598 int size, int vnr)
599{
600 h->magic = cpu_to_be32(DRBD_MAGIC_100);
601 h->volume = cpu_to_be16(vnr);
602 h->command = cpu_to_be16(cmd);
603 h->length = cpu_to_be32(size);
604 h->pad = 0;
605 return sizeof(struct p_header100);
606}
607
608static unsigned int prepare_header(struct drbd_connection *connection, int vnr,
609 void *buffer, enum drbd_packet cmd, int size)
610{
611 if (connection->agreed_pro_version >= 100)
612 return prepare_header100(buffer, cmd, size, vnr);
613 else if (connection->agreed_pro_version >= 95 &&
614 size > DRBD_MAX_SIZE_H80_PACKET)
615 return prepare_header95(buffer, cmd, size);
616 else
617 return prepare_header80(buffer, cmd, size);
618}
619
620static void *__conn_prepare_command(struct drbd_connection *connection,
621 struct drbd_socket *sock)
622{
623 if (!sock->socket)
624 return NULL;
625 return sock->sbuf + drbd_header_size(connection);
626}
627
628void *conn_prepare_command(struct drbd_connection *connection, struct drbd_socket *sock)
629{
630 void *p;
631
632 mutex_lock(&sock->mutex);
633 p = __conn_prepare_command(connection, sock);
634 if (!p)
635 mutex_unlock(&sock->mutex);
636
637 return p;
638}
639
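/* Note (summary of the calling convention visible in this file): on success,
 * conn_prepare_command() and drbd_prepare_command() return with sock->mutex
 * held; it is released again by conn_send_command()/drbd_send_command(), or
 * must be unlocked by the caller if no command gets sent after all
 * (see e.g. drbd_send_dblock() below). */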
640void *drbd_prepare_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock)
641{
642 return conn_prepare_command(peer_device->connection, sock);
643}
644
645static int __send_command(struct drbd_connection *connection, int vnr,
646 struct drbd_socket *sock, enum drbd_packet cmd,
647 unsigned int header_size, void *data,
648 unsigned int size)
649{
650 int msg_flags;
651 int err;
652
653 /*
654 * Called with @data == NULL and the size of the data blocks in @size
655 * for commands that send data blocks. For those commands, omit the
656 * MSG_MORE flag: this will increase the likelihood that data blocks
657 * which are page aligned on the sender will end up page aligned on the
658 * receiver.
659 */
660 msg_flags = data ? MSG_MORE : 0;
661
662 header_size += prepare_header(connection, vnr, sock->sbuf, cmd,
663 header_size + size);
664 err = drbd_send_all(connection, sock->socket, sock->sbuf, header_size,
665 msg_flags);
666 if (data && !err)
667 err = drbd_send_all(connection, sock->socket, data, size, 0);
668 /* DRBD protocol "pings" are latency critical.
669 * This is supposed to trigger tcp_push_pending_frames() */
670 if (!err && (cmd == P_PING || cmd == P_PING_ACK))
671 drbd_tcp_nodelay(sock->socket);
672
673 return err;
674}
675
676static int __conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
677 enum drbd_packet cmd, unsigned int header_size,
678 void *data, unsigned int size)
679{
680 return __send_command(connection, 0, sock, cmd, header_size, data, size);
681}
682
683int conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
684 enum drbd_packet cmd, unsigned int header_size,
685 void *data, unsigned int size)
686{
687 int err;
688
689 err = __conn_send_command(connection, sock, cmd, header_size, data, size);
690 mutex_unlock(&sock->mutex);
691 return err;
692}
693
694int drbd_send_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock,
695 enum drbd_packet cmd, unsigned int header_size,
696 void *data, unsigned int size)
697{
698 int err;
699
700 err = __send_command(peer_device->connection, peer_device->device->vnr,
701 sock, cmd, header_size, data, size);
702 mutex_unlock(&sock->mutex);
703 return err;
704}
705
706int drbd_send_ping(struct drbd_connection *connection)
707{
708 struct drbd_socket *sock;
709
710 sock = &connection->meta;
711 if (!conn_prepare_command(connection, sock))
712 return -EIO;
713 return conn_send_command(connection, sock, P_PING, 0, NULL, 0);
714}
715
716int drbd_send_ping_ack(struct drbd_connection *connection)
717{
718 struct drbd_socket *sock;
719
720 sock = &connection->meta;
721 if (!conn_prepare_command(connection, sock))
722 return -EIO;
723 return conn_send_command(connection, sock, P_PING_ACK, 0, NULL, 0);
724}
725
726int drbd_send_sync_param(struct drbd_peer_device *peer_device)
727{
728 struct drbd_socket *sock;
729 struct p_rs_param_95 *p;
730 int size;
731 const int apv = peer_device->connection->agreed_pro_version;
732 enum drbd_packet cmd;
733 struct net_conf *nc;
734 struct disk_conf *dc;
735
736 sock = &peer_device->connection->data;
737 p = drbd_prepare_command(peer_device, sock);
738 if (!p)
739 return -EIO;
740
741 rcu_read_lock();
742 nc = rcu_dereference(peer_device->connection->net_conf);
743
744 size = apv <= 87 ? sizeof(struct p_rs_param)
745 : apv == 88 ? sizeof(struct p_rs_param)
746 + strlen(nc->verify_alg) + 1
747 : apv <= 94 ? sizeof(struct p_rs_param_89)
748 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
749
750 cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
751
752 /* initialize verify_alg and csums_alg */
753 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
754
755 if (get_ldev(peer_device->device)) {
756 dc = rcu_dereference(peer_device->device->ldev->disk_conf);
757 p->resync_rate = cpu_to_be32(dc->resync_rate);
758 p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead);
759 p->c_delay_target = cpu_to_be32(dc->c_delay_target);
760 p->c_fill_target = cpu_to_be32(dc->c_fill_target);
761 p->c_max_rate = cpu_to_be32(dc->c_max_rate);
762 put_ldev(peer_device->device);
763 } else {
764 p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF);
765 p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
766 p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
767 p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
768 p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
769 }
770
771 if (apv >= 88)
772 strcpy(p->verify_alg, nc->verify_alg);
773 if (apv >= 89)
774 strcpy(p->csums_alg, nc->csums_alg);
775 rcu_read_unlock();
776
777 return drbd_send_command(peer_device, sock, cmd, size, NULL, 0);
778}
779
780int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd)
781{
782 struct drbd_socket *sock;
783 struct p_protocol *p;
784 struct net_conf *nc;
785 int size, cf;
786
787 sock = &connection->data;
788 p = __conn_prepare_command(connection, sock);
789 if (!p)
790 return -EIO;
791
792 rcu_read_lock();
793 nc = rcu_dereference(connection->net_conf);
794
795 if (nc->tentative && connection->agreed_pro_version < 92) {
796 rcu_read_unlock();
797 mutex_unlock(&sock->mutex);
798 drbd_err(connection, "--dry-run is not supported by peer");
799 return -EOPNOTSUPP;
800 }
801
802 size = sizeof(*p);
803 if (connection->agreed_pro_version >= 87)
804 size += strlen(nc->integrity_alg) + 1;
805
806 p->protocol = cpu_to_be32(nc->wire_protocol);
807 p->after_sb_0p = cpu_to_be32(nc->after_sb_0p);
808 p->after_sb_1p = cpu_to_be32(nc->after_sb_1p);
809 p->after_sb_2p = cpu_to_be32(nc->after_sb_2p);
810 p->two_primaries = cpu_to_be32(nc->two_primaries);
811 cf = 0;
812 if (nc->discard_my_data)
813 cf |= CF_DISCARD_MY_DATA;
814 if (nc->tentative)
815 cf |= CF_DRY_RUN;
816 p->conn_flags = cpu_to_be32(cf);
817
818 if (connection->agreed_pro_version >= 87)
819 strcpy(p->integrity_alg, nc->integrity_alg);
820 rcu_read_unlock();
821
822 return __conn_send_command(connection, sock, cmd, size, NULL, 0);
823}
824
825int drbd_send_protocol(struct drbd_connection *connection)
826{
827 int err;
828
829 mutex_lock(&connection->data.mutex);
830 err = __drbd_send_protocol(connection, P_PROTOCOL);
831 mutex_unlock(&connection->data.mutex);
832
833 return err;
834}
835
836static int _drbd_send_uuids(struct drbd_peer_device *peer_device, u64 uuid_flags)
837{
838 struct drbd_device *device = peer_device->device;
839 struct drbd_socket *sock;
840 struct p_uuids *p;
841 int i;
842
843 if (!get_ldev_if_state(device, D_NEGOTIATING))
844 return 0;
845
846 sock = &peer_device->connection->data;
847 p = drbd_prepare_command(peer_device, sock);
848 if (!p) {
849 put_ldev(device);
850 return -EIO;
851 }
852 spin_lock_irq(&device->ldev->md.uuid_lock);
853 for (i = UI_CURRENT; i < UI_SIZE; i++)
854 p->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
855 spin_unlock_irq(&device->ldev->md.uuid_lock);
856
857 device->comm_bm_set = drbd_bm_total_weight(device);
858 p->uuid[UI_SIZE] = cpu_to_be64(device->comm_bm_set);
859 rcu_read_lock();
860 uuid_flags |= rcu_dereference(peer_device->connection->net_conf)->discard_my_data ? 1 : 0;
861 rcu_read_unlock();
862 uuid_flags |= test_bit(CRASHED_PRIMARY, &device->flags) ? 2 : 0;
863 uuid_flags |= device->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
864 p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
865
866 put_ldev(device);
867 return drbd_send_command(peer_device, sock, P_UUIDS, sizeof(*p), NULL, 0);
868}
869
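/* Summary of the uuid_flags bits assembled above (derived from this file;
 * the wire semantics are interpreted by the peer in drbd_receiver.c):
 *   1 - net_conf->discard_my_data was set, peer should discard its data
 *   2 - CRASHED_PRIMARY: we are recovering from a primary crash
 *   4 - new_state_tmp.disk == D_INCONSISTENT
 *   8 - skip the initial sync, used by drbd_send_uuids_skip_initial_sync() */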
870int drbd_send_uuids(struct drbd_peer_device *peer_device)
871{
872 return _drbd_send_uuids(peer_device, 0);
873}
874
875int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *peer_device)
876{
877 return _drbd_send_uuids(peer_device, 8);
878}
879
880void drbd_print_uuids(struct drbd_device *device, const char *text)
881{
882 if (get_ldev_if_state(device, D_NEGOTIATING)) {
883 u64 *uuid = device->ldev->md.uuid;
884 drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX\n",
885 text,
886 (unsigned long long)uuid[UI_CURRENT],
887 (unsigned long long)uuid[UI_BITMAP],
888 (unsigned long long)uuid[UI_HISTORY_START],
889 (unsigned long long)uuid[UI_HISTORY_END]);
890 put_ldev(device);
891 } else {
892 drbd_info(device, "%s effective data uuid: %016llX\n",
893 text,
894 (unsigned long long)device->ed_uuid);
895 }
896}
897
898void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *peer_device)
899{
900 struct drbd_device *device = peer_device->device;
901 struct drbd_socket *sock;
902 struct p_rs_uuid *p;
903 u64 uuid;
904
905 D_ASSERT(device, device->state.disk == D_UP_TO_DATE);
906
907 uuid = device->ldev->md.uuid[UI_BITMAP];
908 if (uuid && uuid != UUID_JUST_CREATED)
909 uuid = uuid + UUID_NEW_BM_OFFSET;
910 else
911 get_random_bytes(&uuid, sizeof(u64));
912 drbd_uuid_set(device, UI_BITMAP, uuid);
913 drbd_print_uuids(device, "updated sync UUID");
914 drbd_md_sync(device);
915
916 sock = &peer_device->connection->data;
917 p = drbd_prepare_command(peer_device, sock);
918 if (p) {
919 p->uuid = cpu_to_be64(uuid);
920 drbd_send_command(peer_device, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
921 }
922}
923
924/* communicated if (agreed_features & DRBD_FF_WSAME) */
925void assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p, struct request_queue *q)
926{
927 if (q) {
928 p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q));
929 p->qlim->logical_block_size = cpu_to_be32(queue_logical_block_size(q));
930 p->qlim->alignment_offset = cpu_to_be32(queue_alignment_offset(q));
931 p->qlim->io_min = cpu_to_be32(queue_io_min(q));
932 p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
933 p->qlim->discard_enabled = blk_queue_discard(q);
934 p->qlim->write_same_capable = !!q->limits.max_write_same_sectors;
935 } else {
936 q = device->rq_queue;
937 p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q));
938 p->qlim->logical_block_size = cpu_to_be32(queue_logical_block_size(q));
939 p->qlim->alignment_offset = 0;
940 p->qlim->io_min = cpu_to_be32(queue_io_min(q));
941 p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
942 p->qlim->discard_enabled = 0;
943 p->qlim->write_same_capable = 0;
944 }
945}
946
947int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enum dds_flags flags)
948{
949 struct drbd_device *device = peer_device->device;
950 struct drbd_socket *sock;
951 struct p_sizes *p;
952 sector_t d_size, u_size;
953 int q_order_type;
954 unsigned int max_bio_size;
955 unsigned int packet_size;
956
957 sock = &peer_device->connection->data;
958 p = drbd_prepare_command(peer_device, sock);
959 if (!p)
960 return -EIO;
961
962 packet_size = sizeof(*p);
963 if (peer_device->connection->agreed_features & DRBD_FF_WSAME)
964 packet_size += sizeof(p->qlim[0]);
965
966 memset(p, 0, packet_size);
967 if (get_ldev_if_state(device, D_NEGOTIATING)) {
968 struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev);
969 d_size = drbd_get_max_capacity(device->ldev);
970 rcu_read_lock();
971 u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
972 rcu_read_unlock();
973 q_order_type = drbd_queue_order_type(device);
974 max_bio_size = queue_max_hw_sectors(q) << 9;
975 max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
976 assign_p_sizes_qlim(device, p, q);
977 put_ldev(device);
978 } else {
979 d_size = 0;
980 u_size = 0;
981 q_order_type = QUEUE_ORDERED_NONE;
982 max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
983 assign_p_sizes_qlim(device, p, NULL);
984 }
985
986 if (peer_device->connection->agreed_pro_version <= 94)
987 max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
988 else if (peer_device->connection->agreed_pro_version < 100)
989 max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE_P95);
990
991 p->d_size = cpu_to_be64(d_size);
992 p->u_size = cpu_to_be64(u_size);
993 p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(device->this_bdev));
994 p->max_bio_size = cpu_to_be32(max_bio_size);
995 p->queue_order_type = cpu_to_be16(q_order_type);
996 p->dds_flags = cpu_to_be16(flags);
997
998 return drbd_send_command(peer_device, sock, P_SIZES, packet_size, NULL, 0);
999}
1000
1001/**
1002 * drbd_send_current_state() - Sends the drbd state to the peer
1003 * @peer_device: DRBD peer device.
1004 */
1005int drbd_send_current_state(struct drbd_peer_device *peer_device)
1006{
1007 struct drbd_socket *sock;
1008 struct p_state *p;
1009
1010 sock = &peer_device->connection->data;
1011 p = drbd_prepare_command(peer_device, sock);
1012 if (!p)
1013 return -EIO;
1014 p->state = cpu_to_be32(peer_device->device->state.i); /* Within the send mutex */
1015 return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
1016}
1017
1018/**
1019 * drbd_send_state() - After a state change, sends the new state to the peer
1020 * @peer_device: DRBD peer device.
1021 * @state: the state to send, not necessarily the current state.
1022 *
1023 * Each state change queues an "after_state_ch" work, which will eventually
1024 * send the resulting new state to the peer. If more state changes happen
1025 * between queuing and processing of the after_state_ch work, we still
1026 * want to send each intermediary state in the order it occurred.
1027 */
1028int drbd_send_state(struct drbd_peer_device *peer_device, union drbd_state state)
1029{
1030 struct drbd_socket *sock;
1031 struct p_state *p;
1032
1033 sock = &peer_device->connection->data;
1034 p = drbd_prepare_command(peer_device, sock);
1035 if (!p)
1036 return -EIO;
1037 p->state = cpu_to_be32(state.i); /* Within the send mutex */
1038 return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
1039}
1040
1041int drbd_send_state_req(struct drbd_peer_device *peer_device, union drbd_state mask, union drbd_state val)
1042{
1043 struct drbd_socket *sock;
1044 struct p_req_state *p;
1045
1046 sock = &peer_device->connection->data;
1047 p = drbd_prepare_command(peer_device, sock);
1048 if (!p)
1049 return -EIO;
1050 p->mask = cpu_to_be32(mask.i);
1051 p->val = cpu_to_be32(val.i);
1052 return drbd_send_command(peer_device, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
1053}
1054
1055int conn_send_state_req(struct drbd_connection *connection, union drbd_state mask, union drbd_state val)
1056{
1057 enum drbd_packet cmd;
1058 struct drbd_socket *sock;
1059 struct p_req_state *p;
1060
1061 cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
1062 sock = &connection->data;
1063 p = conn_prepare_command(connection, sock);
1064 if (!p)
1065 return -EIO;
1066 p->mask = cpu_to_be32(mask.i);
1067 p->val = cpu_to_be32(val.i);
1068 return conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
1069}
1070
1071void drbd_send_sr_reply(struct drbd_peer_device *peer_device, enum drbd_state_rv retcode)
1072{
1073 struct drbd_socket *sock;
1074 struct p_req_state_reply *p;
1075
1076 sock = &peer_device->connection->meta;
1077 p = drbd_prepare_command(peer_device, sock);
1078 if (p) {
1079 p->retcode = cpu_to_be32(retcode);
1080 drbd_send_command(peer_device, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
1081 }
1082}
1083
1084void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode)
1085{
1086 struct drbd_socket *sock;
1087 struct p_req_state_reply *p;
1088 enum drbd_packet cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;
1089
1090 sock = &connection->meta;
1091 p = conn_prepare_command(connection, sock);
1092 if (p) {
1093 p->retcode = cpu_to_be32(retcode);
1094 conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
1095 }
1096}
1097
1098static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
1099{
1100 BUG_ON(code & ~0xf);
1101 p->encoding = (p->encoding & ~0xf) | code;
1102}
1103
1104static void dcbp_set_start(struct p_compressed_bm *p, int set)
1105{
1106 p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
1107}
1108
1109static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
1110{
1111 BUG_ON(n & ~0x7);
1112 p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
1113}
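/* Resulting layout of the p_compressed_bm encoding byte, as composed by the
 * three helpers above (informational summary, not a new protocol definition):
 *   bits 0..3 - bitmap encoding (enum drbd_bitmap_code, e.g. RLE_VLI_Bits)
 *   bits 4..6 - number of unused pad bits at the end of the code string
 *   bit  7    - whether the first (implicit) run is a run of set bits
 */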
1114
1115static int fill_bitmap_rle_bits(struct drbd_device *device,
1116 struct p_compressed_bm *p,
1117 unsigned int size,
1118 struct bm_xfer_ctx *c)
1119{
1120 struct bitstream bs;
1121 unsigned long plain_bits;
1122 unsigned long tmp;
1123 unsigned long rl;
1124 unsigned len;
1125 unsigned toggle;
1126 int bits, use_rle;
1127
1128 /* may we use this feature? */
1129 rcu_read_lock();
1130 use_rle = rcu_dereference(first_peer_device(device)->connection->net_conf)->use_rle;
1131 rcu_read_unlock();
1132 if (!use_rle || first_peer_device(device)->connection->agreed_pro_version < 90)
1133 return 0;
1134
1135 if (c->bit_offset >= c->bm_bits)
1136 return 0; /* nothing to do. */
1137
1138 /* use at most thus many bytes */
1139 bitstream_init(&bs, p->code, size, 0);
1140 memset(p->code, 0, size);
1141 /* plain bits covered in this code string */
1142 plain_bits = 0;
1143
1144 * p->encoding & 0x80 stores whether the first run is a run of set bits.
1145 * bit offset is implicit.
1146 * start with toggle == 2 to be able to tell the first iteration */
1147 toggle = 2;
1148
1149 /* see how many plain bits we can stuff into one packet
1150 * using RLE and VLI. */
1151 do {
1152 tmp = (toggle == 0) ? _drbd_bm_find_next_zero(device, c->bit_offset)
1153 : _drbd_bm_find_next(device, c->bit_offset);
1154 if (tmp == -1UL)
1155 tmp = c->bm_bits;
1156 rl = tmp - c->bit_offset;
1157
1158 if (toggle == 2) { /* first iteration */
1159 if (rl == 0) {
1160 /* the first checked bit was set,
1161 * store start value, */
1162 dcbp_set_start(p, 1);
1163 /* but skip encoding of zero run length */
1164 toggle = !toggle;
1165 continue;
1166 }
1167 dcbp_set_start(p, 0);
1168 }
1169
1170 /* paranoia: catch zero runlength.
1171 * can only happen if bitmap is modified while we scan it. */
1172 if (rl == 0) {
1173 drbd_err(device, "unexpected zero runlength while encoding bitmap "
1174 "t:%u bo:%lu\n", toggle, c->bit_offset);
1175 return -1;
1176 }
1177
1178 bits = vli_encode_bits(&bs, rl);
1179 if (bits == -ENOBUFS) /* buffer full */
1180 break;
1181 if (bits <= 0) {
1182 drbd_err(device, "error while encoding bitmap: %d\n", bits);
1183 return 0;
1184 }
1185
1186 toggle = !toggle;
1187 plain_bits += rl;
1188 c->bit_offset = tmp;
1189 } while (c->bit_offset < c->bm_bits);
1190
1191 len = bs.cur.b - p->code + !!bs.cur.bit;
1192
1193 if (plain_bits < (len << 3)) {
1194 /* incompressible with this method.
1195 * we need to rewind both word and bit position. */
1196 c->bit_offset -= plain_bits;
1197 bm_xfer_ctx_bit_to_word_offset(c);
1198 c->bit_offset = c->word_offset * BITS_PER_LONG;
1199 return 0;
1200 }
1201
1202 /* RLE + VLI was able to compress it just fine.
1203 * update c->word_offset. */
1204 bm_xfer_ctx_bit_to_word_offset(c);
1205
1206 /* store pad_bits */
1207 dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
1208
1209 return len;
1210}
1211
1212/**
1213 * send_bitmap_rle_or_plain
1214 *
1215 * Return 0 when done, 1 when another iteration is needed, and a negative error
1216 * code upon failure.
1217 */
1218static int
1219send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
1220{
1221 struct drbd_socket *sock = &first_peer_device(device)->connection->data;
1222 unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
1223 struct p_compressed_bm *p = sock->sbuf + header_size;
1224 int len, err;
1225
1226 len = fill_bitmap_rle_bits(device, p,
1227 DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);
1228 if (len < 0)
1229 return -EIO;
1230
1231 if (len) {
1232 dcbp_set_code(p, RLE_VLI_Bits);
1233 err = __send_command(first_peer_device(device)->connection, device->vnr, sock,
1234 P_COMPRESSED_BITMAP, sizeof(*p) + len,
1235 NULL, 0);
1236 c->packets[0]++;
1237 c->bytes[0] += header_size + sizeof(*p) + len;
1238
1239 if (c->bit_offset >= c->bm_bits)
1240 len = 0; /* DONE */
1241 } else {
1242 /* was not compressible.
1243 * send a buffer full of plain text bits instead. */
1244 unsigned int data_size;
1245 unsigned long num_words;
1246 unsigned long *p = sock->sbuf + header_size;
1247
1248 data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
1249 num_words = min_t(size_t, data_size / sizeof(*p),
1250 c->bm_words - c->word_offset);
1251 len = num_words * sizeof(*p);
1252 if (len)
1253 drbd_bm_get_lel(device, c->word_offset, num_words, p);
1254 err = __send_command(first_peer_device(device)->connection, device->vnr, sock, P_BITMAP, len, NULL, 0);
1255 c->word_offset += num_words;
1256 c->bit_offset = c->word_offset * BITS_PER_LONG;
1257
1258 c->packets[1]++;
1259 c->bytes[1] += header_size + len;
1260
1261 if (c->bit_offset > c->bm_bits)
1262 c->bit_offset = c->bm_bits;
1263 }
1264 if (!err) {
1265 if (len == 0) {
1266 INFO_bm_xfer_stats(device, "send", c);
1267 return 0;
1268 } else
1269 return 1;
1270 }
1271 return -EIO;
1272}
1273
1274/* See the comment at receive_bitmap() */
1275static int _drbd_send_bitmap(struct drbd_device *device)
1276{
1277 struct bm_xfer_ctx c;
1278 int err;
1279
1280 if (!expect(device->bitmap))
1281 return false;
1282
1283 if (get_ldev(device)) {
1284 if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC)) {
1285 drbd_info(device, "Writing the whole bitmap, MDF_FullSync was set.\n");
1286 drbd_bm_set_all(device);
1287 if (drbd_bm_write(device)) {
1288 /* write_bm did fail! Leave the full sync flag set in the meta data,
1289 * but otherwise process as per normal - need to tell other
1290 * side that a full resync is required! */
1291 drbd_err(device, "Failed to write bitmap to disk!\n");
1292 } else {
1293 drbd_md_clear_flag(device, MDF_FULL_SYNC);
1294 drbd_md_sync(device);
1295 }
1296 }
1297 put_ldev(device);
1298 }
1299
1300 c = (struct bm_xfer_ctx) {
1301 .bm_bits = drbd_bm_bits(device),
1302 .bm_words = drbd_bm_words(device),
1303 };
1304
1305 do {
1306 err = send_bitmap_rle_or_plain(device, &c);
1307 } while (err > 0);
1308
1309 return err == 0;
1310}
1311
1312int drbd_send_bitmap(struct drbd_device *device)
1313{
1314 struct drbd_socket *sock = &first_peer_device(device)->connection->data;
1315 int err = -1;
1316
1317 mutex_lock(&sock->mutex);
1318 if (sock->socket)
1319 err = !_drbd_send_bitmap(device);
1320 mutex_unlock(&sock->mutex);
1321 return err;
1322}
1323
1324void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr, u32 set_size)
1325{
1326 struct drbd_socket *sock;
1327 struct p_barrier_ack *p;
1328
1329 if (connection->cstate < C_WF_REPORT_PARAMS)
1330 return;
1331
1332 sock = &connection->meta;
1333 p = conn_prepare_command(connection, sock);
1334 if (!p)
1335 return;
1336 p->barrier = barrier_nr;
1337 p->set_size = cpu_to_be32(set_size);
1338 conn_send_command(connection, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
1339}
1340
1341/**
1342 * _drbd_send_ack() - Sends an ack packet
1343 * @peer_device: DRBD peer device.
1344 * @cmd: Packet command code.
1345 * @sector: sector, needs to be in big endian byte order
1346 * @blksize: size in byte, needs to be in big endian byte order
1347 * @block_id: Id, big endian byte order
1348 */
1349static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
1350 u64 sector, u32 blksize, u64 block_id)
1351{
1352 struct drbd_socket *sock;
1353 struct p_block_ack *p;
1354
1355 if (peer_device->device->state.conn < C_CONNECTED)
1356 return -EIO;
1357
1358 sock = &peer_device->connection->meta;
1359 p = drbd_prepare_command(peer_device, sock);
1360 if (!p)
1361 return -EIO;
1362 p->sector = sector;
1363 p->block_id = block_id;
1364 p->blksize = blksize;
1365 p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
1366 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
1367}
1368
1369/* dp->sector and dp->block_id already/still in network byte order,
1370 * data_size is payload size according to dp->head,
1371 * and may need to be corrected for digest size. */
1372void drbd_send_ack_dp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
1373 struct p_data *dp, int data_size)
1374{
1375 if (peer_device->connection->peer_integrity_tfm)
1376 data_size -= crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm);
1377 _drbd_send_ack(peer_device, cmd, dp->sector, cpu_to_be32(data_size),
1378 dp->block_id);
1379}
1380
1381void drbd_send_ack_rp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
1382 struct p_block_req *rp)
1383{
1384 _drbd_send_ack(peer_device, cmd, rp->sector, rp->blksize, rp->block_id);
1385}
1386
1387/**
1388 * drbd_send_ack() - Sends an ack packet
1389 * @peer_device: DRBD peer device
1390 * @cmd: packet command code
1391 * @peer_req: peer request
1392 */
1393int drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
1394 struct drbd_peer_request *peer_req)
1395{
1396 return _drbd_send_ack(peer_device, cmd,
1397 cpu_to_be64(peer_req->i.sector),
1398 cpu_to_be32(peer_req->i.size),
1399 peer_req->block_id);
1400}
1401
1402/* This function misuses the block_id field to signal if the blocks
1403 * are in sync or not. */
1404int drbd_send_ack_ex(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
1405 sector_t sector, int blksize, u64 block_id)
1406{
1407 return _drbd_send_ack(peer_device, cmd,
1408 cpu_to_be64(sector),
1409 cpu_to_be32(blksize),
1410 cpu_to_be64(block_id));
1411}
1412
1413int drbd_send_rs_deallocated(struct drbd_peer_device *peer_device,
1414 struct drbd_peer_request *peer_req)
1415{
1416 struct drbd_socket *sock;
1417 struct p_block_desc *p;
1418
1419 sock = &peer_device->connection->data;
1420 p = drbd_prepare_command(peer_device, sock);
1421 if (!p)
1422 return -EIO;
1423 p->sector = cpu_to_be64(peer_req->i.sector);
1424 p->blksize = cpu_to_be32(peer_req->i.size);
1425 p->pad = 0;
1426 return drbd_send_command(peer_device, sock, P_RS_DEALLOCATED, sizeof(*p), NULL, 0);
1427}
1428
1429int drbd_send_drequest(struct drbd_peer_device *peer_device, int cmd,
1430 sector_t sector, int size, u64 block_id)
1431{
1432 struct drbd_socket *sock;
1433 struct p_block_req *p;
1434
1435 sock = &peer_device->connection->data;
1436 p = drbd_prepare_command(peer_device, sock);
1437 if (!p)
1438 return -EIO;
1439 p->sector = cpu_to_be64(sector);
1440 p->block_id = block_id;
1441 p->blksize = cpu_to_be32(size);
1442 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
1443}
1444
1445int drbd_send_drequest_csum(struct drbd_peer_device *peer_device, sector_t sector, int size,
1446 void *digest, int digest_size, enum drbd_packet cmd)
1447{
1448 struct drbd_socket *sock;
1449 struct p_block_req *p;
1450
1451 /* FIXME: Put the digest into the preallocated socket buffer. */
1452
1453 sock = &peer_device->connection->data;
1454 p = drbd_prepare_command(peer_device, sock);
1455 if (!p)
1456 return -EIO;
1457 p->sector = cpu_to_be64(sector);
1458 p->block_id = ID_SYNCER /* unused */;
1459 p->blksize = cpu_to_be32(size);
1460 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), digest, digest_size);
1461}
1462
1463int drbd_send_ov_request(struct drbd_peer_device *peer_device, sector_t sector, int size)
1464{
1465 struct drbd_socket *sock;
1466 struct p_block_req *p;
1467
1468 sock = &peer_device->connection->data;
1469 p = drbd_prepare_command(peer_device, sock);
1470 if (!p)
1471 return -EIO;
1472 p->sector = cpu_to_be64(sector);
1473 p->block_id = ID_SYNCER /* unused */;
1474 p->blksize = cpu_to_be32(size);
1475 return drbd_send_command(peer_device, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
1476}
1477
1478/* called on sndtimeo
1479 * returns false if we should retry,
1480 * true if we think connection is dead
1481 */
1482static int we_should_drop_the_connection(struct drbd_connection *connection, struct socket *sock)
1483{
1484 int drop_it;
1485 /* long elapsed = (long)(jiffies - device->last_received); */
1486
1487 drop_it = connection->meta.socket == sock
1488 || !connection->ack_receiver.task
1489 || get_t_state(&connection->ack_receiver) != RUNNING
1490 || connection->cstate < C_WF_REPORT_PARAMS;
1491
1492 if (drop_it)
1493 return true;
1494
1495 drop_it = !--connection->ko_count;
1496 if (!drop_it) {
1497 drbd_err(connection, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
1498 current->comm, current->pid, connection->ko_count);
1499 request_ping(connection);
1500 }
1501
1502 return drop_it; /* && (device->state == R_PRIMARY) */;
1503}
1504
1505static void drbd_update_congested(struct drbd_connection *connection)
1506{
1507 struct sock *sk = connection->data.socket->sk;
1508 if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
1509 set_bit(NET_CONGESTED, &connection->flags);
1510}
1511
1512/* The idea of sendpage seems to be to put some kind of reference
1513 * to the page into the skb, and to hand it over to the NIC. In
1514 * this process get_page() gets called.
1515 *
1516 * As soon as the page was really sent over the network put_page()
1517 * gets called by some part of the network layer. [ NIC driver? ]
1518 *
1519 * [ get_page() / put_page() increment/decrement the count. If count
1520 * reaches 0 the page will be freed. ]
1521 *
1522 * This works nicely with pages from FSs.
1523 * But this means that in protocol A we might signal IO completion too early!
1524 *
1525 * In order not to corrupt data during a resync we must make sure
1526 * that we do not reuse our own buffer pages (EEs) too early, therefore
1527 * we have the net_ee list.
1528 *
1529 * XFS seems to have problems, still, it submits pages with page_count == 0!
1530 * As a workaround, we disable sendpage on pages
1531 * with page_count == 0 or PageSlab.
1532 */
1533static int _drbd_no_send_page(struct drbd_peer_device *peer_device, struct page *page,
1534 int offset, size_t size, unsigned msg_flags)
1535{
1536 struct socket *socket;
1537 void *addr;
1538 int err;
1539
1540 socket = peer_device->connection->data.socket;
1541 addr = kmap(page) + offset;
1542 err = drbd_send_all(peer_device->connection, socket, addr, size, msg_flags);
1543 kunmap(page);
1544 if (!err)
1545 peer_device->device->send_cnt += size >> 9;
1546 return err;
1547}
1548
1549static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *page,
1550 int offset, size_t size, unsigned msg_flags)
1551{
1552 struct socket *socket = peer_device->connection->data.socket;
1553 mm_segment_t oldfs = get_fs();
1554 int len = size;
1555 int err = -EIO;
1556
1557 /* e.g. XFS meta- & log-data is in slab pages, which have a
1558 * page_count of 0 and/or have PageSlab() set.
1559 * we cannot use send_page for those, as that does get_page();
1560 * put_page(); and would cause either a VM_BUG directly, or
1561 * __page_cache_release a page that would actually still be referenced
1562 * by someone, leading to some obscure delayed Oops somewhere else. */
1563 if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
1564 return _drbd_no_send_page(peer_device, page, offset, size, msg_flags);
1565
1566 msg_flags |= MSG_NOSIGNAL;
1567 drbd_update_congested(peer_device->connection);
1568 set_fs(KERNEL_DS);
1569 do {
1570 int sent;
1571
1572 sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
1573 if (sent <= 0) {
1574 if (sent == -EAGAIN) {
1575 if (we_should_drop_the_connection(peer_device->connection, socket))
1576 break;
1577 continue;
1578 }
1579 drbd_warn(peer_device->device, "%s: size=%d len=%d sent=%d\n",
1580 __func__, (int)size, len, sent);
1581 if (sent < 0)
1582 err = sent;
1583 break;
1584 }
1585 len -= sent;
1586 offset += sent;
1587 } while (len > 0 /* THINK && device->cstate >= C_CONNECTED*/);
1588 set_fs(oldfs);
1589 clear_bit(NET_CONGESTED, &peer_device->connection->flags);
1590
1591 if (len == 0) {
1592 err = 0;
1593 peer_device->device->send_cnt += size >> 9;
1594 }
1595 return err;
1596}
1597
1598static int _drbd_send_bio(struct drbd_peer_device *peer_device, struct bio *bio)
1599{
1600 struct bio_vec bvec;
1601 struct bvec_iter iter;
1602
1603 /* hint all but last page with MSG_MORE */
1604 bio_for_each_segment(bvec, bio, iter) {
1605 int err;
1606
1607 err = _drbd_no_send_page(peer_device, bvec.bv_page,
1608 bvec.bv_offset, bvec.bv_len,
1609 bio_iter_last(bvec, iter)
1610 ? 0 : MSG_MORE);
1611 if (err)
1612 return err;
1613 /* REQ_OP_WRITE_SAME has only one segment */
1614 if (bio_op(bio) == REQ_OP_WRITE_SAME)
1615 break;
1616 }
1617 return 0;
1618}
1619
1620static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *bio)
1621{
1622 struct bio_vec bvec;
1623 struct bvec_iter iter;
1624
1625 /* hint all but last page with MSG_MORE */
1626 bio_for_each_segment(bvec, bio, iter) {
1627 int err;
1628
1629 err = _drbd_send_page(peer_device, bvec.bv_page,
1630 bvec.bv_offset, bvec.bv_len,
1631 bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
1632 if (err)
1633 return err;
1634 /* REQ_OP_WRITE_SAME has only one segment */
1635 if (bio_op(bio) == REQ_OP_WRITE_SAME)
1636 break;
1637 }
1638 return 0;
1639}
1640
1641static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
1642 struct drbd_peer_request *peer_req)
1643{
1644 struct page *page = peer_req->pages;
1645 unsigned len = peer_req->i.size;
1646 int err;
1647
1648 /* hint all but last page with MSG_MORE */
1649 page_chain_for_each(page) {
1650 unsigned l = min_t(unsigned, len, PAGE_SIZE);
1651
1652 err = _drbd_send_page(peer_device, page, 0, l,
1653 page_chain_next(page) ? MSG_MORE : 0);
1654 if (err)
1655 return err;
1656 len -= l;
1657 }
1658 return 0;
1659}
1660
1661static u32 bio_flags_to_wire(struct drbd_connection *connection,
1662 struct bio *bio)
1663{
1664 if (connection->agreed_pro_version >= 95)
1665 return (bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0) |
1666 (bio->bi_opf & REQ_FUA ? DP_FUA : 0) |
1667 (bio->bi_opf & REQ_PREFLUSH ? DP_FLUSH : 0) |
1668 (bio_op(bio) == REQ_OP_WRITE_SAME ? DP_WSAME : 0) |
1669 (bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0) |
1670 (bio_op(bio) == REQ_OP_WRITE_ZEROES ? DP_DISCARD : 0);
1671 else
1672 return bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0;
1673}
1674
1675/* Used to send write or TRIM aka REQ_DISCARD requests
1676 * R_PRIMARY -> Peer (P_DATA, P_TRIM)
1677 */
1678int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *req)
1679{
1680 struct drbd_device *device = peer_device->device;
1681 struct drbd_socket *sock;
1682 struct p_data *p;
1683 struct p_wsame *wsame = NULL;
1684 void *digest_out;
1685 unsigned int dp_flags = 0;
1686 int digest_size;
1687 int err;
1688
1689 sock = &peer_device->connection->data;
1690 p = drbd_prepare_command(peer_device, sock);
1691 digest_size = peer_device->connection->integrity_tfm ?
1692 crypto_ahash_digestsize(peer_device->connection->integrity_tfm) : 0;
1693
1694 if (!p)
1695 return -EIO;
1696 p->sector = cpu_to_be64(req->i.sector);
1697 p->block_id = (unsigned long)req;
1698 p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
1699 dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio);
1700 if (device->state.conn >= C_SYNC_SOURCE &&
1701 device->state.conn <= C_PAUSED_SYNC_T)
1702 dp_flags |= DP_MAY_SET_IN_SYNC;
1703 if (peer_device->connection->agreed_pro_version >= 100) {
1704 if (req->rq_state & RQ_EXP_RECEIVE_ACK)
1705 dp_flags |= DP_SEND_RECEIVE_ACK;
1706 /* During resync, request an explicit write ack,
1707 * even in protocol != C */
1708 if (req->rq_state & RQ_EXP_WRITE_ACK
1709 || (dp_flags & DP_MAY_SET_IN_SYNC))
1710 dp_flags |= DP_SEND_WRITE_ACK;
1711 }
1712 p->dp_flags = cpu_to_be32(dp_flags);
1713
1714 if (dp_flags & DP_DISCARD) {
1715 struct p_trim *t = (struct p_trim*)p;
1716 t->size = cpu_to_be32(req->i.size);
1717 err = __send_command(peer_device->connection, device->vnr, sock, P_TRIM, sizeof(*t), NULL, 0);
1718 goto out;
1719 }
1720 if (dp_flags & DP_WSAME) {
1721 /* this will only work if DRBD_FF_WSAME is set AND the
1722 * handshake agreed that all nodes and backend devices are
1723 * WRITE_SAME capable and agree on logical_block_size */
1724 wsame = (struct p_wsame*)p;
1725 digest_out = wsame + 1;
1726 wsame->size = cpu_to_be32(req->i.size);
1727 } else
1728 digest_out = p + 1;
1729
1730 /* our digest is still only over the payload.
1731 * TRIM does not carry any payload. */
1732 if (digest_size)
1733 drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest_out);
1734 if (wsame) {
1735 err =
1736 __send_command(peer_device->connection, device->vnr, sock, P_WSAME,
1737 sizeof(*wsame) + digest_size, NULL,
1738 bio_iovec(req->master_bio).bv_len);
1739 } else
1740 err =
1741 __send_command(peer_device->connection, device->vnr, sock, P_DATA,
1742 sizeof(*p) + digest_size, NULL, req->i.size);
1743 if (!err) {
1744		/* For protocol A, we have to memcpy the payload into
1745		 * socket buffers: we may complete the request as soon as
1746		 * we handed the data over to tcp, at which point the bio
1747		 * pages may become invalid.
1748		 *
1749		 * With data integrity enabled, we copy it as well, so that
1750		 * even if the bio pages are still being modified, the data
1751		 * on the wire does not change.  Thus, if the digest checks
1752		 * out on the sending side but does not match on the receiving
1753		 * side, we know the corruption happened elsewhere, not on the wire.
1754		 */
1755 if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || digest_size)
1756 err = _drbd_send_bio(peer_device, req->master_bio);
1757 else
1758 err = _drbd_send_zc_bio(peer_device, req->master_bio);
1759
1760 /* double check digest, sometimes buffers have been modified in flight. */
1761 if (digest_size > 0 && digest_size <= 64) {
1762 /* 64 byte, 512 bit, is the largest digest size
1763 * currently supported in kernel crypto. */
1764 unsigned char digest[64];
1765 drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest);
1766 if (memcmp(p + 1, digest, digest_size)) {
1767 drbd_warn(device,
1768 "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
1769 (unsigned long long)req->i.sector, req->i.size);
1770 }
1771 } /* else if (digest_size > 64) {
1772 ... Be noisy about digest too large ...
1773 } */
1774 }
1775out:
1776 mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
1777
1778 return err;
1779}
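
/*
 * Rough on-the-wire layout produced by drbd_send_dblock() above (a sketch
 * derived from the code, not a protocol specification):
 *
 *	P_DATA:	 [header][struct p_data][optional digest][full bio payload]
 *	P_WSAME: [header][struct p_wsame][optional digest][first bvec of the bio]
 *	P_TRIM:	 [header][struct p_trim]	(no digest, no payload)
 *
 * The packet header itself is taken care of by drbd_prepare_command() and
 * __send_command().
 */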
1780
1781/* answer packet, used to send data back for read requests:
1782 * Peer -> (diskless) R_PRIMARY (P_DATA_REPLY)
1783 * C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
1784 */
1785int drbd_send_block(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
1786 struct drbd_peer_request *peer_req)
1787{
1788 struct drbd_device *device = peer_device->device;
1789 struct drbd_socket *sock;
1790 struct p_data *p;
1791 int err;
1792 int digest_size;
1793
1794 sock = &peer_device->connection->data;
1795 p = drbd_prepare_command(peer_device, sock);
1796
1797 digest_size = peer_device->connection->integrity_tfm ?
1798 crypto_ahash_digestsize(peer_device->connection->integrity_tfm) : 0;
1799
1800 if (!p)
1801 return -EIO;
1802 p->sector = cpu_to_be64(peer_req->i.sector);
1803 p->block_id = peer_req->block_id;
1804 p->seq_num = 0; /* unused */
1805 p->dp_flags = 0;
1806 if (digest_size)
1807 drbd_csum_ee(peer_device->connection->integrity_tfm, peer_req, p + 1);
1808 err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*p) + digest_size, NULL, peer_req->i.size);
1809 if (!err)
1810 err = _drbd_send_zc_ee(peer_device, peer_req);
1811 mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
1812
1813 return err;
1814}
1815
1816int drbd_send_out_of_sync(struct drbd_peer_device *peer_device, struct drbd_request *req)
1817{
1818 struct drbd_socket *sock;
1819 struct p_block_desc *p;
1820
1821 sock = &peer_device->connection->data;
1822 p = drbd_prepare_command(peer_device, sock);
1823 if (!p)
1824 return -EIO;
1825 p->sector = cpu_to_be64(req->i.sector);
1826 p->blksize = cpu_to_be32(req->i.size);
1827 return drbd_send_command(peer_device, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
1828}
1829
1830/*
1831 drbd_send distinguishes two cases:
1832
1833 Packets sent via the data socket "sock"
1834 and packets sent via the meta data socket "msock"
1835
1836 sock msock
1837 -----------------+-------------------------+------------------------------
1838 timeout conf.timeout / 2 conf.timeout / 2
1839 timeout action send a ping via msock Abort communication
1840 and close all sockets
1841*/
1842
1843/*
1844 * the caller must already hold the appropriate [m]sock mutex!
1845 */
1846int drbd_send(struct drbd_connection *connection, struct socket *sock,
1847 void *buf, size_t size, unsigned msg_flags)
1848{
1849 struct kvec iov = {.iov_base = buf, .iov_len = size};
1850 struct msghdr msg;
1851 int rv, sent = 0;
1852
1853 if (!sock)
1854 return -EBADR;
1855
1856 /* THINK if (signal_pending) return ... ? */
1857
1858 msg.msg_name = NULL;
1859 msg.msg_namelen = 0;
1860 msg.msg_control = NULL;
1861 msg.msg_controllen = 0;
1862 msg.msg_flags = msg_flags | MSG_NOSIGNAL;
1863
1864 iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, size);
1865
1866 if (sock == connection->data.socket) {
1867 rcu_read_lock();
1868 connection->ko_count = rcu_dereference(connection->net_conf)->ko_count;
1869 rcu_read_unlock();
1870 drbd_update_congested(connection);
1871 }
1872 do {
1873 rv = sock_sendmsg(sock, &msg);
1874 if (rv == -EAGAIN) {
1875 if (we_should_drop_the_connection(connection, sock))
1876 break;
1877 else
1878 continue;
1879 }
1880 if (rv == -EINTR) {
1881 flush_signals(current);
1882 rv = 0;
1883 }
1884 if (rv < 0)
1885 break;
1886 sent += rv;
1887 } while (sent < size);
1888
1889 if (sock == connection->data.socket)
1890 clear_bit(NET_CONGESTED, &connection->flags);
1891
1892 if (rv <= 0) {
1893 if (rv != -EAGAIN) {
1894 drbd_err(connection, "%s_sendmsg returned %d\n",
1895 sock == connection->meta.socket ? "msock" : "sock",
1896 rv);
1897 conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);
1898 } else
1899 conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD);
1900 }
1901
1902 return sent;
1903}
1904
1905/**
1906 * drbd_send_all  -  Send an entire buffer
 * @connection:	DRBD connection to send on
 * @sock:	data or meta socket to use
 * @buffer:	buffer to send
 * @size:	number of bytes to send
 * @msg_flags:	MSG_* flags, passed on to sock_sendmsg()
1907 *
1908 * Returns 0 upon success and a negative error value otherwise.
1909 */
1910int drbd_send_all(struct drbd_connection *connection, struct socket *sock, void *buffer,
1911 size_t size, unsigned msg_flags)
1912{
1913 int err;
1914
1915 err = drbd_send(connection, sock, buffer, size, msg_flags);
1916 if (err < 0)
1917 return err;
1918 if (err != size)
1919 return -EIO;
1920 return 0;
1921}
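
/*
 * Usage sketch (hypothetical caller; buf and len are placeholders).  Within
 * DRBD, drbd_send_all() is normally called with the corresponding [m]sock
 * mutex already held.  It returns 0 on success, -EIO on a short send, and
 * the negative errno from the socket layer otherwise:
 *
 *	err = drbd_send_all(connection, sock->socket, buf, len, 0);
 *	if (err)
 *		return err;
 */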
1922
1923static int drbd_open(struct block_device *bdev, fmode_t mode)
1924{
1925 struct drbd_device *device = bdev->bd_disk->private_data;
1926 unsigned long flags;
1927 int rv = 0;
1928
1929 mutex_lock(&drbd_main_mutex);
1930 spin_lock_irqsave(&device->resource->req_lock, flags);
1931 /* to have a stable device->state.role
1932 * and no race with updating open_cnt */
1933
1934 if (device->state.role != R_PRIMARY) {
1935 if (mode & FMODE_WRITE)
1936 rv = -EROFS;
1937 else if (!allow_oos)
1938 rv = -EMEDIUMTYPE;
1939 }
1940
1941 if (!rv)
1942 device->open_cnt++;
1943 spin_unlock_irqrestore(&device->resource->req_lock, flags);
1944 mutex_unlock(&drbd_main_mutex);
1945
1946 return rv;
1947}
1948
1949static void drbd_release(struct gendisk *gd, fmode_t mode)
1950{
1951 struct drbd_device *device = gd->private_data;
1952 mutex_lock(&drbd_main_mutex);
1953 device->open_cnt--;
1954 mutex_unlock(&drbd_main_mutex);
1955}
1956
1957static void drbd_set_defaults(struct drbd_device *device)
1958{
1959 /* Beware! The actual layout differs
1960 * between big endian and little endian */
1961 device->state = (union drbd_dev_state) {
1962 { .role = R_SECONDARY,
1963 .peer = R_UNKNOWN,
1964 .conn = C_STANDALONE,
1965 .disk = D_DISKLESS,
1966 .pdsk = D_UNKNOWN,
1967 } };
1968}
1969
1970void drbd_init_set_defaults(struct drbd_device *device)
1971{
1972 /* the memset(,0,) did most of this.
1973 * note: only assignments, no allocation in here */
1974
1975 drbd_set_defaults(device);
1976
1977 atomic_set(&device->ap_bio_cnt, 0);
1978 atomic_set(&device->ap_actlog_cnt, 0);
1979 atomic_set(&device->ap_pending_cnt, 0);
1980 atomic_set(&device->rs_pending_cnt, 0);
1981 atomic_set(&device->unacked_cnt, 0);
1982 atomic_set(&device->local_cnt, 0);
1983 atomic_set(&device->pp_in_use_by_net, 0);
1984 atomic_set(&device->rs_sect_in, 0);
1985 atomic_set(&device->rs_sect_ev, 0);
1986 atomic_set(&device->ap_in_flight, 0);
1987 atomic_set(&device->md_io.in_use, 0);
1988
1989 mutex_init(&device->own_state_mutex);
1990 device->state_mutex = &device->own_state_mutex;
1991
1992 spin_lock_init(&device->al_lock);
1993 spin_lock_init(&device->peer_seq_lock);
1994
1995 INIT_LIST_HEAD(&device->active_ee);
1996 INIT_LIST_HEAD(&device->sync_ee);
1997 INIT_LIST_HEAD(&device->done_ee);
1998 INIT_LIST_HEAD(&device->read_ee);
1999 INIT_LIST_HEAD(&device->net_ee);
2000 INIT_LIST_HEAD(&device->resync_reads);
2001 INIT_LIST_HEAD(&device->resync_work.list);
2002 INIT_LIST_HEAD(&device->unplug_work.list);
2003 INIT_LIST_HEAD(&device->bm_io_work.w.list);
2004 INIT_LIST_HEAD(&device->pending_master_completion[0]);
2005 INIT_LIST_HEAD(&device->pending_master_completion[1]);
2006 INIT_LIST_HEAD(&device->pending_completion[0]);
2007 INIT_LIST_HEAD(&device->pending_completion[1]);
2008
2009 device->resync_work.cb = w_resync_timer;
2010 device->unplug_work.cb = w_send_write_hint;
2011 device->bm_io_work.w.cb = w_bitmap_io;
2012
2013 init_timer(&device->resync_timer);
2014 init_timer(&device->md_sync_timer);
2015 init_timer(&device->start_resync_timer);
2016 init_timer(&device->request_timer);
2017 device->resync_timer.function = resync_timer_fn;
2018 device->resync_timer.data = (unsigned long) device;
2019 device->md_sync_timer.function = md_sync_timer_fn;
2020 device->md_sync_timer.data = (unsigned long) device;
2021 device->start_resync_timer.function = start_resync_timer_fn;
2022 device->start_resync_timer.data = (unsigned long) device;
2023 device->request_timer.function = request_timer_fn;
2024 device->request_timer.data = (unsigned long) device;
2025
2026 init_waitqueue_head(&device->misc_wait);
2027 init_waitqueue_head(&device->state_wait);
2028 init_waitqueue_head(&device->ee_wait);
2029 init_waitqueue_head(&device->al_wait);
2030 init_waitqueue_head(&device->seq_wait);
2031
2032 device->resync_wenr = LC_FREE;
2033 device->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
2034 device->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
2035}
2036
2037void drbd_device_cleanup(struct drbd_device *device)
2038{
2039 int i;
2040 if (first_peer_device(device)->connection->receiver.t_state != NONE)
2041 drbd_err(device, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
2042 first_peer_device(device)->connection->receiver.t_state);
2043
2044 device->al_writ_cnt =
2045 device->bm_writ_cnt =
2046 device->read_cnt =
2047 device->recv_cnt =
2048 device->send_cnt =
2049 device->writ_cnt =
2050 device->p_size =
2051 device->rs_start =
2052 device->rs_total =
2053 device->rs_failed = 0;
2054 device->rs_last_events = 0;
2055 device->rs_last_sect_ev = 0;
2056 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2057 device->rs_mark_left[i] = 0;
2058 device->rs_mark_time[i] = 0;
2059 }
2060 D_ASSERT(device, first_peer_device(device)->connection->net_conf == NULL);
2061
2062 drbd_set_my_capacity(device, 0);
2063 if (device->bitmap) {
2064 /* maybe never allocated. */
2065 drbd_bm_resize(device, 0, 1);
2066 drbd_bm_cleanup(device);
2067 }
2068
2069 drbd_backing_dev_free(device, device->ldev);
2070 device->ldev = NULL;
2071
2072 clear_bit(AL_SUSPENDED, &device->flags);
2073
2074 D_ASSERT(device, list_empty(&device->active_ee));
2075 D_ASSERT(device, list_empty(&device->sync_ee));
2076 D_ASSERT(device, list_empty(&device->done_ee));
2077 D_ASSERT(device, list_empty(&device->read_ee));
2078 D_ASSERT(device, list_empty(&device->net_ee));
2079 D_ASSERT(device, list_empty(&device->resync_reads));
2080 D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
2081 D_ASSERT(device, list_empty(&device->resync_work.list));
2082 D_ASSERT(device, list_empty(&device->unplug_work.list));
2083
2084 drbd_set_defaults(device);
2085}
2086
2087
2088static void drbd_destroy_mempools(void)
2089{
2090 struct page *page;
2091
2092 while (drbd_pp_pool) {
2093 page = drbd_pp_pool;
2094 drbd_pp_pool = (struct page *)page_private(page);
2095 __free_page(page);
2096 drbd_pp_vacant--;
2097 }
2098
2099 /* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */
2100
2101 if (drbd_md_io_bio_set)
2102 bioset_free(drbd_md_io_bio_set);
2103 if (drbd_md_io_page_pool)
2104 mempool_destroy(drbd_md_io_page_pool);
2105 if (drbd_ee_mempool)
2106 mempool_destroy(drbd_ee_mempool);
2107 if (drbd_request_mempool)
2108 mempool_destroy(drbd_request_mempool);
2109 if (drbd_ee_cache)
2110 kmem_cache_destroy(drbd_ee_cache);
2111 if (drbd_request_cache)
2112 kmem_cache_destroy(drbd_request_cache);
2113 if (drbd_bm_ext_cache)
2114 kmem_cache_destroy(drbd_bm_ext_cache);
2115 if (drbd_al_ext_cache)
2116 kmem_cache_destroy(drbd_al_ext_cache);
2117
2118 drbd_md_io_bio_set = NULL;
2119 drbd_md_io_page_pool = NULL;
2120 drbd_ee_mempool = NULL;
2121 drbd_request_mempool = NULL;
2122 drbd_ee_cache = NULL;
2123 drbd_request_cache = NULL;
2124 drbd_bm_ext_cache = NULL;
2125 drbd_al_ext_cache = NULL;
2126
2127 return;
2128}
2129
2130static int drbd_create_mempools(void)
2131{
2132 struct page *page;
2133 const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
2134 int i;
2135
2136 /* prepare our caches and mempools */
2137 drbd_request_mempool = NULL;
2138 drbd_ee_cache = NULL;
2139 drbd_request_cache = NULL;
2140 drbd_bm_ext_cache = NULL;
2141 drbd_al_ext_cache = NULL;
2142 drbd_pp_pool = NULL;
2143 drbd_md_io_page_pool = NULL;
2144 drbd_md_io_bio_set = NULL;
2145
2146 /* caches */
2147 drbd_request_cache = kmem_cache_create(
2148 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
2149 if (drbd_request_cache == NULL)
2150 goto Enomem;
2151
2152 drbd_ee_cache = kmem_cache_create(
2153 "drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL);
2154 if (drbd_ee_cache == NULL)
2155 goto Enomem;
2156
2157 drbd_bm_ext_cache = kmem_cache_create(
2158 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
2159 if (drbd_bm_ext_cache == NULL)
2160 goto Enomem;
2161
2162 drbd_al_ext_cache = kmem_cache_create(
2163 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
2164 if (drbd_al_ext_cache == NULL)
2165 goto Enomem;
2166
2167 /* mempools */
2168 drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
2169 if (drbd_md_io_bio_set == NULL)
2170 goto Enomem;
2171
2172 drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
2173 if (drbd_md_io_page_pool == NULL)
2174 goto Enomem;
2175
2176 drbd_request_mempool = mempool_create_slab_pool(number,
2177 drbd_request_cache);
2178 if (drbd_request_mempool == NULL)
2179 goto Enomem;
2180
2181 drbd_ee_mempool = mempool_create_slab_pool(number, drbd_ee_cache);
2182 if (drbd_ee_mempool == NULL)
2183 goto Enomem;
2184
2185 /* drbd's page pool */
2186 spin_lock_init(&drbd_pp_lock);
2187
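	/*
	 * Note: drbd_pp_pool is a plain singly linked list of pages, chained
	 * through page_private(); drbd_destroy_mempools() above walks and
	 * frees it the same way.  The loop below builds the chain by pushing
	 * each freshly allocated page onto the head of the list.
	 */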
2188 for (i = 0; i < number; i++) {
2189 page = alloc_page(GFP_HIGHUSER);
2190 if (!page)
2191 goto Enomem;
2192 set_page_private(page, (unsigned long)drbd_pp_pool);
2193 drbd_pp_pool = page;
2194 }
2195 drbd_pp_vacant = number;
2196
2197 return 0;
2198
2199Enomem:
2200 drbd_destroy_mempools(); /* in case we allocated some */
2201 return -ENOMEM;
2202}
2203
2204static void drbd_release_all_peer_reqs(struct drbd_device *device)
2205{
2206 int rr;
2207
2208 rr = drbd_free_peer_reqs(device, &device->active_ee);
2209 if (rr)
2210 drbd_err(device, "%d EEs in active list found!\n", rr);
2211
2212 rr = drbd_free_peer_reqs(device, &device->sync_ee);
2213 if (rr)
2214 drbd_err(device, "%d EEs in sync list found!\n", rr);
2215
2216 rr = drbd_free_peer_reqs(device, &device->read_ee);
2217 if (rr)
2218 drbd_err(device, "%d EEs in read list found!\n", rr);
2219
2220 rr = drbd_free_peer_reqs(device, &device->done_ee);
2221 if (rr)
2222 drbd_err(device, "%d EEs in done list found!\n", rr);
2223
2224 rr = drbd_free_peer_reqs(device, &device->net_ee);
2225 if (rr)
2226 drbd_err(device, "%d EEs in net list found!\n", rr);
2227}
2228
2229/* caution. no locking. */
2230void drbd_destroy_device(struct kref *kref)
2231{
2232 struct drbd_device *device = container_of(kref, struct drbd_device, kref);
2233 struct drbd_resource *resource = device->resource;
2234 struct drbd_peer_device *peer_device, *tmp_peer_device;
2235
2236 del_timer_sync(&device->request_timer);
2237
2238 /* paranoia asserts */
2239 D_ASSERT(device, device->open_cnt == 0);
2240 /* end paranoia asserts */
2241
2242 /* cleanup stuff that may have been allocated during
2243 * device (re-)configuration or state changes */
2244
2245 if (device->this_bdev)
2246 bdput(device->this_bdev);
2247
2248 drbd_backing_dev_free(device, device->ldev);
2249 device->ldev = NULL;
2250
2251 drbd_release_all_peer_reqs(device);
2252
2253 lc_destroy(device->act_log);
2254 lc_destroy(device->resync);
2255
2256 kfree(device->p_uuid);
2257 /* device->p_uuid = NULL; */
2258
2259 if (device->bitmap) /* should no longer be there. */
2260 drbd_bm_cleanup(device);
2261 __free_page(device->md_io.page);
2262 put_disk(device->vdisk);
2263 blk_cleanup_queue(device->rq_queue);
2264 kfree(device->rs_plan_s);
2265
2266 /* not for_each_connection(connection, resource):
2267 * those may have been cleaned up and disassociated already.
2268 */
2269 for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
2270 kref_put(&peer_device->connection->kref, drbd_destroy_connection);
2271 kfree(peer_device);
2272 }
2273 memset(device, 0xfd, sizeof(*device));
2274 kfree(device);
2275 kref_put(&resource->kref, drbd_destroy_resource);
2276}
2277
2278/* One global retry thread, if we need to push back some bio and have it
2279 * reinserted through our make request function.
2280 */
2281static struct retry_worker {
2282 struct workqueue_struct *wq;
2283 struct work_struct worker;
2284
2285 spinlock_t lock;
2286 struct list_head writes;
2287} retry;
2288
2289static void do_retry(struct work_struct *ws)
2290{
2291 struct retry_worker *retry = container_of(ws, struct retry_worker, worker);
2292 LIST_HEAD(writes);
2293 struct drbd_request *req, *tmp;
2294
2295 spin_lock_irq(&retry->lock);
2296 list_splice_init(&retry->writes, &writes);
2297 spin_unlock_irq(&retry->lock);
2298
2299 list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
2300 struct drbd_device *device = req->device;
2301 struct bio *bio = req->master_bio;
2302 unsigned long start_jif = req->start_jif;
2303 bool expected;
2304
2305 expected =
2306 expect(atomic_read(&req->completion_ref) == 0) &&
2307 expect(req->rq_state & RQ_POSTPONED) &&
2308 expect((req->rq_state & RQ_LOCAL_PENDING) == 0 ||
2309 (req->rq_state & RQ_LOCAL_ABORTED) != 0);
2310
2311 if (!expected)
2312 drbd_err(device, "req=%p completion_ref=%d rq_state=%x\n",
2313 req, atomic_read(&req->completion_ref),
2314 req->rq_state);
2315
2316 /* We still need to put one kref associated with the
2317 * "completion_ref" going zero in the code path that queued it
2318 * here. The request object may still be referenced by a
2319 * frozen local req->private_bio, in case we force-detached.
2320 */
2321 kref_put(&req->kref, drbd_req_destroy);
2322
2323 /* A single suspended or otherwise blocking device may stall
2324 * all others as well. Fortunately, this code path is to
2325 * recover from a situation that "should not happen":
2326 * concurrent writes in multi-primary setup.
2327 * In a "normal" lifecycle, this workqueue is supposed to be
2328 * destroyed without ever doing anything.
2329		 * If it turns out to be an issue anyway, we can do per
2330 * resource (replication group) or per device (minor) retry
2331 * workqueues instead.
2332 */
2333
2334 /* We are not just doing generic_make_request(),
2335 * as we want to keep the start_time information. */
2336 inc_ap_bio(device);
2337 __drbd_make_request(device, bio, start_jif);
2338 }
2339}
2340
2341/* called via drbd_req_put_completion_ref(),
2342 * holds resource->req_lock */
2343void drbd_restart_request(struct drbd_request *req)
2344{
2345 unsigned long flags;
2346 spin_lock_irqsave(&retry.lock, flags);
2347 list_move_tail(&req->tl_requests, &retry.writes);
2348 spin_unlock_irqrestore(&retry.lock, flags);
2349
2350 /* Drop the extra reference that would otherwise
2351 * have been dropped by complete_master_bio.
2352 * do_retry() needs to grab a new one. */
2353 dec_ap_bio(req->device);
2354
2355 queue_work(retry.wq, &retry.worker);
2356}
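
/*
 * Flow sketch: drbd_restart_request() above only parks the request on
 * retry.writes, drops the ap_bio reference that complete_master_bio would
 * otherwise have dropped, and kicks the retry workqueue.  do_retry() then
 * puts the postponed request's kref and resubmits its master bio via
 * __drbd_make_request(), taking a fresh ap_bio reference with inc_ap_bio().
 */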
2357
2358void drbd_destroy_resource(struct kref *kref)
2359{
2360 struct drbd_resource *resource =
2361 container_of(kref, struct drbd_resource, kref);
2362
2363 idr_destroy(&resource->devices);
2364 free_cpumask_var(resource->cpu_mask);
2365 kfree(resource->name);
2366 memset(resource, 0xf2, sizeof(*resource));
2367 kfree(resource);
2368}
2369
2370void drbd_free_resource(struct drbd_resource *resource)
2371{
2372 struct drbd_connection *connection, *tmp;
2373
2374 for_each_connection_safe(connection, tmp, resource) {
2375 list_del(&connection->connections);
2376 drbd_debugfs_connection_cleanup(connection);
2377 kref_put(&connection->kref, drbd_destroy_connection);
2378 }
2379 drbd_debugfs_resource_cleanup(resource);
2380 kref_put(&resource->kref, drbd_destroy_resource);
2381}
2382
2383static void drbd_cleanup(void)
2384{
2385 unsigned int i;
2386 struct drbd_device *device;
2387 struct drbd_resource *resource, *tmp;
2388
2389	/* first remove proc,
2390	 * drbdsetup uses its presence to detect
2391	 * whether DRBD is loaded.
2392	 * If we got stuck in proc removal
2393	 * with netlink already deregistered,
2394	 * some drbdsetup commands could wait forever
2395	 * for an answer.
2396	 */
2397 if (drbd_proc)
2398 remove_proc_entry("drbd", NULL);
2399
2400 if (retry.wq)
2401 destroy_workqueue(retry.wq);
2402
2403 drbd_genl_unregister();
2404 drbd_debugfs_cleanup();
2405
2406 idr_for_each_entry(&drbd_devices, device, i)
2407 drbd_delete_device(device);
2408
2409	/* not _rcu: there is no other updater anymore, and genl is already unregistered */
2410 for_each_resource_safe(resource, tmp, &drbd_resources) {
2411 list_del(&resource->resources);
2412 drbd_free_resource(resource);
2413 }
2414
2415 drbd_destroy_mempools();
2416 unregister_blkdev(DRBD_MAJOR, "drbd");
2417
2418 idr_destroy(&drbd_devices);
2419
2420 pr_info("module cleanup done.\n");
2421}
2422
2423/**
2424 * drbd_congested() - Callback for the flusher thread
2425 * @congested_data: User data
2426 * @bdi_bits: Bits the BDI flusher thread is currently interested in
2427 *
2428 * Returns 1<<WB_async_congested and/or 1<<WB_sync_congested if we are congested.
2429 */
2430static int drbd_congested(void *congested_data, int bdi_bits)
2431{
2432 struct drbd_device *device = congested_data;
2433 struct request_queue *q;
2434 char reason = '-';
2435 int r = 0;
2436
2437 if (!may_inc_ap_bio(device)) {
2438 /* DRBD has frozen IO */
2439 r = bdi_bits;
2440 reason = 'd';
2441 goto out;
2442 }
2443
2444 if (test_bit(CALLBACK_PENDING, &first_peer_device(device)->connection->flags)) {
2445 r |= (1 << WB_async_congested);
2446 /* Without good local data, we would need to read from remote,
2447 * and that would need the worker thread as well, which is
2448 * currently blocked waiting for that usermode helper to
2449 * finish.
2450 */
2451 if (!get_ldev_if_state(device, D_UP_TO_DATE))
2452 r |= (1 << WB_sync_congested);
2453 else
2454 put_ldev(device);
2455 r &= bdi_bits;
2456 reason = 'c';
2457 goto out;
2458 }
2459
2460 if (get_ldev(device)) {
2461 q = bdev_get_queue(device->ldev->backing_bdev);
2462 r = bdi_congested(q->backing_dev_info, bdi_bits);
2463 put_ldev(device);
2464 if (r)
2465 reason = 'b';
2466 }
2467
2468 if (bdi_bits & (1 << WB_async_congested) &&
2469 test_bit(NET_CONGESTED, &first_peer_device(device)->connection->flags)) {
2470 r |= (1 << WB_async_congested);
2471 reason = reason == 'b' ? 'a' : 'n';
2472 }
2473
2474out:
2475 device->congestion_reason = reason;
2476 return r;
2477}
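
/*
 * The single character stored in device->congestion_reason above encodes why
 * congestion was reported (as can be read from the code): 'd' DRBD has frozen
 * IO, 'c' a usermode helper callback is pending, 'b' the backing device is
 * congested, 'n' the network is congested, 'a' both backing device and
 * network, '-' not congested.
 */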
2478
2479static void drbd_init_workqueue(struct drbd_work_queue* wq)
2480{
2481 spin_lock_init(&wq->q_lock);
2482 INIT_LIST_HEAD(&wq->q);
2483 init_waitqueue_head(&wq->q_wait);
2484}
2485
2486struct completion_work {
2487 struct drbd_work w;
2488 struct completion done;
2489};
2490
2491static int w_complete(struct drbd_work *w, int cancel)
2492{
2493 struct completion_work *completion_work =
2494 container_of(w, struct completion_work, w);
2495
2496 complete(&completion_work->done);
2497 return 0;
2498}
2499
2500void drbd_flush_workqueue(struct drbd_work_queue *work_queue)
2501{
2502 struct completion_work completion_work;
2503
2504 completion_work.w.cb = w_complete;
2505 init_completion(&completion_work.done);
2506 drbd_queue_work(work_queue, &completion_work.w);
2507 wait_for_completion(&completion_work.done);
2508}
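
/*
 * The flush above relies on the DRBD work queue being processed in order:
 * once the w_complete() item has run and the completion fires, every work
 * item that was queued before drbd_flush_workqueue() was called must have
 * been processed as well.
 */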
2509
2510struct drbd_resource *drbd_find_resource(const char *name)
2511{
2512 struct drbd_resource *resource;
2513
2514 if (!name || !name[0])
2515 return NULL;
2516
2517 rcu_read_lock();
2518 for_each_resource_rcu(resource, &drbd_resources) {
2519 if (!strcmp(resource->name, name)) {
2520 kref_get(&resource->kref);
2521 goto found;
2522 }
2523 }
2524 resource = NULL;
2525found:
2526 rcu_read_unlock();
2527 return resource;
2528}
2529
2530struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
2531 void *peer_addr, int peer_addr_len)
2532{
2533 struct drbd_resource *resource;
2534 struct drbd_connection *connection;
2535
2536 rcu_read_lock();
2537 for_each_resource_rcu(resource, &drbd_resources) {
2538 for_each_connection_rcu(connection, resource) {
2539 if (connection->my_addr_len == my_addr_len &&
2540 connection->peer_addr_len == peer_addr_len &&
2541 !memcmp(&connection->my_addr, my_addr, my_addr_len) &&
2542 !memcmp(&connection->peer_addr, peer_addr, peer_addr_len)) {
2543 kref_get(&connection->kref);
2544 goto found;
2545 }
2546 }
2547 }
2548 connection = NULL;
2549found:
2550 rcu_read_unlock();
2551 return connection;
2552}
2553
2554static int drbd_alloc_socket(struct drbd_socket *socket)
2555{
2556 socket->rbuf = (void *) __get_free_page(GFP_KERNEL);
2557 if (!socket->rbuf)
2558 return -ENOMEM;
2559 socket->sbuf = (void *) __get_free_page(GFP_KERNEL);
2560 if (!socket->sbuf)
2561 return -ENOMEM;
2562 return 0;
2563}
2564
2565static void drbd_free_socket(struct drbd_socket *socket)
2566{
2567 free_page((unsigned long) socket->sbuf);
2568 free_page((unsigned long) socket->rbuf);
2569}
2570
2571void conn_free_crypto(struct drbd_connection *connection)
2572{
2573 drbd_free_sock(connection);
2574
2575 crypto_free_ahash(connection->csums_tfm);
2576 crypto_free_ahash(connection->verify_tfm);
2577 crypto_free_shash(connection->cram_hmac_tfm);
2578 crypto_free_ahash(connection->integrity_tfm);
2579 crypto_free_ahash(connection->peer_integrity_tfm);
2580 kfree(connection->int_dig_in);
2581 kfree(connection->int_dig_vv);
2582
2583 connection->csums_tfm = NULL;
2584 connection->verify_tfm = NULL;
2585 connection->cram_hmac_tfm = NULL;
2586 connection->integrity_tfm = NULL;
2587 connection->peer_integrity_tfm = NULL;
2588 connection->int_dig_in = NULL;
2589 connection->int_dig_vv = NULL;
2590}
2591
2592int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts)
2593{
2594 struct drbd_connection *connection;
2595 cpumask_var_t new_cpu_mask;
2596 int err;
2597
2598 if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL))
2599 return -ENOMEM;
2600
2601 /* silently ignore cpu mask on UP kernel */
2602 if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) {
2603 err = bitmap_parse(res_opts->cpu_mask, DRBD_CPU_MASK_SIZE,
2604 cpumask_bits(new_cpu_mask), nr_cpu_ids);
2605 if (err == -EOVERFLOW) {
2606			/* Not fatal; just mask out the excess bits. */
2607 cpumask_var_t tmp_cpu_mask;
2608 if (zalloc_cpumask_var(&tmp_cpu_mask, GFP_KERNEL)) {
2609 cpumask_setall(tmp_cpu_mask);
2610 cpumask_and(new_cpu_mask, new_cpu_mask, tmp_cpu_mask);
2611 drbd_warn(resource, "Overflow in bitmap_parse(%.12s%s), truncating to %u bits\n",
2612 res_opts->cpu_mask,
2613 strlen(res_opts->cpu_mask) > 12 ? "..." : "",
2614 nr_cpu_ids);
2615 free_cpumask_var(tmp_cpu_mask);
2616 err = 0;
2617 }
2618 }
2619 if (err) {
2620 drbd_warn(resource, "bitmap_parse() failed with %d\n", err);
2621 /* retcode = ERR_CPU_MASK_PARSE; */
2622 goto fail;
2623 }
2624 }
2625 resource->res_opts = *res_opts;
2626 if (cpumask_empty(new_cpu_mask))
2627 drbd_calc_cpu_mask(&new_cpu_mask);
2628 if (!cpumask_equal(resource->cpu_mask, new_cpu_mask)) {
2629 cpumask_copy(resource->cpu_mask, new_cpu_mask);
2630 for_each_connection_rcu(connection, resource) {
2631 connection->receiver.reset_cpu_mask = 1;
2632 connection->ack_receiver.reset_cpu_mask = 1;
2633 connection->worker.reset_cpu_mask = 1;
2634 }
2635 }
2636 err = 0;
2637
2638fail:
2639 free_cpumask_var(new_cpu_mask);
2640 return err;
2641
2642}
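
/*
 * Example (illustrative only): res_opts->cpu_mask is a hex bitmap string as
 * parsed by bitmap_parse(), so a mask of "3" would restrict the receiver,
 * ack_receiver and worker threads of all connections in this resource to
 * CPUs 0 and 1 (applied lazily via the reset_cpu_mask flags set above).
 */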
2643
2644struct drbd_resource *drbd_create_resource(const char *name)
2645{
2646 struct drbd_resource *resource;
2647
2648 resource = kzalloc(sizeof(struct drbd_resource), GFP_KERNEL);
2649 if (!resource)
2650 goto fail;
2651 resource->name = kstrdup(name, GFP_KERNEL);
2652 if (!resource->name)
2653 goto fail_free_resource;
2654 if (!zalloc_cpumask_var(&resource->cpu_mask, GFP_KERNEL))
2655 goto fail_free_name;
2656 kref_init(&resource->kref);
2657 idr_init(&resource->devices);
2658 INIT_LIST_HEAD(&resource->connections);
2659 resource->write_ordering = WO_BDEV_FLUSH;
2660 list_add_tail_rcu(&resource->resources, &drbd_resources);
2661 mutex_init(&resource->conf_update);
2662 mutex_init(&resource->adm_mutex);
2663 spin_lock_init(&resource->req_lock);
2664 drbd_debugfs_resource_add(resource);
2665 return resource;
2666
2667fail_free_name:
2668 kfree(resource->name);
2669fail_free_resource:
2670 kfree(resource);
2671fail:
2672 return NULL;
2673}
2674
2675/* caller must be under adm_mutex */
2676struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts)
2677{
2678 struct drbd_resource *resource;
2679 struct drbd_connection *connection;
2680
2681 connection = kzalloc(sizeof(struct drbd_connection), GFP_KERNEL);
2682 if (!connection)
2683 return NULL;
2684
2685 if (drbd_alloc_socket(&connection->data))
2686 goto fail;
2687 if (drbd_alloc_socket(&connection->meta))
2688 goto fail;
2689
2690 connection->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
2691 if (!connection->current_epoch)
2692 goto fail;
2693
2694 INIT_LIST_HEAD(&connection->transfer_log);
2695
2696 INIT_LIST_HEAD(&connection->current_epoch->list);
2697 connection->epochs = 1;
2698 spin_lock_init(&connection->epoch_lock);
2699
2700 connection->send.seen_any_write_yet = false;
2701 connection->send.current_epoch_nr = 0;
2702 connection->send.current_epoch_writes = 0;
2703
2704 resource = drbd_create_resource(name);
2705 if (!resource)
2706 goto fail;
2707
2708 connection->cstate = C_STANDALONE;
2709 mutex_init(&connection->cstate_mutex);
2710 init_waitqueue_head(&connection->ping_wait);
2711 idr_init(&connection->peer_devices);
2712
2713 drbd_init_workqueue(&connection->sender_work);
2714 mutex_init(&connection->data.mutex);
2715 mutex_init(&connection->meta.mutex);
2716
2717 drbd_thread_init(resource, &connection->receiver, drbd_receiver, "receiver");
2718 connection->receiver.connection = connection;
2719 drbd_thread_init(resource, &connection->worker, drbd_worker, "worker");
2720 connection->worker.connection = connection;
2721 drbd_thread_init(resource, &connection->ack_receiver, drbd_ack_receiver, "ack_recv");
2722 connection->ack_receiver.connection = connection;
2723
2724 kref_init(&connection->kref);
2725
2726 connection->resource = resource;
2727
2728 if (set_resource_options(resource, res_opts))
2729 goto fail_resource;
2730
2731 kref_get(&resource->kref);
2732 list_add_tail_rcu(&connection->connections, &resource->connections);
2733 drbd_debugfs_connection_add(connection);
2734 return connection;
2735
2736fail_resource:
2737 list_del(&resource->resources);
2738 drbd_free_resource(resource);
2739fail:
2740 kfree(connection->current_epoch);
2741 drbd_free_socket(&connection->meta);
2742 drbd_free_socket(&connection->data);
2743 kfree(connection);
2744 return NULL;
2745}
2746
2747void drbd_destroy_connection(struct kref *kref)
2748{
2749 struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
2750 struct drbd_resource *resource = connection->resource;
2751
2752 if (atomic_read(&connection->current_epoch->epoch_size) != 0)
2753 drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
2754 kfree(connection->current_epoch);
2755
2756 idr_destroy(&connection->peer_devices);
2757
2758 drbd_free_socket(&connection->meta);
2759 drbd_free_socket(&connection->data);
2760 kfree(connection->int_dig_in);
2761 kfree(connection->int_dig_vv);
2762 memset(connection, 0xfc, sizeof(*connection));
2763 kfree(connection);
2764 kref_put(&resource->kref, drbd_destroy_resource);
2765}
2766
2767static int init_submitter(struct drbd_device *device)
2768{
2769	/* open-coded create_singlethread_workqueue(),
2770	 * so that the minor number can be part of the name ("drbd%u_submit") */
2771 device->submit.wq =
2772 alloc_ordered_workqueue("drbd%u_submit", WQ_MEM_RECLAIM, device->minor);
2773 if (!device->submit.wq)
2774 return -ENOMEM;
2775
2776 INIT_WORK(&device->submit.worker, do_submit);
2777 INIT_LIST_HEAD(&device->submit.writes);
2778 return 0;
2779}
2780
2781enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor)
2782{
2783 struct drbd_resource *resource = adm_ctx->resource;
2784 struct drbd_connection *connection;
2785 struct drbd_device *device;
2786 struct drbd_peer_device *peer_device, *tmp_peer_device;
2787 struct gendisk *disk;
2788 struct request_queue *q;
2789 int id;
2790 int vnr = adm_ctx->volume;
2791 enum drbd_ret_code err = ERR_NOMEM;
2792
2793 device = minor_to_device(minor);
2794 if (device)
2795 return ERR_MINOR_OR_VOLUME_EXISTS;
2796
2797 /* GFP_KERNEL, we are outside of all write-out paths */
2798 device = kzalloc(sizeof(struct drbd_device), GFP_KERNEL);
2799 if (!device)
2800 return ERR_NOMEM;
2801 kref_init(&device->kref);
2802
2803 kref_get(&resource->kref);
2804 device->resource = resource;
2805 device->minor = minor;
2806 device->vnr = vnr;
2807
2808 drbd_init_set_defaults(device);
2809
2810 q = blk_alloc_queue(GFP_KERNEL);
2811 if (!q)
2812 goto out_no_q;
2813 device->rq_queue = q;
2814 q->queuedata = device;
2815
2816 disk = alloc_disk(1);
2817 if (!disk)
2818 goto out_no_disk;
2819 device->vdisk = disk;
2820
2821 set_disk_ro(disk, true);
2822
2823 disk->queue = q;
2824 disk->major = DRBD_MAJOR;
2825 disk->first_minor = minor;
2826 disk->fops = &drbd_ops;
2827 sprintf(disk->disk_name, "drbd%d", minor);
2828 disk->private_data = device;
2829
2830 device->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
2831 /* we have no partitions. we contain only ourselves. */
2832 device->this_bdev->bd_contains = device->this_bdev;
2833
2834 q->backing_dev_info->congested_fn = drbd_congested;
2835 q->backing_dev_info->congested_data = device;
2836
2837 blk_queue_make_request(q, drbd_make_request);
2838 blk_queue_write_cache(q, true, true);
2839	/* Setting max_hw_sectors to an unusually low value of 8 KiB here
2840	   triggers a max_bio_size message upon first attach or connect. */
2841 blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
2842 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
2843 q->queue_lock = &resource->req_lock;
2844
2845 device->md_io.page = alloc_page(GFP_KERNEL);
2846 if (!device->md_io.page)
2847 goto out_no_io_page;
2848
2849 if (drbd_bm_init(device))
2850 goto out_no_bitmap;
2851 device->read_requests = RB_ROOT;
2852 device->write_requests = RB_ROOT;
2853
2854 id = idr_alloc(&drbd_devices, device, minor, minor + 1, GFP_KERNEL);
2855 if (id < 0) {
2856 if (id == -ENOSPC)
2857 err = ERR_MINOR_OR_VOLUME_EXISTS;
2858 goto out_no_minor_idr;
2859 }
2860 kref_get(&device->kref);
2861
2862 id = idr_alloc(&resource->devices, device, vnr, vnr + 1, GFP_KERNEL);
2863 if (id < 0) {
2864 if (id == -ENOSPC)
2865 err = ERR_MINOR_OR_VOLUME_EXISTS;
2866 goto out_idr_remove_minor;
2867 }
2868 kref_get(&device->kref);
2869
2870 INIT_LIST_HEAD(&device->peer_devices);
2871 INIT_LIST_HEAD(&device->pending_bitmap_io);
2872 for_each_connection(connection, resource) {
2873 peer_device = kzalloc(sizeof(struct drbd_peer_device), GFP_KERNEL);
2874 if (!peer_device)
2875 goto out_idr_remove_from_resource;
2876 peer_device->connection = connection;
2877 peer_device->device = device;
2878
2879 list_add(&peer_device->peer_devices, &device->peer_devices);
2880 kref_get(&device->kref);
2881
2882 id = idr_alloc(&connection->peer_devices, peer_device, vnr, vnr + 1, GFP_KERNEL);
2883 if (id < 0) {
2884 if (id == -ENOSPC)
2885 err = ERR_INVALID_REQUEST;
2886 goto out_idr_remove_from_resource;
2887 }
2888 kref_get(&connection->kref);
2889 INIT_WORK(&peer_device->send_acks_work, drbd_send_acks_wf);
2890 }
2891
2892 if (init_submitter(device)) {
2893 err = ERR_NOMEM;
2894 goto out_idr_remove_vol;
2895 }
2896
2897 add_disk(disk);
2898
2899 /* inherit the connection state */
2900 device->state.conn = first_connection(resource)->cstate;
2901 if (device->state.conn == C_WF_REPORT_PARAMS) {
2902 for_each_peer_device(peer_device, device)
2903 drbd_connected(peer_device);
2904 }
2905 /* move to create_peer_device() */
2906 for_each_peer_device(peer_device, device)
2907 drbd_debugfs_peer_device_add(peer_device);
2908 drbd_debugfs_device_add(device);
2909 return NO_ERROR;
2910
2911out_idr_remove_vol:
2912 idr_remove(&connection->peer_devices, vnr);
2913out_idr_remove_from_resource:
2914 for_each_connection(connection, resource) {
2915 peer_device = idr_remove(&connection->peer_devices, vnr);
2916 if (peer_device)
2917 kref_put(&connection->kref, drbd_destroy_connection);
2918 }
2919 for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
2920 list_del(&peer_device->peer_devices);
2921 kfree(peer_device);
2922 }
2923 idr_remove(&resource->devices, vnr);
2924out_idr_remove_minor:
2925 idr_remove(&drbd_devices, minor);
2926 synchronize_rcu();
2927out_no_minor_idr:
2928 drbd_bm_cleanup(device);
2929out_no_bitmap:
2930 __free_page(device->md_io.page);
2931out_no_io_page:
2932 put_disk(disk);
2933out_no_disk:
2934 blk_cleanup_queue(q);
2935out_no_q:
2936 kref_put(&resource->kref, drbd_destroy_resource);
2937 kfree(device);
2938 return err;
2939}
2940
2941void drbd_delete_device(struct drbd_device *device)
2942{
2943 struct drbd_resource *resource = device->resource;
2944 struct drbd_connection *connection;
2945 struct drbd_peer_device *peer_device;
2946
2947 /* move to free_peer_device() */
2948 for_each_peer_device(peer_device, device)
2949 drbd_debugfs_peer_device_cleanup(peer_device);
2950 drbd_debugfs_device_cleanup(device);
2951 for_each_connection(connection, resource) {
2952 idr_remove(&connection->peer_devices, device->vnr);
2953 kref_put(&device->kref, drbd_destroy_device);
2954 }
2955 idr_remove(&resource->devices, device->vnr);
2956 kref_put(&device->kref, drbd_destroy_device);
2957 idr_remove(&drbd_devices, device_to_minor(device));
2958 kref_put(&device->kref, drbd_destroy_device);
2959 del_gendisk(device->vdisk);
2960 synchronize_rcu();
2961 kref_put(&device->kref, drbd_destroy_device);
2962}
2963
2964static int __init drbd_init(void)
2965{
2966 int err;
2967
2968 if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
2969 pr_err("invalid minor_count (%d)\n", minor_count);
2970#ifdef MODULE
2971 return -EINVAL;
2972#else
2973 minor_count = DRBD_MINOR_COUNT_DEF;
2974#endif
2975 }
2976
2977 err = register_blkdev(DRBD_MAJOR, "drbd");
2978 if (err) {
2979 pr_err("unable to register block device major %d\n",
2980 DRBD_MAJOR);
2981 return err;
2982 }
2983
2984 /*
2985 * allocate all necessary structs
2986 */
2987 init_waitqueue_head(&drbd_pp_wait);
2988
2989 drbd_proc = NULL; /* play safe for drbd_cleanup */
2990 idr_init(&drbd_devices);
2991
2992 mutex_init(&resources_mutex);
2993 INIT_LIST_HEAD(&drbd_resources);
2994
2995 err = drbd_genl_register();
2996 if (err) {
2997 pr_err("unable to register generic netlink family\n");
2998 goto fail;
2999 }
3000
3001 err = drbd_create_mempools();
3002 if (err)
3003 goto fail;
3004
3005 err = -ENOMEM;
3006 drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
3007 if (!drbd_proc) {
3008 pr_err("unable to register proc file\n");
3009 goto fail;
3010 }
3011
3012 retry.wq = create_singlethread_workqueue("drbd-reissue");
3013 if (!retry.wq) {
3014 pr_err("unable to create retry workqueue\n");
3015 goto fail;
3016 }
3017 INIT_WORK(&retry.worker, do_retry);
3018 spin_lock_init(&retry.lock);
3019 INIT_LIST_HEAD(&retry.writes);
3020
3021 if (drbd_debugfs_init())
3022 pr_notice("failed to initialize debugfs -- will not be available\n");
3023
3024 pr_info("initialized. "
3025 "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
3026 API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
3027 pr_info("%s\n", drbd_buildtag());
3028 pr_info("registered as block device major %d\n", DRBD_MAJOR);
3029 return 0; /* Success! */
3030
3031fail:
3032 drbd_cleanup();
3033 if (err == -ENOMEM)
3034 pr_err("ran out of memory\n");
3035 else
3036 pr_err("initialization failure\n");
3037 return err;
3038}
3039
3040static void drbd_free_one_sock(struct drbd_socket *ds)
3041{
3042 struct socket *s;
3043 mutex_lock(&ds->mutex);
3044 s = ds->socket;
3045 ds->socket = NULL;
3046 mutex_unlock(&ds->mutex);
3047 if (s) {
3048 /* so debugfs does not need to mutex_lock() */
3049 synchronize_rcu();
3050 kernel_sock_shutdown(s, SHUT_RDWR);
3051 sock_release(s);
3052 }
3053}
3054
3055void drbd_free_sock(struct drbd_connection *connection)
3056{
3057 if (connection->data.socket)
3058 drbd_free_one_sock(&connection->data);
3059 if (connection->meta.socket)
3060 drbd_free_one_sock(&connection->meta);
3061}
3062
3063/* meta data management */
3064
3065void conn_md_sync(struct drbd_connection *connection)
3066{
3067 struct drbd_peer_device *peer_device;
3068 int vnr;
3069
3070 rcu_read_lock();
3071 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
3072 struct drbd_device *device = peer_device->device;
3073
3074 kref_get(&device->kref);
3075 rcu_read_unlock();
3076 drbd_md_sync(device);
3077 kref_put(&device->kref, drbd_destroy_device);
3078 rcu_read_lock();
3079 }
3080 rcu_read_unlock();
3081}
3082
3083/* aligned to 4 kByte */
3084struct meta_data_on_disk {
3085 u64 la_size_sect; /* last agreed size. */
3086 u64 uuid[UI_SIZE]; /* UUIDs. */
3087 u64 device_uuid;
3088 u64 reserved_u64_1;
3089 u32 flags; /* MDF */
3090 u32 magic;
3091 u32 md_size_sect;
3092 u32 al_offset; /* offset to this block */
3093 u32 al_nr_extents; /* important for restoring the AL (userspace) */
3094 /* `-- act_log->nr_elements <-- ldev->dc.al_extents */
3095 u32 bm_offset; /* offset to the bitmap, from here */
3096 u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */
3097 u32 la_peer_max_bio_size; /* last peer max_bio_size */
3098
3099 /* see al_tr_number_to_on_disk_sector() */
3100 u32 al_stripes;
3101 u32 al_stripe_size_4k;
3102
3103 u8 reserved_u8[4096 - (7*8 + 10*4)];
3104} __packed;
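
/*
 * Size check for the layout above: the 7 u64 fields (la_size_sect, uuid[4],
 * device_uuid, reserved_u64_1) take 7*8 = 56 bytes, the 10 u32 fields take
 * 10*4 = 40 bytes, so reserved_u8[] covers 4096 - 96 = 4000 bytes and the
 * struct is exactly 4096 bytes, matching the BUILD_BUG_ON() in
 * drbd_md_sync() below.
 */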
3105
3106
3107
3108void drbd_md_write(struct drbd_device *device, void *b)
3109{
3110 struct meta_data_on_disk *buffer = b;
3111 sector_t sector;
3112 int i;
3113
3114 memset(buffer, 0, sizeof(*buffer));
3115
3116 buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(device->this_bdev));
3117 for (i = UI_CURRENT; i < UI_SIZE; i++)
3118 buffer->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
3119 buffer->flags = cpu_to_be32(device->ldev->md.flags);
3120 buffer->magic = cpu_to_be32(DRBD_MD_MAGIC_84_UNCLEAN);
3121
3122 buffer->md_size_sect = cpu_to_be32(device->ldev->md.md_size_sect);
3123 buffer->al_offset = cpu_to_be32(device->ldev->md.al_offset);
3124 buffer->al_nr_extents = cpu_to_be32(device->act_log->nr_elements);
3125 buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
3126 buffer->device_uuid = cpu_to_be64(device->ldev->md.device_uuid);
3127
3128 buffer->bm_offset = cpu_to_be32(device->ldev->md.bm_offset);
3129 buffer->la_peer_max_bio_size = cpu_to_be32(device->peer_max_bio_size);
3130
3131 buffer->al_stripes = cpu_to_be32(device->ldev->md.al_stripes);
3132 buffer->al_stripe_size_4k = cpu_to_be32(device->ldev->md.al_stripe_size_4k);
3133
3134 D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset);
3135 sector = device->ldev->md.md_offset;
3136
3137 if (drbd_md_sync_page_io(device, device->ldev, sector, REQ_OP_WRITE)) {
3138		/* this was a best-effort attempt anyway ... */
3139 drbd_err(device, "meta data update failed!\n");
3140 drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
3141 }
3142}
3143
3144/**
3145 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
3146 * @device: DRBD device.
3147 */
3148void drbd_md_sync(struct drbd_device *device)
3149{
3150 struct meta_data_on_disk *buffer;
3151
3152 /* Don't accidentally change the DRBD meta data layout. */
3153 BUILD_BUG_ON(UI_SIZE != 4);
3154 BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096);
3155
3156 del_timer(&device->md_sync_timer);
3157 /* timer may be rearmed by drbd_md_mark_dirty() now. */
3158 if (!test_and_clear_bit(MD_DIRTY, &device->flags))
3159 return;
3160
3161	/* We use D_FAILED here, and not D_ATTACHING, because we try to write
3162	 * metadata even if we detach due to a disk failure! */
3163 if (!get_ldev_if_state(device, D_FAILED))
3164 return;
3165
3166 buffer = drbd_md_get_buffer(device, __func__);
3167 if (!buffer)
3168 goto out;
3169
3170 drbd_md_write(device, buffer);
3171
3172	/* Update device->ldev->md.la_size_sect,
3173	 * since we just wrote the updated value to the meta data. */
3174 device->ldev->md.la_size_sect = drbd_get_capacity(device->this_bdev);
3175
3176 drbd_md_put_buffer(device);
3177out:
3178 put_ldev(device);
3179}
3180
3181static int check_activity_log_stripe_size(struct drbd_device *device,
3182 struct meta_data_on_disk *on_disk,
3183 struct drbd_md *in_core)
3184{
3185 u32 al_stripes = be32_to_cpu(on_disk->al_stripes);
3186 u32 al_stripe_size_4k = be32_to_cpu(on_disk->al_stripe_size_4k);
3187 u64 al_size_4k;
3188
3189 /* both not set: default to old fixed size activity log */
3190 if (al_stripes == 0 && al_stripe_size_4k == 0) {
3191 al_stripes = 1;
3192 al_stripe_size_4k = MD_32kB_SECT/8;
3193 }
3194
3195 /* some paranoia plausibility checks */
3196
3197 /* we need both values to be set */
3198 if (al_stripes == 0 || al_stripe_size_4k == 0)
3199 goto err;
3200
3201 al_size_4k = (u64)al_stripes * al_stripe_size_4k;
3202
3203	/* Upper limit of the activity log area, to avoid potential overflow
3204	 * problems in al_tr_number_to_on_disk_sector().  Right now, more than
3205	 * 72 * 4k blocks in total only increase the amount of history, so
3206	 * limiting this arbitrarily to 16 GB is not a real limitation ;-) */
3207 if (al_size_4k > (16 * 1024 * 1024/4))
3208 goto err;
3209
3210 /* Lower limit: we need at least 8 transaction slots (32kB)
3211 * to not break existing setups */
3212 if (al_size_4k < MD_32kB_SECT/8)
3213 goto err;
3214
3215 in_core->al_stripe_size_4k = al_stripe_size_4k;
3216 in_core->al_stripes = al_stripes;
3217 in_core->al_size_4k = al_size_4k;
3218
3219 return 0;
3220err:
3221 drbd_err(device, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n",
3222 al_stripes, al_stripe_size_4k);
3223 return -EINVAL;
3224}
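
/*
 * Worked example for the legacy default above: with both on-disk values
 * zero, al_stripes = 1 and al_stripe_size_4k = MD_32kB_SECT/8.  Assuming
 * MD_32kB_SECT is 64 (32 kB expressed in 512-byte sectors), that yields
 * al_size_4k = 1 * 8 = 8 blocks of 4 kB = 32 kB, i.e. exactly the lower
 * limit of 8 transaction slots checked for above.
 */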
3225
3226static int check_offsets_and_sizes(struct drbd_device *device, struct drbd_backing_dev *bdev)
3227{
3228 sector_t capacity = drbd_get_capacity(bdev->md_bdev);
3229 struct drbd_md *in_core = &bdev->md;
3230 s32 on_disk_al_sect;
3231 s32 on_disk_bm_sect;
3232
3233 /* The on-disk size of the activity log, calculated from offsets, and
3234 * the size of the activity log calculated from the stripe settings,
3235 * should match.
3236	 * Though we could relax this a bit: it is ok if the striped activity log
3237	 * fits into the available on-disk activity log space.
3238 * Right now, that would break how resize is implemented.
3239 * TODO: make drbd_determine_dev_size() (and the drbdmeta tool) aware
3240 * of possible unused padding space in the on disk layout. */
3241 if (in_core->al_offset < 0) {
3242 if (in_core->bm_offset > in_core->al_offset)
3243 goto err;
3244 on_disk_al_sect = -in_core->al_offset;
3245 on_disk_bm_sect = in_core->al_offset - in_core->bm_offset;
3246 } else {
3247 if (in_core->al_offset != MD_4kB_SECT)
3248 goto err;
3249 if (in_core->bm_offset < in_core->al_offset + in_core->al_size_4k * MD_4kB_SECT)
3250 goto err;
3251
3252 on_disk_al_sect = in_core->bm_offset - MD_4kB_SECT;
3253 on_disk_bm_sect = in_core->md_size_sect - in_core->bm_offset;
3254 }
3255
3256 /* old fixed size meta data is exactly that: fixed. */
3257 if (in_core->meta_dev_idx >= 0) {
3258 if (in_core->md_size_sect != MD_128MB_SECT
3259 || in_core->al_offset != MD_4kB_SECT
3260 || in_core->bm_offset != MD_4kB_SECT + MD_32kB_SECT
3261 || in_core->al_stripes != 1
3262 || in_core->al_stripe_size_4k != MD_32kB_SECT/8)
3263 goto err;
3264 }
3265
3266 if (capacity < in_core->md_size_sect)
3267 goto err;
3268 if (capacity - in_core->md_size_sect < drbd_md_first_sector(bdev))
3269 goto err;
3270
3271 /* should be aligned, and at least 32k */
3272 if ((on_disk_al_sect & 7) || (on_disk_al_sect < MD_32kB_SECT))
3273 goto err;
3274
3275 /* should fit (for now: exactly) into the available on-disk space;
3276 * overflow prevention is in check_activity_log_stripe_size() above. */
3277 if (on_disk_al_sect != in_core->al_size_4k * MD_4kB_SECT)
3278 goto err;
3279
3280 /* again, should be aligned */
3281 if (in_core->bm_offset & 7)
3282 goto err;
3283
3284 /* FIXME check for device grow with flex external meta data? */
3285
3286 /* can the available bitmap space cover the last agreed device size? */
3287 if (on_disk_bm_sect < (in_core->la_size_sect+7)/MD_4kB_SECT/8/512)
3288 goto err;
3289
3290 return 0;
3291
3292err:
3293 drbd_err(device, "meta data offsets don't make sense: idx=%d "
3294 "al_s=%u, al_sz4k=%u, al_offset=%d, bm_offset=%d, "
3295 "md_size_sect=%u, la_size=%llu, md_capacity=%llu\n",
3296 in_core->meta_dev_idx,
3297 in_core->al_stripes, in_core->al_stripe_size_4k,
3298 in_core->al_offset, in_core->bm_offset, in_core->md_size_sect,
3299 (unsigned long long)in_core->la_size_sect,
3300 (unsigned long long)capacity);
3301
3302 return -EINVAL;
3303}
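
/*
 * Layout implied by the checks above (a rough sketch, not authoritative;
 * offsets are relative to md_offset, the location of the superblock):
 *
 *   internal / flexible meta data (al_offset < 0):
 *	... user data ... [ bitmap | activity log | superblock ]
 *	                  ^bm_offset ^al_offset    ^0
 *
 *   old fixed-size indexed meta data (meta_dev_idx >= 0):
 *	[ superblock 4k | activity log 32k | bitmap ... ]   128 MB total
 *	^0               ^al_offset          ^bm_offset
 */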
3304
3305
3306/**
3307 * drbd_md_read() - Reads in the meta data super block
3308 * @device: DRBD device.
3309 * @bdev: Device from which the meta data should be read in.
3310 *
3311 * Returns NO_ERROR on success, and an enum drbd_ret_code error value in
3312 * case something goes wrong.
3313 *
3314 * Called exactly once during drbd_adm_attach(), while still being D_DISKLESS,
3315 * even before @bdev is assigned to @device->ldev.
3316 */
3317int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
3318{
3319 struct meta_data_on_disk *buffer;
3320 u32 magic, flags;
3321 int i, rv = NO_ERROR;
3322
3323 if (device->state.disk != D_DISKLESS)
3324 return ERR_DISK_CONFIGURED;
3325
3326 buffer = drbd_md_get_buffer(device, __func__);
3327 if (!buffer)
3328 return ERR_NOMEM;
3329
3330 /* First, figure out where our meta data superblock is located,
3331 * and read it. */
3332 bdev->md.meta_dev_idx = bdev->disk_conf->meta_dev_idx;
3333 bdev->md.md_offset = drbd_md_ss(bdev);
3334 /* Even for (flexible or indexed) external meta data,
3335 * initially restrict us to the 4k superblock for now.
3336 * Affects the paranoia out-of-range access check in drbd_md_sync_page_io(). */
3337 bdev->md.md_size_sect = 8;
3338
3339 if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset,
3340 REQ_OP_READ)) {
3341 /* NOTE: can't do normal error processing here as this is
3342 called BEFORE disk is attached */
3343 drbd_err(device, "Error while reading metadata.\n");
3344 rv = ERR_IO_MD_DISK;
3345 goto err;
3346 }
3347
3348 magic = be32_to_cpu(buffer->magic);
3349 flags = be32_to_cpu(buffer->flags);
3350 if (magic == DRBD_MD_MAGIC_84_UNCLEAN ||
3351 (magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) {
3352 /* btw: that's Activity Log clean, not "all" clean. */
3353 drbd_err(device, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
3354 rv = ERR_MD_UNCLEAN;
3355 goto err;
3356 }
3357
3358 rv = ERR_MD_INVALID;
3359 if (magic != DRBD_MD_MAGIC_08) {
3360 if (magic == DRBD_MD_MAGIC_07)
3361 drbd_err(device, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
3362 else
3363 drbd_err(device, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
3364 goto err;
3365 }
3366
3367 if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
3368 drbd_err(device, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
3369 be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
3370 goto err;
3371 }
3372
3373
3374 /* convert to in_core endian */
3375 bdev->md.la_size_sect = be64_to_cpu(buffer->la_size_sect);
3376 for (i = UI_CURRENT; i < UI_SIZE; i++)
3377 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
3378 bdev->md.flags = be32_to_cpu(buffer->flags);
3379 bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
3380
3381 bdev->md.md_size_sect = be32_to_cpu(buffer->md_size_sect);
3382 bdev->md.al_offset = be32_to_cpu(buffer->al_offset);
3383 bdev->md.bm_offset = be32_to_cpu(buffer->bm_offset);
3384
3385 if (check_activity_log_stripe_size(device, buffer, &bdev->md))
3386 goto err;
3387 if (check_offsets_and_sizes(device, bdev))
3388 goto err;
3389
3390 if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
3391 drbd_err(device, "unexpected bm_offset: %d (expected %d)\n",
3392 be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
3393 goto err;
3394 }
3395 if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
3396 drbd_err(device, "unexpected md_size: %u (expected %u)\n",
3397 be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
3398 goto err;
3399 }
3400
3401 rv = NO_ERROR;
3402
3403 spin_lock_irq(&device->resource->req_lock);
3404 if (device->state.conn < C_CONNECTED) {
3405 unsigned int peer;
3406 peer = be32_to_cpu(buffer->la_peer_max_bio_size);
3407 peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
3408 device->peer_max_bio_size = peer;
3409 }
3410 spin_unlock_irq(&device->resource->req_lock);
3411
3412 err:
3413 drbd_md_put_buffer(device);
3414
3415 return rv;
3416}
3417
3418/**
3419 * drbd_md_mark_dirty() - Mark meta data super block as dirty
3420 * @device: DRBD device.
3421 *
3422 * Call this function if you change anything that should be written to
3423 * the meta-data super block.  This function sets MD_DIRTY, and arms a
3424 * timer that ensures drbd_md_sync() gets called within five seconds.
3425 */
3426#ifdef DEBUG
3427void drbd_md_mark_dirty_(struct drbd_device *device, unsigned int line, const char *func)
3428{
3429 if (!test_and_set_bit(MD_DIRTY, &device->flags)) {
3430 mod_timer(&device->md_sync_timer, jiffies + HZ);
3431 device->last_md_mark_dirty.line = line;
3432 device->last_md_mark_dirty.func = func;
3433 }
3434}
3435#else
3436void drbd_md_mark_dirty(struct drbd_device *device)
3437{
3438 if (!test_and_set_bit(MD_DIRTY, &device->flags))
3439 mod_timer(&device->md_sync_timer, jiffies + 5*HZ);
3440}
3441#endif
3442
3443void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local)
3444{
3445 int i;
3446
3447 for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
3448 device->ldev->md.uuid[i+1] = device->ldev->md.uuid[i];
3449}
3450
3451void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
3452{
3453 if (idx == UI_CURRENT) {
3454 if (device->state.role == R_PRIMARY)
3455 val |= 1;
3456 else
3457 val &= ~((u64)1);
3458
3459 drbd_set_ed_uuid(device, val);
3460 }
3461
3462 device->ldev->md.uuid[idx] = val;
3463 drbd_md_mark_dirty(device);
3464}
3465
3466void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
3467{
3468 unsigned long flags;
3469 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
3470 __drbd_uuid_set(device, idx, val);
3471 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
3472}
3473
3474void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
3475{
3476 unsigned long flags;
3477 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
3478 if (device->ldev->md.uuid[idx]) {
3479 drbd_uuid_move_history(device);
3480 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[idx];
3481 }
3482 __drbd_uuid_set(device, idx, val);
3483 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
3484}
3485
3486/**
3487 * drbd_uuid_new_current() - Creates a new current UUID
3488 * @device: DRBD device.
3489 *
3490 * Creates a new current UUID, and rotates the old current UUID into
3491 * the bitmap slot. Causes an incremental resync upon next connect.
3492 */
3493void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local)
3494{
3495 u64 val;
3496 unsigned long long bm_uuid;
3497
3498 get_random_bytes(&val, sizeof(u64));
3499
3500 spin_lock_irq(&device->ldev->md.uuid_lock);
3501 bm_uuid = device->ldev->md.uuid[UI_BITMAP];
3502
3503 if (bm_uuid)
3504 drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);
3505
3506 device->ldev->md.uuid[UI_BITMAP] = device->ldev->md.uuid[UI_CURRENT];
3507 __drbd_uuid_set(device, UI_CURRENT, val);
3508 spin_unlock_irq(&device->ldev->md.uuid_lock);
3509
3510 drbd_print_uuids(device, "new current UUID");
3511 /* get it to stable storage _now_ */
3512 drbd_md_sync(device);
3513}
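
/*
 * Illustration of the rotation above: whatever value UI_CURRENT held before
 * the call ends up in UI_BITMAP, and UI_CURRENT receives the new random
 * value, with its lowest bit forced to 1 on a primary and cleared otherwise
 * (see __drbd_uuid_set()).
 */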
3514
3515void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
3516{
3517 unsigned long flags;
3518 if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
3519 return;
3520
3521 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
3522 if (val == 0) {
3523 drbd_uuid_move_history(device);
3524 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
3525 device->ldev->md.uuid[UI_BITMAP] = 0;
3526 } else {
3527 unsigned long long bm_uuid = device->ldev->md.uuid[UI_BITMAP];
3528 if (bm_uuid)
3529 drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);
3530
3531 device->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
3532 }
3533 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
3534
3535 drbd_md_mark_dirty(device);
3536}
3537
3538/**
3539 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3540 * @device: DRBD device.
3541 *
3542 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
3543 */
3544int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local)
3545{
3546 int rv = -EIO;
3547
3548 drbd_md_set_flag(device, MDF_FULL_SYNC);
3549 drbd_md_sync(device);
3550 drbd_bm_set_all(device);
3551
3552 rv = drbd_bm_write(device);
3553
3554 if (!rv) {
3555 drbd_md_clear_flag(device, MDF_FULL_SYNC);
3556 drbd_md_sync(device);
3557 }
3558
3559 return rv;
3560}
3561
3562/**
3563 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3564 * @device: DRBD device.
3565 *
3566 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
3567 */
3568int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local)
3569{
3570 drbd_resume_al(device);
3571 drbd_bm_clear_all(device);
3572 return drbd_bm_write(device);
3573}
3574
3575static int w_bitmap_io(struct drbd_work *w, int unused)
3576{
3577 struct drbd_device *device =
3578 container_of(w, struct drbd_device, bm_io_work.w);
3579 struct bm_io_work *work = &device->bm_io_work;
3580 int rv = -EIO;
3581
3582 if (work->flags != BM_LOCKED_CHANGE_ALLOWED) {
3583 int cnt = atomic_read(&device->ap_bio_cnt);
3584 if (cnt)
3585 drbd_err(device, "FIXME: ap_bio_cnt %d, expected 0; queued for '%s'\n",
3586 cnt, work->why);
3587 }
3588
3589 if (get_ldev(device)) {
3590 drbd_bm_lock(device, work->why, work->flags);
3591 rv = work->io_fn(device);
3592 drbd_bm_unlock(device);
3593 put_ldev(device);
3594 }
3595
3596 clear_bit_unlock(BITMAP_IO, &device->flags);
3597 wake_up(&device->misc_wait);
3598
3599 if (work->done)
3600 work->done(device, rv);
3601
3602 clear_bit(BITMAP_IO_QUEUED, &device->flags);
3603 work->why = NULL;
3604 work->flags = 0;
3605
3606 return 0;
3607}
3608
3609/**
3610 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
3611 * @device: DRBD device.
3612 * @io_fn: IO callback to be called when bitmap IO is possible
3613 * @done: callback to be called after the bitmap IO was performed
3614 * @why: Descriptive text of the reason for doing the IO
3615 *
3616 * While IO on the bitmap is in progress we freeze application IO, ensuring
3617 * that drbd_set_out_of_sync() can not be called. This function MAY ONLY be
3618 * called from worker context. It MUST NOT be used while a previous such
3619 * work is still pending!
3620 *
3621 * Its worker function encloses the call of io_fn() by get_ldev() and
3622 * put_ldev().
3623 */
3624void drbd_queue_bitmap_io(struct drbd_device *device,
3625 int (*io_fn)(struct drbd_device *),
3626 void (*done)(struct drbd_device *, int),
3627 char *why, enum bm_flag flags)
3628{
3629 D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);
3630
3631 D_ASSERT(device, !test_bit(BITMAP_IO_QUEUED, &device->flags));
3632 D_ASSERT(device, !test_bit(BITMAP_IO, &device->flags));
3633 D_ASSERT(device, list_empty(&device->bm_io_work.w.list));
3634 if (device->bm_io_work.why)
3635 drbd_err(device, "FIXME going to queue '%s' but '%s' still pending?\n",
3636 why, device->bm_io_work.why);
3637
3638 device->bm_io_work.io_fn = io_fn;
3639 device->bm_io_work.done = done;
3640 device->bm_io_work.why = why;
3641 device->bm_io_work.flags = flags;
3642
3643 spin_lock_irq(&device->resource->req_lock);
3644 set_bit(BITMAP_IO, &device->flags);
3645 /* don't wait for pending application IO if the caller indicates that
3646 * application IO does not conflict anyway. */
3647 if (flags == BM_LOCKED_CHANGE_ALLOWED || atomic_read(&device->ap_bio_cnt) == 0) {
3648 if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
3649 drbd_queue_work(&first_peer_device(device)->connection->sender_work,
3650 &device->bm_io_work.w);
3651 }
3652 spin_unlock_irq(&device->resource->req_lock);
3653}
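
/*
 * Usage sketch (illustrative, not part of the original source): queueing a
 * full-bitmap "set all bits and write" from worker context, with no
 * completion callback.  The reason string and the flag combination are
 * assumptions for illustration only; real callers pick flags to match what
 * must be locked out while the bitmap IO runs.
 */
static void __maybe_unused example_queue_full_sync(struct drbd_device *device)
{
	drbd_queue_bitmap_io(device, &drbd_bmio_set_n_write, NULL,
			     "example: set_n_write",
			     BM_DONT_CLEAR | BM_DONT_SET | BM_DONT_TEST);
}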
3654
3655/**
3656 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
3657 * @device: DRBD device.
3658 * @io_fn: IO callback to be called when bitmap IO is possible
3659 * @why: Descriptive text of the reason for doing the IO
3660 *
3661 * Freezes application IO while the actual IO operation runs. This
3662 * function MAY NOT be called from worker context.
3663 */
3664int drbd_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *),
3665 char *why, enum bm_flag flags)
3666{
3667 /* Only suspend io if some operation is supposed to be locked out */
3668 const bool do_suspend_io = flags & (BM_DONT_CLEAR|BM_DONT_SET|BM_DONT_TEST);
3669 int rv;
3670
3671 D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
3672
3673 if (do_suspend_io)
3674 drbd_suspend_io(device);
3675
3676 drbd_bm_lock(device, why, flags);
3677 rv = io_fn(device);
3678 drbd_bm_unlock(device);
3679
3680 if (do_suspend_io)
3681 drbd_resume_io(device);
3682
3683 return rv;
3684}
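
/*
 * Usage sketch (illustrative, not part of the original source): running a
 * synchronous full-bitmap clear from a non-worker context.  The caller takes
 * the local-disk reference required by drbd_bmio_clear_n_write(); the reason
 * string and flag combination are assumptions for illustration.
 */
static int __maybe_unused example_clear_whole_bitmap(struct drbd_device *device)
{
	int err = -EIO;

	if (get_ldev(device)) {
		err = drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
				     "example: clear_n_write",
				     BM_DONT_CLEAR | BM_DONT_SET | BM_DONT_TEST);
		put_ldev(device);
	}
	return err;
}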
3685
3686void drbd_md_set_flag(struct drbd_device *device, int flag) __must_hold(local)
3687{
3688 if ((device->ldev->md.flags & flag) != flag) {
3689 drbd_md_mark_dirty(device);
3690 device->ldev->md.flags |= flag;
3691 }
3692}
3693
3694void drbd_md_clear_flag(struct drbd_device *device, int flag) __must_hold(local)
3695{
3696 if ((device->ldev->md.flags & flag) != 0) {
3697 drbd_md_mark_dirty(device);
3698 device->ldev->md.flags &= ~flag;
3699 }
3700}
3701int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
3702{
3703 return (bdev->md.flags & flag) != 0;
3704}
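
/*
 * Usage sketch (illustrative, not part of the original source): the flag
 * helpers above only toggle bits in the in-core copy and mark the super
 * block dirty; an explicit drbd_md_sync() (or the md_sync timer) persists
 * the change.  example_force_full_sync_flag() is an assumed name.
 */
static void __maybe_unused example_force_full_sync_flag(struct drbd_device *device)
{
	if (get_ldev(device)) {
		if (!drbd_md_test_flag(device->ldev, MDF_FULL_SYNC))
			drbd_md_set_flag(device, MDF_FULL_SYNC);
		drbd_md_sync(device);	/* write the flag change out now */
		put_ldev(device);
	}
}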
3705
3706static void md_sync_timer_fn(unsigned long data)
3707{
3708 struct drbd_device *device = (struct drbd_device *) data;
3709 drbd_device_post_work(device, MD_SYNC);
3710}
3711
3712const char *cmdname(enum drbd_packet cmd)
3713{
3714 /* THINK may need to become several global tables
3715 * when we want to support more than
3716 * one PRO_VERSION */
3717 static const char *cmdnames[] = {
3718 [P_DATA] = "Data",
3719 [P_WSAME] = "WriteSame",
3720 [P_TRIM] = "Trim",
3721 [P_DATA_REPLY] = "DataReply",
3722 [P_RS_DATA_REPLY] = "RSDataReply",
3723 [P_BARRIER] = "Barrier",
3724 [P_BITMAP] = "ReportBitMap",
3725 [P_BECOME_SYNC_TARGET] = "BecomeSyncTarget",
3726 [P_BECOME_SYNC_SOURCE] = "BecomeSyncSource",
3727 [P_UNPLUG_REMOTE] = "UnplugRemote",
3728 [P_DATA_REQUEST] = "DataRequest",
3729 [P_RS_DATA_REQUEST] = "RSDataRequest",
3730 [P_SYNC_PARAM] = "SyncParam",
3731 [P_SYNC_PARAM89] = "SyncParam89",
3732 [P_PROTOCOL] = "ReportProtocol",
3733 [P_UUIDS] = "ReportUUIDs",
3734 [P_SIZES] = "ReportSizes",
3735 [P_STATE] = "ReportState",
3736 [P_SYNC_UUID] = "ReportSyncUUID",
3737 [P_AUTH_CHALLENGE] = "AuthChallenge",
3738 [P_AUTH_RESPONSE] = "AuthResponse",
3739 [P_PING] = "Ping",
3740 [P_PING_ACK] = "PingAck",
3741 [P_RECV_ACK] = "RecvAck",
3742 [P_WRITE_ACK] = "WriteAck",
3743 [P_RS_WRITE_ACK] = "RSWriteAck",
3744 [P_SUPERSEDED] = "Superseded",
3745 [P_NEG_ACK] = "NegAck",
3746 [P_NEG_DREPLY] = "NegDReply",
3747 [P_NEG_RS_DREPLY] = "NegRSDReply",
3748 [P_BARRIER_ACK] = "BarrierAck",
3749 [P_STATE_CHG_REQ] = "StateChgRequest",
3750 [P_STATE_CHG_REPLY] = "StateChgReply",
3751 [P_OV_REQUEST] = "OVRequest",
3752 [P_OV_REPLY] = "OVReply",
3753 [P_OV_RESULT] = "OVResult",
3754 [P_CSUM_RS_REQUEST] = "CsumRSRequest",
3755 [P_RS_IS_IN_SYNC] = "CsumRSIsInSync",
3756 [P_COMPRESSED_BITMAP] = "CBitmap",
3757 [P_DELAY_PROBE] = "DelayProbe",
3758 [P_OUT_OF_SYNC] = "OutOfSync",
3759 [P_RETRY_WRITE] = "RetryWrite",
3760 [P_RS_CANCEL] = "RSCancel",
3761 [P_CONN_ST_CHG_REQ] = "conn_st_chg_req",
3762 [P_CONN_ST_CHG_REPLY] = "conn_st_chg_reply",
3763 [P_RETRY_WRITE] = "retry_write",
3764 [P_PROTOCOL_UPDATE] = "protocol_update",
3765 [P_RS_THIN_REQ] = "rs_thin_req",
3766 [P_RS_DEALLOCATED] = "rs_deallocated",
3767
3768 /* enum drbd_packet, but not commands - obsoleted flags:
3769 * P_MAY_IGNORE
3770 * P_MAX_OPT_CMD
3771 */
3772 };
3773
3774 /* too big for the array: 0xfffX */
3775 if (cmd == P_INITIAL_META)
3776 return "InitialMeta";
3777 if (cmd == P_INITIAL_DATA)
3778 return "InitialData";
3779 if (cmd == P_CONNECTION_FEATURES)
3780 return "ConnectionFeatures";
3781 if (cmd >= ARRAY_SIZE(cmdnames))
3782 return "Unknown";
3783 return cmdnames[cmd];
3784}
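
/*
 * Usage sketch (illustrative, not part of the original source): cmdname() is
 * meant for logging, e.g. when an unexpected packet arrives.  The message
 * text here is an assumption for illustration.
 */
static void __maybe_unused example_log_unexpected_packet(struct drbd_device *device,
							  enum drbd_packet cmd)
{
	drbd_warn(device, "unexpected packet %s (0x%04x)\n",
		  cmdname(cmd), (unsigned int)cmd);
}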
3785
3786/**
3787 * drbd_wait_misc - wait for a request to make progress
3788 * @device: device associated with the request
3789 * @i: the struct drbd_interval embedded in struct drbd_request or
3790 * struct drbd_peer_request
3791 */
3792int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i)
3793{
3794 struct net_conf *nc;
3795 DEFINE_WAIT(wait);
3796 long timeout;
3797
3798 rcu_read_lock();
3799 nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
3800 if (!nc) {
3801 rcu_read_unlock();
3802 return -ETIMEDOUT;
3803 }
3804 timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT;
3805 rcu_read_unlock();
3806
3807 /* Request a wake_up on device->misc_wait when this interval makes progress. */
3808 i->waiting = true;
3809 prepare_to_wait(&device->misc_wait, &wait, TASK_INTERRUPTIBLE);
3810 spin_unlock_irq(&device->resource->req_lock);
3811 timeout = schedule_timeout(timeout);
3812 finish_wait(&device->misc_wait, &wait);
3813 spin_lock_irq(&device->resource->req_lock);
3814 if (!timeout || device->state.conn < C_CONNECTED)
3815 return -ETIMEDOUT;
3816 if (signal_pending(current))
3817 return -ERESTARTSYS;
3818 return 0;
3819}
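
/*
 * Usage sketch (illustrative, not part of the original source): waiting for a
 * conflicting request to make progress.  drbd_wait_misc() expects
 * resource->req_lock to be held and drops/re-acquires it around the sleep.
 * example_wait_for_request() and the 'req' parameter are assumed; 'i' is the
 * embedded struct drbd_interval referenced in the kernel-doc above.
 */
static int __maybe_unused example_wait_for_request(struct drbd_device *device,
						   struct drbd_request *req)
{
	int err;

	spin_lock_irq(&device->resource->req_lock);
	err = drbd_wait_misc(device, &req->i);	/* 0, -ETIMEDOUT or -ERESTARTSYS */
	spin_unlock_irq(&device->resource->req_lock);

	return err;
}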
3820
3821void lock_all_resources(void)
3822{
3823 struct drbd_resource *resource;
3824 int __maybe_unused i = 0;
3825
3826 mutex_lock(&resources_mutex);
3827 local_irq_disable();
3828 for_each_resource(resource, &drbd_resources)
3829 spin_lock_nested(&resource->req_lock, i++);
3830}
3831
3832void unlock_all_resources(void)
3833{
3834 struct drbd_resource *resource;
3835
3836 for_each_resource(resource, &drbd_resources)
3837 spin_unlock(&resource->req_lock);
3838 local_irq_enable();
3839 mutex_unlock(&resources_mutex);
3840}
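
/*
 * Usage sketch (illustrative, not part of the original source): the two
 * helpers above must be used as a strict pair, since unlock_all_resources()
 * releases exactly the nested req_locks, the irq state and resources_mutex
 * taken by lock_all_resources().
 */
static void __maybe_unused example_with_all_resources_locked(void)
{
	lock_all_resources();
	/* ... inspect or modify state spanning all resources here ... */
	unlock_all_resources();
}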
3841
3842#ifdef CONFIG_DRBD_FAULT_INJECTION
3843/* Fault insertion support including random number generator shamelessly
3844 * stolen from kernel/rcutorture.c */
3845struct fault_random_state {
3846 unsigned long state;
3847 unsigned long count;
3848};
3849
3850#define FAULT_RANDOM_MULT 39916801 /* prime */
3851#define FAULT_RANDOM_ADD 479001701 /* prime */
3852#define FAULT_RANDOM_REFRESH 10000
3853
3854/*
3855 * Crude but fast random-number generator. Uses a linear congruential
3856 * generator, with occasional help from get_random_bytes().
3857 */
3858static unsigned long
3859_drbd_fault_random(struct fault_random_state *rsp)
3860{
3861 long refresh;
3862
3863 if (!rsp->count--) {
3864 get_random_bytes(&refresh, sizeof(refresh));
3865 rsp->state += refresh;
3866 rsp->count = FAULT_RANDOM_REFRESH;
3867 }
3868 rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
3869 return swahw32(rsp->state);
3870}
3871
3872static char *
3873_drbd_fault_str(unsigned int type) {
3874 static char *_faults[] = {
3875 [DRBD_FAULT_MD_WR] = "Meta-data write",
3876 [DRBD_FAULT_MD_RD] = "Meta-data read",
3877 [DRBD_FAULT_RS_WR] = "Resync write",
3878 [DRBD_FAULT_RS_RD] = "Resync read",
3879 [DRBD_FAULT_DT_WR] = "Data write",
3880 [DRBD_FAULT_DT_RD] = "Data read",
3881 [DRBD_FAULT_DT_RA] = "Data read ahead",
3882 [DRBD_FAULT_BM_ALLOC] = "BM allocation",
3883 [DRBD_FAULT_AL_EE] = "EE allocation",
3884 [DRBD_FAULT_RECEIVE] = "receive data corruption",
3885 };
3886
3887 return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
3888}
3889
3890unsigned int
3891_drbd_insert_fault(struct drbd_device *device, unsigned int type)
3892{
3893 static struct fault_random_state rrs = {0, 0};
3894
3895 unsigned int ret = (
3896 (fault_devs == 0 ||
3897 ((1 << device_to_minor(device)) & fault_devs) != 0) &&
3898 (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
3899
3900 if (ret) {
3901 fault_count++;
3902
3903 if (__ratelimit(&drbd_ratelimit_state))
3904 drbd_warn(device, "***Simulating %s failure\n",
3905 _drbd_fault_str(type));
3906 }
3907
3908 return ret;
3909}
3910#endif
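
#ifdef CONFIG_DRBD_FAULT_INJECTION
/*
 * Usage sketch (illustrative, not part of the original source): how an IO
 * submission path might consult the fault injector before issuing a real
 * request.  example_may_submit_data_write() is an assumed name; DRBD's real
 * IO paths perform this check through a small inline wrapper around
 * _drbd_insert_fault().
 */
static bool __maybe_unused example_may_submit_data_write(struct drbd_device *device)
{
	if (fault_rate && (enable_faults & (1 << DRBD_FAULT_DT_WR)) &&
	    _drbd_insert_fault(device, DRBD_FAULT_DT_WR))
		return false;	/* simulate a failed data write */
	return true;
}
#endif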
3911
3912const char *drbd_buildtag(void)
3913{
3914 /* When DRBD is built from external sources, this holds a reference to
3915 the git hash of the source code. */
3916
3917 static char buildtag[38] = "\0uilt-in";
3918
3919 if (buildtag[0] == 0) {
3920#ifdef MODULE
3921 sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
3922#else
3923 buildtag[0] = 'b';
3924#endif
3925 }
3926
3927 return buildtag;
3928}
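
/*
 * Usage sketch (illustrative, not part of the original source): the build tag
 * is typically emitted once at initialization; the message text here is an
 * assumption.
 */
static void __maybe_unused example_log_buildtag(void)
{
	pr_info("build tag: %s\n", drbd_buildtag());
}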
3929
3930module_init(drbd_init)
3931module_exit(drbd_cleanup)
3932
3933EXPORT_SYMBOL(drbd_conn_str);
3934EXPORT_SYMBOL(drbd_role_str);
3935EXPORT_SYMBOL(drbd_disk_str);
3936EXPORT_SYMBOL(drbd_set_st_err_str);