// SPDX-License-Identifier: GPL-2.0-only
/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.


 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
#include "drbd_state_change.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>

/* .doit */
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
/* .dumpit */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_devices_done(struct netlink_callback *cb);
int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_connections_done(struct netlink_callback *cb);
int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb);
int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb);

#include <linux/drbd_genl_api.h>
#include "drbd_nla.h"
#include <linux/genl_magic_func.h>

static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
static atomic_t notify_genl_seq = ATOMIC_INIT(2); /* two. */

DEFINE_MUTEX(notification_mutex);

/* used as the holder by blkdev_get_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";

static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
	if (genlmsg_reply(skb, info))
		pr_err("error sending genl reply\n");
}

/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: the only
 * reason it could fail would be lack of space in the skb, and there are 4k
 * available. */
static int drbd_msg_put_info(struct sk_buff *skb, const char *info)
{
	struct nlattr *nla;
	int err = -EMSGSIZE;

	if (!info || !info[0])
		return 0;

	nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	err = nla_put_string(skb, T_info_text, info);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	} else
		nla_nest_end(skb, nla);
	return 0;
}
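/* Illustrative use, as in drbd_adm_prepare() below: attach a human readable
 * note to the prepared reply skb before returning an error code, e.g.
 *	drbd_msg_put_info(adm_ctx->reply_skb, "unknown minor");
 *	return ERR_MINOR_INVALID;
 */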

__printf(2, 3)
static int drbd_msg_sprintf_info(struct sk_buff *skb, const char *fmt, ...)
{
	va_list args;
	struct nlattr *nla, *txt;
	int err = -EMSGSIZE;
	int len;

	nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	txt = nla_reserve(skb, T_info_text, 256);
	if (!txt) {
		nla_nest_cancel(skb, nla);
		return err;
	}
	va_start(args, fmt);
	len = vscnprintf(nla_data(txt), 256, fmt, args);
	va_end(args);

	/* maybe: retry with larger reserve, if truncated */
	txt->nla_len = nla_attr_size(len+1);
	nlmsg_trim(skb, (char *)txt + NLA_ALIGN(txt->nla_len));
	nla_nest_end(skb, nla);

	return 0;
}

/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 *
 * At this point, we still rely on the global genl_lock().
 * If we want to avoid that, and allow "genl_family.parallel_ops", we may need
 * to add additional synchronization against object destruction/modification.
 */
#define DRBD_ADM_NEED_MINOR	1
#define DRBD_ADM_NEED_RESOURCE	2
#define DRBD_ADM_NEED_CONNECTION	4
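/* The NEED_* values above form a bitmask of requirements a handler passes to
 * drbd_adm_prepare(); e.g. drbd_adm_set_role() below uses
 *	drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
 * and may then rely on adm_ctx.device being valid. */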
static int drbd_adm_prepare(struct drbd_config_context *adm_ctx,
	struct sk_buff *skb, struct genl_info *info, unsigned flags)
{
	struct drbd_genlmsghdr *d_in = info->userhdr;
	const u8 cmd = info->genlhdr->cmd;
	int err;

	memset(adm_ctx, 0, sizeof(*adm_ctx));

	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
	if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN))
		return -EPERM;

	adm_ctx->reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!adm_ctx->reply_skb) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx->reply_dh = genlmsg_put_reply(adm_ctx->reply_skb,
					info, &drbd_genl_family, 0, cmd);
	/* a put of a few bytes into a fresh skb of >= 4k will always succeed.
	 * but be paranoid anyway */
	if (!adm_ctx->reply_dh) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx->reply_dh->minor = d_in->minor;
	adm_ctx->reply_dh->ret_code = NO_ERROR;

	adm_ctx->volume = VOLUME_UNSPECIFIED;
	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
		struct nlattr *nla;
		/* parse and validate only */
		err = drbd_cfg_context_from_attrs(NULL, info);
		if (err)
			goto fail;

		/* It was present, and valid,
		 * copy it over to the reply skb. */
		err = nla_put_nohdr(adm_ctx->reply_skb,
				info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
				info->attrs[DRBD_NLA_CFG_CONTEXT]);
		if (err)
			goto fail;

		/* and assign stuff to the adm_ctx */
		nla = nested_attr_tb[__nla_type(T_ctx_volume)];
		if (nla)
			adm_ctx->volume = nla_get_u32(nla);
		nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
		if (nla)
			adm_ctx->resource_name = nla_data(nla);
		adm_ctx->my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
		adm_ctx->peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
		if ((adm_ctx->my_addr &&
		     nla_len(adm_ctx->my_addr) > sizeof(adm_ctx->connection->my_addr)) ||
		    (adm_ctx->peer_addr &&
		     nla_len(adm_ctx->peer_addr) > sizeof(adm_ctx->connection->peer_addr))) {
			err = -EINVAL;
			goto fail;
		}
	}

	adm_ctx->minor = d_in->minor;
	adm_ctx->device = minor_to_device(d_in->minor);

	/* We are protected by the global genl_lock().
	 * But we may explicitly drop it/retake it in drbd_adm_set_role(),
	 * so make sure this object stays around. */
	if (adm_ctx->device)
		kref_get(&adm_ctx->device->kref);

	if (adm_ctx->resource_name) {
		adm_ctx->resource = drbd_find_resource(adm_ctx->resource_name);
	}

	if (!adm_ctx->device && (flags & DRBD_ADM_NEED_MINOR)) {
		drbd_msg_put_info(adm_ctx->reply_skb, "unknown minor");
		return ERR_MINOR_INVALID;
	}
	if (!adm_ctx->resource && (flags & DRBD_ADM_NEED_RESOURCE)) {
		drbd_msg_put_info(adm_ctx->reply_skb, "unknown resource");
		if (adm_ctx->resource_name)
			return ERR_RES_NOT_KNOWN;
		return ERR_INVALID_REQUEST;
	}

	if (flags & DRBD_ADM_NEED_CONNECTION) {
		if (adm_ctx->resource) {
			drbd_msg_put_info(adm_ctx->reply_skb, "no resource name expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx->device) {
			drbd_msg_put_info(adm_ctx->reply_skb, "no minor number expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx->my_addr && adm_ctx->peer_addr)
			adm_ctx->connection = conn_get_by_addrs(nla_data(adm_ctx->my_addr),
							nla_len(adm_ctx->my_addr),
							nla_data(adm_ctx->peer_addr),
							nla_len(adm_ctx->peer_addr));
		if (!adm_ctx->connection) {
			drbd_msg_put_info(adm_ctx->reply_skb, "unknown connection");
			return ERR_INVALID_REQUEST;
		}
	}

	/* some more paranoia, if the request was over-determined */
	if (adm_ctx->device && adm_ctx->resource &&
	    adm_ctx->device->resource != adm_ctx->resource) {
		pr_warn("request: minor=%u, resource=%s; but that minor belongs to resource %s\n",
			adm_ctx->minor, adm_ctx->resource->name,
			adm_ctx->device->resource->name);
		drbd_msg_put_info(adm_ctx->reply_skb, "minor exists in different resource");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx->device &&
	    adm_ctx->volume != VOLUME_UNSPECIFIED &&
	    adm_ctx->volume != adm_ctx->device->vnr) {
		pr_warn("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
			adm_ctx->minor, adm_ctx->volume,
			adm_ctx->device->vnr, adm_ctx->device->resource->name);
		drbd_msg_put_info(adm_ctx->reply_skb, "minor exists as different volume");
		return ERR_INVALID_REQUEST;
	}

	/* still, provide adm_ctx->resource always, if possible. */
	if (!adm_ctx->resource) {
		adm_ctx->resource = adm_ctx->device ? adm_ctx->device->resource
			: adm_ctx->connection ? adm_ctx->connection->resource : NULL;
		if (adm_ctx->resource)
			kref_get(&adm_ctx->resource->kref);
	}

	return NO_ERROR;

fail:
	nlmsg_free(adm_ctx->reply_skb);
	adm_ctx->reply_skb = NULL;
	return err;
}

static int drbd_adm_finish(struct drbd_config_context *adm_ctx,
	struct genl_info *info, int retcode)
{
	if (adm_ctx->device) {
		kref_put(&adm_ctx->device->kref, drbd_destroy_device);
		adm_ctx->device = NULL;
	}
	if (adm_ctx->connection) {
		kref_put(&adm_ctx->connection->kref, &drbd_destroy_connection);
		adm_ctx->connection = NULL;
	}
	if (adm_ctx->resource) {
		kref_put(&adm_ctx->resource->kref, drbd_destroy_resource);
		adm_ctx->resource = NULL;
	}

	if (!adm_ctx->reply_skb)
		return -ENOMEM;

	adm_ctx->reply_dh->ret_code = retcode;
	drbd_adm_send_reply(adm_ctx->reply_skb, info);
	return 0;
}

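/* Note: envp[3] and envp[4] are the caller-provided scratch buffers for
 * "DRBD_PEER_AF=..." and "DRBD_PEER_ADDRESS=...", as set up by
 * drbd_khelper() and conn_khelper() below. */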
static void setup_khelper_env(struct drbd_connection *connection, char **envp)
{
	char *afs;

	/* FIXME: A future version will not allow this case. */
	if (connection->my_addr_len == 0 || connection->peer_addr_len == 0)
		return;

	switch (((struct sockaddr *)&connection->peer_addr)->sa_family) {
	case AF_INET6:
		afs = "ipv6";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
			 &((struct sockaddr_in6 *)&connection->peer_addr)->sin6_addr);
		break;
	case AF_INET:
		afs = "ipv4";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
		break;
	default:
		afs = "ssocks";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
	}
	snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
}

int drbd_khelper(struct drbd_device *device, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char mb[14];
	char *argv[] = {drbd_usermode_helper, cmd, mb, NULL };
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct sib_info sib;
	int ret;

	if (current == connection->worker.task)
		set_bit(CALLBACK_PENDING, &connection->flags);

	snprintf(mb, 14, "minor-%d", device_to_minor(device));
	setup_khelper_env(connection, envp);

	/* The helper may take some time.
	 * write out any unsynced meta data changes now */
	drbd_md_sync(device);

	drbd_info(device, "helper command: %s %s %s\n", drbd_usermode_helper, cmd, mb);
	sib.sib_reason = SIB_HELPER_PRE;
	sib.helper_name = cmd;
	drbd_bcast_event(device, &sib);
	notify_helper(NOTIFY_CALL, device, connection, cmd, 0);
	ret = call_usermodehelper(drbd_usermode_helper, argv, envp, UMH_WAIT_PROC);
	if (ret)
		drbd_warn(device, "helper command: %s %s %s exit code %u (0x%x)\n",
			  drbd_usermode_helper, cmd, mb,
			  (ret >> 8) & 0xff, ret);
	else
		drbd_info(device, "helper command: %s %s %s exit code %u (0x%x)\n",
			  drbd_usermode_helper, cmd, mb,
			  (ret >> 8) & 0xff, ret);
	sib.sib_reason = SIB_HELPER_POST;
	sib.helper_exit_code = ret;
	drbd_bcast_event(device, &sib);
	notify_helper(NOTIFY_RESPONSE, device, connection, cmd, ret);

	if (current == connection->worker.task)
		clear_bit(CALLBACK_PENDING, &connection->flags);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char *resource_name = connection->resource->name;
	char *argv[] = {drbd_usermode_helper, cmd, resource_name, NULL };
	int ret;

	setup_khelper_env(connection, envp);
	conn_md_sync(connection);

	drbd_info(connection, "helper command: %s %s %s\n", drbd_usermode_helper, cmd, resource_name);
	/* TODO: conn_bcast_event() ?? */
	notify_helper(NOTIFY_CALL, NULL, connection, cmd, 0);

	ret = call_usermodehelper(drbd_usermode_helper, argv, envp, UMH_WAIT_PROC);
	if (ret)
		drbd_warn(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
			  drbd_usermode_helper, cmd, resource_name,
			  (ret >> 8) & 0xff, ret);
	else
		drbd_info(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
			  drbd_usermode_helper, cmd, resource_name,
			  (ret >> 8) & 0xff, ret);
	/* TODO: conn_bcast_event() ?? */
	notify_helper(NOTIFY_RESPONSE, NULL, connection, cmd, ret);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

static enum drbd_fencing_p highest_fencing_policy(struct drbd_connection *connection)
{
	enum drbd_fencing_p fp = FP_NOT_AVAIL;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		if (get_ldev_if_state(device, D_CONSISTENT)) {
			struct disk_conf *disk_conf =
				rcu_dereference(peer_device->device->ldev->disk_conf);
			fp = max_t(enum drbd_fencing_p, fp, disk_conf->fencing);
			put_ldev(device);
		}
	}
	rcu_read_unlock();

	return fp;
}

static bool resource_is_suspended(struct drbd_resource *resource)
{
	return resource->susp || resource->susp_fen || resource->susp_nod;
}

bool conn_try_outdate_peer(struct drbd_connection *connection)
{
	struct drbd_resource * const resource = connection->resource;
	unsigned int connect_cnt;
	union drbd_state mask = { };
	union drbd_state val = { };
	enum drbd_fencing_p fp;
	char *ex_to_string;
	int r;

	spin_lock_irq(&resource->req_lock);
	if (connection->cstate >= C_WF_REPORT_PARAMS) {
		drbd_err(connection, "Expected cstate < C_WF_REPORT_PARAMS\n");
		spin_unlock_irq(&resource->req_lock);
		return false;
	}

	connect_cnt = connection->connect_cnt;
	spin_unlock_irq(&resource->req_lock);

	fp = highest_fencing_policy(connection);
	switch (fp) {
	case FP_NOT_AVAIL:
		drbd_warn(connection, "Not fencing peer, I'm not even Consistent myself.\n");
		spin_lock_irq(&resource->req_lock);
		if (connection->cstate < C_WF_REPORT_PARAMS) {
			_conn_request_state(connection,
					    (union drbd_state) { { .susp_fen = 1 } },
					    (union drbd_state) { { .susp_fen = 0 } },
					    CS_VERBOSE | CS_HARD | CS_DC_SUSP);
			/* We are no longer suspended due to the fencing policy.
			 * We may still be suspended due to the on-no-data-accessible policy.
			 * If that was OND_IO_ERROR, fail pending requests. */
			if (!resource_is_suspended(resource))
				_tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
		}
		/* Else: in case we raced with a connection handshake,
		 * let the handshake figure out if we maybe can RESEND,
		 * and do not resume/fail pending requests here.
		 * Worst case is we stay suspended for now, which may be
		 * resolved by either re-establishing the replication link, or
		 * the next link failure, or eventually the administrator. */
		spin_unlock_irq(&resource->req_lock);
		return false;

	case FP_DONT_CARE:
		return true;
	default: ;
	}

	r = conn_khelper(connection, "fence-peer");

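	/* call_usermodehelper(..., UMH_WAIT_PROC) returns a wait(2)-style
	 * status, so (r>>8) & 0xff below extracts the helper's exit code;
	 * the cases that follow are the fence-peer handler's exit-code
	 * convention. */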
	switch ((r>>8) & 0xff) {
	case P_INCONSISTENT: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		mask.pdsk = D_MASK;
		val.pdsk = D_INCONSISTENT;
		break;
	case P_OUTDATED: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	case P_DOWN: /* peer was down */
		if (conn_highest_disk(connection) == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			mask.pdsk = D_MASK;
			val.pdsk = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
		}
		break;
	case P_PRIMARY: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		drbd_warn(connection, "Peer is primary, outdating myself.\n");
		mask.disk = D_MASK;
		val.disk = D_OUTDATED;
		break;
	case P_FENCING:
		/* THINK: do we need to handle this
		 * like case 4, or more like case 5? */
		if (fp != FP_STONITH)
			drbd_err(connection, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		drbd_err(connection, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return false; /* Eventually leave IO frozen */
	}

	drbd_info(connection, "fence-peer helper returned %d (%s)\n",
		  (r>>8) & 0xff, ex_to_string);

	/* Not using
	   conn_request_state(connection, mask, val, CS_VERBOSE);
	   here, because we might have been able to re-establish the connection
	   in the meantime. */
	spin_lock_irq(&resource->req_lock);
	if (connection->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &connection->flags)) {
		if (connection->connect_cnt != connect_cnt)
			/* In case the connection was established and dropped
			   while the fence-peer handler was running, ignore it */
			drbd_info(connection, "Ignoring fence-peer exit code\n");
		else
			_conn_request_state(connection, mask, val, CS_VERBOSE);
	}
	spin_unlock_irq(&resource->req_lock);

	return conn_highest_pdsk(connection) <= D_OUTDATED;
}

static int _try_outdate_peer_async(void *data)
{
	struct drbd_connection *connection = (struct drbd_connection *)data;

	conn_try_outdate_peer(connection);

	kref_put(&connection->kref, drbd_destroy_connection);
	return 0;
}

void conn_try_outdate_peer_async(struct drbd_connection *connection)
{
	struct task_struct *opa;

	kref_get(&connection->kref);
	/* We may have just sent a signal to this thread
	 * to get it out of some blocking network function.
	 * Clear signals; otherwise kthread_run(), which internally uses
	 * wait_on_completion_killable(), will mistake our pending signal
	 * for a new fatal signal and fail. */
	flush_signals(current);
	opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
	if (IS_ERR(opa)) {
		drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
		kref_put(&connection->kref, drbd_destroy_connection);
	}
}

enum drbd_state_rv
drbd_set_role(struct drbd_device *const device, enum drbd_role new_role, int force)
{
	struct drbd_peer_device *const peer_device = first_peer_device(device);
	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	struct net_conf *nc;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;

	if (new_role == R_PRIMARY) {
		struct drbd_connection *connection;

		/* Detect dead peers as soon as possible. */

		rcu_read_lock();
		for_each_connection(connection, device->resource)
			request_ping(connection);
		rcu_read_unlock();
	}

	mutex_lock(device->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state_holding_state_mutex(device, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (device->state.disk < D_UP_TO_DATE &&
		     device->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk  = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    device->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(device, device->state.pdsk == D_UNKNOWN);

			if (conn_try_outdate_peer(connection)) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}
			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto out;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			if (!conn_try_outdate_peer(connection) && force) {
				drbd_warn(device, "Forced into split brain situation!\n");
				mask.pdsk = D_MASK;
				val.pdsk  = D_OUTDATED;

			}
			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			if (try < max_tries) {
				int timeo;
				try = max_tries - 1;
				rcu_read_lock();
				nc = rcu_dereference(connection->net_conf);
				timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
				rcu_read_unlock();
				schedule_timeout_interruptible(timeo);
			}
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(device, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto out;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto out;

	if (forced)
		drbd_warn(device, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(device->misc_wait, atomic_read(&device->ap_pending_cnt) == 0);

	/* FIXME also wait for all pending P_BARRIER_ACK? */

	if (new_role == R_SECONDARY) {
		if (get_ldev(device)) {
			device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(device);
		}
	} else {
		mutex_lock(&device->resource->conf_update);
		nc = connection->net_conf;
		if (nc)
			nc->discard_my_data = 0; /* without copy; single bit op is atomic */
		mutex_unlock(&device->resource->conf_update);

		if (get_ldev(device)) {
			if (((device->state.conn < C_CONNECTED ||
			      device->state.pdsk <= D_FAILED)
			     && device->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(device);

			device->ldev->md.uuid[UI_CURRENT] |= (u64)1;
			put_ldev(device);
		}
	}

	/* Writeout of the activity-log covered areas of the bitmap to stable
	 * storage is already done in the after-state-change work. */

	if (device->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(peer_device);
		drbd_send_current_state(peer_device);
	}

	drbd_md_sync(device);
	set_disk_ro(device->vdisk, new_role == R_SECONDARY);
	kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
out:
	mutex_unlock(device->state_mutex);
	return rv;
}

static const char *from_attrs_err_to_txt(int err)
{
	return	err == -ENOMSG ? "required attribute missing" :
		err == -EOPNOTSUPP ? "unknown mandatory attribute" :
		err == -EEXIST ? "can not change invariant setting" :
		"invalid attribute value";
}

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct set_role_parms parms;
	int err;
	enum drbd_ret_code retcode;
	enum drbd_state_rv rv;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
		err = set_role_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
			goto out;
		}
	}
	genl_unlock();
	mutex_lock(&adm_ctx.resource->adm_mutex);

	if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
		rv = drbd_set_role(adm_ctx.device, R_PRIMARY, parms.assume_uptodate);
	else
		rv = drbd_set_role(adm_ctx.device, R_SECONDARY, 0);

	mutex_unlock(&adm_ctx.resource->adm_mutex);
	genl_lock();
	drbd_adm_finish(&adm_ctx, info, rv);
	return 0;
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

/* Initializes the md.*_offset members, so we are able to find
 * the on disk meta data.
 *
 * We currently have two possible layouts:
 * external:
 *   |----------- md_size_sect ------------------|
 *   [ 4k superblock ][ activity log ][  Bitmap  ]
 *   | al_offset == 8 |
 *   | bm_offset = al_offset + X      |
 *  ==> bitmap sectors = md_size_sect - bm_offset
 *
 * internal:
 *            |----------- md_size_sect ------------------|
 * [data.....][  Bitmap  ][ activity log ][ 4k superblock ]
 *                        | al_offset < 0 |
 *            | bm_offset = al_offset - Y |
 *  ==> bitmap sectors = Y = al_offset - bm_offset
 *
 * Activity log size used to be fixed 32kB,
 * but is about to become configurable.
 */
static void drbd_md_set_sector_offsets(struct drbd_device *device,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	unsigned int al_size_sect = bdev->md.al_size_4k * 8;

	bdev->md.md_offset = drbd_md_ss(bdev);

	switch (bdev->md.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_128MB_SECT;
		bdev->md.al_offset = MD_4kB_SECT;
		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.al_offset = MD_4kB_SECT;
		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		/* al size is still fixed */
		bdev->md.al_offset = -al_size_sect;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_4kB_SECT + al_size_sect;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_4kB_SECT;
		break;
	}
}
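/* Worked example for the internal layout, under stated assumptions
 * (BM_SECT_PER_EXT == 32768, i.e. one 512 byte bitmap sector tracks 16 MiB
 * of data; MD_4kB_SECT == 8; the traditional 32 KiB activity log, so
 * al_size_sect == 64): a 1 TiB backing device (2^31 sectors) needs about
 * 2^31 / 32768 == 65536 bitmap sectors (32 MiB), giving
 * md_size_sect ~= 65536 + 8 + 64, al_offset == -64 and
 * bm_offset == -md_size_sect + 8; roughly the well-known rule of thumb of
 * ~32 KiB of meta data per GiB of backing storage. */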

/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);

	return buf;
}
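/* Example: ppsize(buf, 1048576) yields "1024 MB" (1 GiB given in KB), and
 * ppsize(buf, 3) yields "3 KB". The "+ !!(size & (1<<9))" above rounds to
 * the nearest multiple of 1024 while shifting. */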

/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
 * remote READ does inc_ap_bio, receiver would need to receive answer
 * packet from remote to dec_ap_bio again.
 * receiver receive_sizes(), comes here,
 * waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 * (not connected, or bad/no disk on peer):
 * see drbd_fail_request_early, ap_bio_cnt is zero.
 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 * peer may not initiate a resize.
 */
/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes a device->flag, is triggered by drbd internals,
 * and should be short-lived. */
/* It needs to be a counter, since multiple threads might
   independently suspend and resume IO. */
void drbd_suspend_io(struct drbd_device *device)
{
	atomic_inc(&device->suspend_cnt);
	if (drbd_suspended(device))
		return;
	wait_event(device->misc_wait, !atomic_read(&device->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_device *device)
{
	if (atomic_dec_and_test(&device->suspend_cnt))
		wake_up(&device->misc_wait);
}
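/* A minimal usage sketch (this exact pairing is used by
 * drbd_determine_dev_size() and disk_opts_check_al_size() below):
 *
 *	drbd_suspend_io(device);
 *	... manipulate sizes / meta data with IO quiesced ...
 *	drbd_resume_io(device);
 *
 * Calls may nest, since suspend_cnt is a counter. */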

/*
 * drbd_determine_dev_size() - Sets the right device size obeying all constraints
 * @device:	DRBD device.
 *
 * Returns 0 on success, negative return values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size
drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct resize_parms *rs) __must_hold(local)
{
	struct md_offsets_and_sizes {
		u64 last_agreed_sect;
		u64 md_offset;
		s32 al_offset;
		s32 bm_offset;
		u32 md_size_sect;

		u32 al_stripes;
		u32 al_stripe_size_4k;
	} prev;
	sector_t u_size, size;
	struct drbd_md *md = &device->ldev->md;
	void *buffer;

	int md_moved, la_size_changed;
	enum determine_dev_size rv = DS_UNCHANGED;

	/* We may change the on-disk offsets of our meta data below. Lock out
	 * anything that may cause meta data IO, to avoid acting on incomplete
	 * layout changes or scribbling over meta data that is in the process
	 * of being moved.
	 *
	 * Move is not exactly correct, btw, currently we have all our meta
	 * data in core memory, to "move" it we just write it all out, there
	 * are no reads. */
	drbd_suspend_io(device);
	buffer = drbd_md_get_buffer(device, __func__); /* Lock meta-data IO */
	if (!buffer) {
		drbd_resume_io(device);
		return DS_ERROR;
	}

	/* remember current offset and sizes */
	prev.last_agreed_sect = md->la_size_sect;
	prev.md_offset = md->md_offset;
	prev.al_offset = md->al_offset;
	prev.bm_offset = md->bm_offset;
	prev.md_size_sect = md->md_size_sect;
	prev.al_stripes = md->al_stripes;
	prev.al_stripe_size_4k = md->al_stripe_size_4k;

	if (rs) {
		/* rs is non NULL if we should change the AL layout only */
		md->al_stripes = rs->al_stripes;
		md->al_stripe_size_4k = rs->al_stripe_size / 4;
		md->al_size_4k = (u64)rs->al_stripes * rs->al_stripe_size / 4;
	}

	drbd_md_set_sector_offsets(device, device->ldev);

	rcu_read_lock();
	u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	size = drbd_new_dev_size(device, device->ldev, u_size, flags & DDSF_FORCED);

	if (size < prev.last_agreed_sect) {
		if (rs && u_size == 0) {
			/* Remove "rs &&" later. This check should always be active, but
			   right now the receiver expects the permissive behavior */
			drbd_warn(device, "Implicit shrink not allowed. "
				 "Use --size=%llus for explicit shrink.\n",
				 (unsigned long long)size);
			rv = DS_ERROR_SHRINK;
		}
		if (u_size > size)
			rv = DS_ERROR_SPACE_MD;
		if (rv != DS_UNCHANGED)
			goto err_out;
	}

	if (get_capacity(device->vdisk) != size ||
	    drbd_bm_capacity(device) != size) {
		int err;
		err = drbd_bm_resize(device, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(device);
			if (size == 0) {
				drbd_err(device, "OUT OF MEMORY! "
					"Could not allocate bitmap!\n");
			} else {
				drbd_err(device, "BM resizing failed. "
					"Leaving size unchanged\n");
			}
			rv = DS_ERROR;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(device, size);
		md->la_size_sect = size;
	}
	if (rv <= DS_ERROR)
		goto err_out;

	la_size_changed = (prev.last_agreed_sect != md->la_size_sect);

	md_moved = prev.md_offset    != md->md_offset
		|| prev.md_size_sect != md->md_size_sect;

	if (la_size_changed || md_moved || rs) {
		u32 prev_flags;

		/* We do some synchronous IO below, which may take some time.
		 * Clear the timer, to avoid scary "timer expired!" messages,
		 * "Superblock" is written out at least twice below, anyways. */
		del_timer(&device->md_sync_timer);

		/* We won't change the "al-extents" setting, we just may need
		 * to move the on-disk location of the activity log ringbuffer.
		 * Lock for transaction is good enough, it may well be "dirty"
		 * or even "starving". */
		wait_event(device->al_wait, lc_try_lock_for_transaction(device->act_log));

		/* mark current on-disk bitmap and activity log as unreliable */
		prev_flags = md->flags;
		md->flags |= MDF_FULL_SYNC | MDF_AL_DISABLED;
		drbd_md_write(device, buffer);

		drbd_al_initialize(device, buffer);

		drbd_info(device, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		drbd_bitmap_io(device, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
			       "size changed", BM_LOCKED_MASK);

		/* on-disk bitmap and activity log is authoritative again
		 * (unless there was an IO error meanwhile...) */
		md->flags = prev_flags;
		drbd_md_write(device, buffer);

		if (rs)
			drbd_info(device, "Changed AL layout to al-stripes = %d, al-stripe-size-kB = %d\n",
				  md->al_stripes, md->al_stripe_size_4k * 4);
	}

	if (size > prev.last_agreed_sect)
		rv = prev.last_agreed_sect ? DS_GREW : DS_GREW_FROM_ZERO;
	if (size < prev.last_agreed_sect)
		rv = DS_SHRUNK;

	if (0) {
	err_out:
		/* restore previous offset and sizes */
		md->la_size_sect = prev.last_agreed_sect;
		md->md_offset = prev.md_offset;
		md->al_offset = prev.al_offset;
		md->bm_offset = prev.bm_offset;
		md->md_size_sect = prev.md_size_sect;
		md->al_stripes = prev.al_stripes;
		md->al_stripe_size_4k = prev.al_stripe_size_4k;
		md->al_size_4k = (u64)prev.al_stripes * prev.al_stripe_size_4k;
	}
	lc_unlock(device->act_log);
	wake_up(&device->al_wait);
	drbd_md_put_buffer(device);
	drbd_resume_io(device);

	return rv;
}

sector_t
drbd_new_dev_size(struct drbd_device *device, struct drbd_backing_dev *bdev,
		  sector_t u_size, int assume_peer_has_space)
{
	sector_t p_size = device->p_size;   /* partner's disk size. */
	sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (device->state.conn < C_CONNECTED && assume_peer_has_space) {
		drbd_warn(device, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size_sect) {
			size = la_size_sect;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		drbd_err(device, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			drbd_err(device, "Requested disk size is too big (%lu > %lu)\n",
				(unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}
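/* Illustrative values only: with p_size == 2000000 and m_size == 1000000
 * sectors while connected, size becomes 1000000 (the minimum of the two);
 * a user request of u_size == 500000 then wins, while u_size == 3000000 is
 * rejected as too big and the negotiated size is kept. */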

/*
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @device:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	if (device->act_log &&
	    device->act_log->nr_elements == dc->al_extents)
		return 0;

	in_use = 0;
	t = device->act_log;
	n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
		dc->al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		drbd_err(device, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&device->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				drbd_err(device, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		device->act_log = n;
	spin_unlock_irq(&device->al_lock);
	if (in_use) {
		drbd_err(device, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		lc_destroy(t);
	}
	drbd_md_mark_dirty(device); /* we changed device->act_log->nr_elements */
	return 0;
}

static void blk_queue_discard_granularity(struct request_queue *q, unsigned int granularity)
{
	q->limits.discard_granularity = granularity;
}

static unsigned int drbd_max_discard_sectors(struct drbd_connection *connection)
{
	/* when we introduced REQ_WRITE_SAME support, we also bumped
	 * our maximum supported batch bio size used for discards. */
	if (connection->agreed_features & DRBD_FF_WSAME)
		return DRBD_MAX_BBIO_SECTORS;
	/* before, with DRBD <= 8.4.6, we only allowed up to one AL_EXTENT_SIZE. */
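	/* assuming the usual AL_EXTENT_SHIFT of 22, that is 4 MiB, i.e. 8192 sectors */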
	return AL_EXTENT_SIZE >> 9;
}

static void decide_on_discard_support(struct drbd_device *device,
		struct drbd_backing_dev *bdev)
{
	struct drbd_connection *connection =
		first_peer_device(device)->connection;
	struct request_queue *q = device->rq_queue;
	unsigned int max_discard_sectors;

	if (bdev && !bdev_max_discard_sectors(bdev->backing_bdev))
		goto not_supported;

	if (connection->cstate >= C_CONNECTED &&
	    !(connection->agreed_features & DRBD_FF_TRIM)) {
		drbd_info(connection,
			"peer DRBD too old, does not support TRIM: disabling discards\n");
		goto not_supported;
	}

	/*
	 * We don't care for the granularity, really.
	 *
	 * Stacking limits below should fix it for the local device. Whether or
	 * not it is a suitable granularity on the remote device is not our
	 * problem, really. If you care, you need to use devices with similar
	 * topology on all peers.
	 */
	blk_queue_discard_granularity(q, 512);
	max_discard_sectors = drbd_max_discard_sectors(connection);
	blk_queue_max_discard_sectors(q, max_discard_sectors);
	blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
	return;

not_supported:
	blk_queue_discard_granularity(q, 0);
	blk_queue_max_discard_sectors(q, 0);
}

static void fixup_write_zeroes(struct drbd_device *device, struct request_queue *q)
{
	/* Fixup max_write_zeroes_sectors after blk_stack_limits():
	 * if we can handle "zeroes" efficiently on the protocol,
	 * we want to do that, even if our backend does not announce
	 * max_write_zeroes_sectors itself. */
	struct drbd_connection *connection = first_peer_device(device)->connection;
	/* If the peer announces WZEROES support, use it.  Otherwise, rather
	 * send explicit zeroes than rely on some discard-zeroes-data magic. */
	if (connection->agreed_features & DRBD_FF_WZEROES)
		q->limits.max_write_zeroes_sectors = DRBD_MAX_BBIO_SECTORS;
	else
		q->limits.max_write_zeroes_sectors = 0;
}

static void fixup_discard_support(struct drbd_device *device, struct request_queue *q)
{
	unsigned int max_discard = device->rq_queue->limits.max_discard_sectors;
	unsigned int discard_granularity =
		device->rq_queue->limits.discard_granularity >> SECTOR_SHIFT;

	if (discard_granularity > max_discard) {
		blk_queue_discard_granularity(q, 0);
		blk_queue_max_discard_sectors(q, 0);
	}
}

static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev,
				   unsigned int max_bio_size, struct o_qlim *o)
{
	struct request_queue * const q = device->rq_queue;
	unsigned int max_hw_sectors = max_bio_size >> 9;
	unsigned int max_segments = 0;
	struct request_queue *b = NULL;
	struct disk_conf *dc;

	if (bdev) {
		b = bdev->backing_bdev->bd_disk->queue;

		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
		rcu_read_lock();
		dc = rcu_dereference(device->ldev->disk_conf);
		max_segments = dc->max_bio_bvecs;
		rcu_read_unlock();

		blk_set_stacking_limits(&q->limits);
	}

	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_SIZE-1);
	decide_on_discard_support(device, bdev);

	if (b) {
		blk_stack_limits(&q->limits, &b->limits, 0);
		disk_update_readahead(device->vdisk);
	}
	fixup_write_zeroes(device, q);
	fixup_discard_support(device, q);
}

void drbd_reconsider_queue_parameters(struct drbd_device *device, struct drbd_backing_dev *bdev, struct o_qlim *o)
{
	unsigned int now, new, local, peer;

	now = queue_max_hw_sectors(device->rq_queue) << 9;
	local = device->local_max_bio_size; /* Eventually last known value, from volatile memory */
	peer = device->peer_max_bio_size; /* Eventually last known value, from meta data */

	if (bdev) {
		local = queue_max_hw_sectors(bdev->backing_bdev->bd_disk->queue) << 9;
		device->local_max_bio_size = local;
	}
	local = min(local, DRBD_MAX_BIO_SIZE);

	/* We may ignore peer limits if the peer is modern enough.
	 * Because from 8.3.8 onwards the peer can use multiple
	 * BIOs for a single peer_request. */
	if (device->state.conn >= C_WF_REPORT_PARAMS) {
		if (first_peer_device(device)->connection->agreed_pro_version < 94)
			peer = min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
		/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
		else if (first_peer_device(device)->connection->agreed_pro_version == 94)
			peer = DRBD_MAX_SIZE_H80_PACKET;
		else if (first_peer_device(device)->connection->agreed_pro_version < 100)
			peer = DRBD_MAX_BIO_SIZE_P95;  /* drbd 8.3.8 onwards, before 8.4.0 */
		else
			peer = DRBD_MAX_BIO_SIZE;

		/* We may later detach and re-attach on a disconnected Primary.
		 * Avoid this setting to jump back in that case.
		 * We want to store what we know the peer DRBD can handle,
		 * not what the peer IO backend can handle. */
		if (peer > device->peer_max_bio_size)
			device->peer_max_bio_size = peer;
	}
	new = min(local, peer);

	if (device->state.role == R_PRIMARY && new < now)
		drbd_err(device, "ASSERT FAILED new < now; (%u < %u)\n", new, now);

	if (new != now)
		drbd_info(device, "max BIO size = %u\n", new);

	drbd_setup_queue_param(device, bdev, new, o);
}

/* Starts the worker thread */
static void conn_reconfig_start(struct drbd_connection *connection)
{
	drbd_thread_start(&connection->worker);
	drbd_flush_workqueue(&connection->sender_work);
}

/* if still unconfigured, stops worker again. */
static void conn_reconfig_done(struct drbd_connection *connection)
{
	bool stop_threads;
	spin_lock_irq(&connection->resource->req_lock);
	stop_threads = conn_all_vols_unconf(connection) &&
		connection->cstate == C_STANDALONE;
	spin_unlock_irq(&connection->resource->req_lock);
	if (stop_threads) {
		/* ack_receiver thread and ack_sender workqueue are implicitly
		 * stopped by receiver in conn_disconnect() */
		drbd_thread_stop(&connection->receiver);
		drbd_thread_stop(&connection->worker);
	}
}
/* Make sure IO is suspended before calling this function. */
static void drbd_suspend_al(struct drbd_device *device)
{
	int s = 0;

	if (!lc_try_lock(device->act_log)) {
		drbd_warn(device, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	drbd_al_shrink(device);
	spin_lock_irq(&device->resource->req_lock);
	if (device->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &device->flags);
	spin_unlock_irq(&device->resource->req_lock);
	lc_unlock(device->act_log);

	if (s)
		drbd_info(device, "Suspended AL updates\n");
}


static bool should_set_defaults(struct genl_info *info)
{
	unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
	return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
}

static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
{
	/* This is limited by 16 bit "slot" numbers,
	 * and by available on-disk context storage.
	 *
	 * Also (u16)~0 is special (denotes a "free" extent).
	 *
	 * One transaction occupies one 4kB on-disk block,
	 * we have n such blocks in the on disk ring buffer,
	 * the "current" transaction may fail (n-1),
	 * and there are 919 slot numbers of context information
	 * per transaction.
	 *
	 * 72 transaction blocks amounts to more than 2**16 context slots,
	 * so cap there first.
	 */
	const unsigned int max_al_nr = DRBD_AL_EXTENTS_MAX;
	const unsigned int sufficient_on_disk =
		(max_al_nr + AL_CONTEXT_PER_TRANSACTION - 1)
		/ AL_CONTEXT_PER_TRANSACTION;

	unsigned int al_size_4k = bdev->md.al_size_4k;

	if (al_size_4k > sufficient_on_disk)
		return max_al_nr;

	return (al_size_4k - 1) * AL_CONTEXT_PER_TRANSACTION;
}
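/* Worked arithmetic, assuming DRBD_AL_EXTENTS_MAX == 65534 and
 * AL_CONTEXT_PER_TRANSACTION == 919 (the "919 slot numbers" above):
 * sufficient_on_disk = (65534 + 918) / 919 == 72, so once the on-disk ring
 * holds more than 72 4kB transaction blocks, additional blocks cannot buy
 * more slot numbers and we cap at DRBD_AL_EXTENTS_MAX. */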

static bool write_ordering_changed(struct disk_conf *a, struct disk_conf *b)
{
	return	a->disk_barrier != b->disk_barrier ||
		a->disk_flushes != b->disk_flushes ||
		a->disk_drain != b->disk_drain;
}

static void sanitize_disk_conf(struct drbd_device *device, struct disk_conf *disk_conf,
			       struct drbd_backing_dev *nbc)
{
	struct block_device *bdev = nbc->backing_bdev;

	if (disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
		disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
	if (disk_conf->al_extents > drbd_al_extents_max(nbc))
		disk_conf->al_extents = drbd_al_extents_max(nbc);

	if (!bdev_max_discard_sectors(bdev)) {
		if (disk_conf->rs_discard_granularity) {
			disk_conf->rs_discard_granularity = 0; /* disable feature */
			drbd_info(device, "rs_discard_granularity feature disabled\n");
		}
	}

	if (disk_conf->rs_discard_granularity) {
		int orig_value = disk_conf->rs_discard_granularity;
		sector_t discard_size = bdev_max_discard_sectors(bdev) << 9;
		unsigned int discard_granularity = bdev_discard_granularity(bdev);
		int remainder;

		if (discard_granularity > disk_conf->rs_discard_granularity)
			disk_conf->rs_discard_granularity = discard_granularity;

		remainder = disk_conf->rs_discard_granularity %
				discard_granularity;
		disk_conf->rs_discard_granularity += remainder;

		if (disk_conf->rs_discard_granularity > discard_size)
			disk_conf->rs_discard_granularity = discard_size;

		if (disk_conf->rs_discard_granularity != orig_value)
			drbd_info(device, "rs_discard_granularity changed to %d\n",
				  disk_conf->rs_discard_granularity);
	}
}

static int disk_opts_check_al_size(struct drbd_device *device, struct disk_conf *dc)
{
	int err = -EBUSY;

	if (device->act_log &&
	    device->act_log->nr_elements == dc->al_extents)
		return 0;

	drbd_suspend_io(device);
	/* If IO completion is currently blocked, we would likely wait
	 * "forever" for the activity log to become unused. So we don't. */
	if (atomic_read(&device->ap_bio_cnt))
		goto out;

	wait_event(device->al_wait, lc_try_lock(device->act_log));
	drbd_al_shrink(device);
	err = drbd_check_al_size(device, dc);
	lc_unlock(device->act_log);
	wake_up(&device->al_wait);
out:
	drbd_resume_io(device);
	return err;
}

int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;
	struct drbd_device *device;
	struct disk_conf *new_disk_conf, *old_disk_conf;
	struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
	int err;
	unsigned int fifo_size;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;

	device = adm_ctx.device;
	mutex_lock(&adm_ctx.resource->adm_mutex);

	/* we also need a disk
	 * to change the options on */
	if (!get_ldev(device)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
	if (!new_disk_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	mutex_lock(&device->resource->conf_update);
	old_disk_conf = device->ldev->disk_conf;
	*new_disk_conf = *old_disk_conf;
	if (should_set_defaults(info))
		set_disk_conf_defaults(new_disk_conf);

	err = disk_conf_from_attrs_for_change(new_disk_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
		goto fail_unlock;
	}

	if (!expect(device, new_disk_conf->resync_rate >= 1))
		new_disk_conf->resync_rate = 1;

	sanitize_disk_conf(device, new_disk_conf, device->ldev);

	if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
		new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;

	fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
	if (fifo_size != device->rs_plan_s->size) {
		new_plan = fifo_alloc(fifo_size);
		if (!new_plan) {
			drbd_err(device, "kmalloc of fifo_buffer failed");
			retcode = ERR_NOMEM;
			goto fail_unlock;
		}
	}

	err = disk_opts_check_al_size(device, new_disk_conf);
	if (err) {
		/* Could be just "busy". Ignore?
		 * Introduce dedicated error code? */
		drbd_msg_put_info(adm_ctx.reply_skb,
			"Try again without changing current al-extents setting");
		retcode = ERR_NOMEM;
		goto fail_unlock;
	}

	lock_all_resources();
	retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
	if (retcode == NO_ERROR) {
		rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
		drbd_resync_after_changed(device);
	}
	unlock_all_resources();

	if (retcode != NO_ERROR)
		goto fail_unlock;

	if (new_plan) {
		old_plan = device->rs_plan_s;
		rcu_assign_pointer(device->rs_plan_s, new_plan);
	}

	mutex_unlock(&device->resource->conf_update);

	if (new_disk_conf->al_updates)
		device->ldev->md.flags &= ~MDF_AL_DISABLED;
	else
		device->ldev->md.flags |= MDF_AL_DISABLED;

	if (new_disk_conf->md_flushes)
		clear_bit(MD_NO_FUA, &device->flags);
	else
		set_bit(MD_NO_FUA, &device->flags);

	if (write_ordering_changed(old_disk_conf, new_disk_conf))
		drbd_bump_write_ordering(device->resource, NULL, WO_BDEV_FLUSH);

	if (old_disk_conf->discard_zeroes_if_aligned !=
	    new_disk_conf->discard_zeroes_if_aligned)
		drbd_reconsider_queue_parameters(device, device->ldev, NULL);

	drbd_md_sync(device);

	if (device->state.conn >= C_CONNECTED) {
		struct drbd_peer_device *peer_device;

		for_each_peer_device(peer_device, device)
			drbd_send_sync_param(peer_device);
	}

	kvfree_rcu(old_disk_conf);
	kfree(old_plan);
	mod_timer(&device->request_timer, jiffies + HZ);
	goto success;

fail_unlock:
	mutex_unlock(&device->resource->conf_update);
 fail:
	kfree(new_disk_conf);
	kfree(new_plan);
success:
	put_ldev(device);
 out:
	mutex_unlock(&adm_ctx.resource->adm_mutex);
 finish:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
1636
1637static struct block_device *open_backing_dev(struct drbd_device *device,
1638 const char *bdev_path, void *claim_ptr, bool do_bd_link)
1639{
1640 struct block_device *bdev;
1641 int err = 0;
1642
1643 bdev = blkdev_get_by_path(bdev_path,
1644 FMODE_READ | FMODE_WRITE | FMODE_EXCL, claim_ptr);
1645 if (IS_ERR(bdev)) {
1646 drbd_err(device, "open(\"%s\") failed with %ld\n",
1647 bdev_path, PTR_ERR(bdev));
1648 return bdev;
1649 }
1650
1651 if (!do_bd_link)
1652 return bdev;
1653
1654 err = bd_link_disk_holder(bdev, device->vdisk);
1655 if (err) {
1656 blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1657 drbd_err(device, "bd_link_disk_holder(\"%s\", ...) failed with %d\n",
1658 bdev_path, err);
1659 bdev = ERR_PTR(err);
1660 }
1661 return bdev;
1662}
1663
1664static int open_backing_devices(struct drbd_device *device,
1665 struct disk_conf *new_disk_conf,
1666 struct drbd_backing_dev *nbc)
1667{
1668 struct block_device *bdev;
1669
1670 bdev = open_backing_dev(device, new_disk_conf->backing_dev, device, true);
1671 if (IS_ERR(bdev))
1672 return ERR_OPEN_DISK;
1673 nbc->backing_bdev = bdev;
1674
1675 /*
1676 * meta_dev_idx >= 0: external fixed size, possibly multiple
1677 * drbd sharing one meta device. TODO in that case, paranoia
1678 * check that [md_bdev, meta_dev_idx] is not yet used by some
1679 * other drbd minor! (if you use drbd.conf + drbdadm, that
1680 * should check it for you already; but if you don't, or
1681 * someone fooled it, we need to double check here)
1682 */
1683 bdev = open_backing_dev(device, new_disk_conf->meta_dev,
1684 /* claim ptr: device, if claimed exclusively; shared drbd_m_holder,
1685 * if potentially shared with other drbd minors */
1686 (new_disk_conf->meta_dev_idx < 0) ? (void*)device : (void*)drbd_m_holder,
1687 /* avoid double bd_claim_by_disk() for the same (source,target) tuple,
1688 * as would happen with internal metadata. */
1689 (new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_FLEX_INT &&
1690 new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_INTERNAL));
1691 if (IS_ERR(bdev))
1692 return ERR_OPEN_MD_DISK;
1693 nbc->md_bdev = bdev;
1694 return NO_ERROR;
1695}
1696
1697static void close_backing_dev(struct drbd_device *device, struct block_device *bdev,
1698 bool do_bd_unlink)
1699{
1700 if (!bdev)
1701 return;
1702 if (do_bd_unlink)
1703 bd_unlink_disk_holder(bdev, device->vdisk);
1704 blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1705}
1706
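/* Release both lower-level devices and free @ldev.  With internal meta
 * data, md_bdev equals backing_bdev, so the holder link is only removed
 * once, when the backing device itself is closed. */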
1707void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev)
1708{
1709 if (ldev == NULL)
1710 return;
1711
1712 close_backing_dev(device, ldev->md_bdev, ldev->md_bdev != ldev->backing_bdev);
1713 close_backing_dev(device, ldev->backing_bdev, true);
1714
1715 kfree(ldev->disk_conf);
1716 kfree(ldev);
1717}
1718
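/* Attach a lower-level ("backing") device to a DRBD minor: validate the
 * requested disk_conf, open the data and meta-data devices, read the
 * meta-data super block, transition through D_ATTACHING, and finally
 * negotiate the resulting disk state (D_INCONSISTENT, D_OUTDATED,
 * D_CONSISTENT or D_UP_TO_DATE; D_NEGOTIATING while connected).
 *
 * Illustrative only, not part of the original source: from userspace this
 * is typically reached via something like
 *   drbdsetup attach <minor> <lower_dev> <meta_data_dev> <meta_data_index>
 * (exact syntax depends on the drbd-utils version), which sends a
 * DRBD_ADM_ATTACH generic netlink request that lands here. */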
1719int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1720{
1721 struct drbd_config_context adm_ctx;
1722 struct drbd_device *device;
1723 struct drbd_peer_device *peer_device;
1724 struct drbd_connection *connection;
1725 int err;
1726 enum drbd_ret_code retcode;
1727 enum determine_dev_size dd;
1728 sector_t max_possible_sectors;
1729 sector_t min_md_device_sectors;
1730 struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
1731 struct disk_conf *new_disk_conf = NULL;
1732 struct lru_cache *resync_lru = NULL;
1733 struct fifo_buffer *new_plan = NULL;
1734 union drbd_state ns, os;
1735 enum drbd_state_rv rv;
1736 struct net_conf *nc;
1737
1738 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
1739 if (!adm_ctx.reply_skb)
1740 return retcode;
1741 if (retcode != NO_ERROR)
1742 goto finish;
1743
1744 device = adm_ctx.device;
1745 mutex_lock(&adm_ctx.resource->adm_mutex);
1746 peer_device = first_peer_device(device);
1747 connection = peer_device->connection;
1748 conn_reconfig_start(connection);
1749
1750 /* if you want to reconfigure, please tear down first */
1751 if (device->state.disk > D_DISKLESS) {
1752 retcode = ERR_DISK_CONFIGURED;
1753 goto fail;
1754 }
1755 /* It may just now have detached because of IO error. Make sure
1756 * drbd_ldev_destroy is done already, we may end up here very fast,
1757 * e.g. if someone calls attach from the on-io-error handler,
1758 * to realize a "hot spare" feature (not that I'd recommend that) */
1759 wait_event(device->misc_wait, !test_bit(GOING_DISKLESS, &device->flags));
1760
1761 /* make sure there is no leftover from previous force-detach attempts */
1762 clear_bit(FORCE_DETACH, &device->flags);
1763 clear_bit(WAS_IO_ERROR, &device->flags);
1764 clear_bit(WAS_READ_ERROR, &device->flags);
1765
1766 /* and no leftover from previously aborted resync or verify, either */
1767 device->rs_total = 0;
1768 device->rs_failed = 0;
1769 atomic_set(&device->rs_pending_cnt, 0);
1770
1771 /* allocation not in the IO path, drbdsetup context */
1772 nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
1773 if (!nbc) {
1774 retcode = ERR_NOMEM;
1775 goto fail;
1776 }
1777 spin_lock_init(&nbc->md.uuid_lock);
1778
1779 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
1780 if (!new_disk_conf) {
1781 retcode = ERR_NOMEM;
1782 goto fail;
1783 }
1784 nbc->disk_conf = new_disk_conf;
1785
1786 set_disk_conf_defaults(new_disk_conf);
1787 err = disk_conf_from_attrs(new_disk_conf, info);
1788 if (err) {
1789 retcode = ERR_MANDATORY_TAG;
1790 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
1791 goto fail;
1792 }
1793
1794 if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
1795 new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
1796
1797 new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
1798 if (!new_plan) {
1799 retcode = ERR_NOMEM;
1800 goto fail;
1801 }
1802
1803 if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
1804 retcode = ERR_MD_IDX_INVALID;
1805 goto fail;
1806 }
1807
1808 rcu_read_lock();
1809 nc = rcu_dereference(connection->net_conf);
1810 if (nc) {
1811 if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
1812 rcu_read_unlock();
1813 retcode = ERR_STONITH_AND_PROT_A;
1814 goto fail;
1815 }
1816 }
1817 rcu_read_unlock();
1818
1819 retcode = open_backing_devices(device, new_disk_conf, nbc);
1820 if (retcode != NO_ERROR)
1821 goto fail;
1822
1823 if ((nbc->backing_bdev == nbc->md_bdev) !=
1824 (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1825 new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
1826 retcode = ERR_MD_IDX_INVALID;
1827 goto fail;
1828 }
1829
1830 resync_lru = lc_create("resync", drbd_bm_ext_cache,
1831 1, 61, sizeof(struct bm_extent),
1832 offsetof(struct bm_extent, lce));
1833 if (!resync_lru) {
1834 retcode = ERR_NOMEM;
1835 goto fail;
1836 }
1837
1838 /* Read our meta data super block early.
1839 * This also sets other on-disk offsets. */
1840 retcode = drbd_md_read(device, nbc);
1841 if (retcode != NO_ERROR)
1842 goto fail;
1843
1844 sanitize_disk_conf(device, new_disk_conf, nbc);
1845
1846 if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
1847 drbd_err(device, "max capacity %llu smaller than disk size %llu\n",
1848 (unsigned long long) drbd_get_max_capacity(nbc),
1849 (unsigned long long) new_disk_conf->disk_size);
1850 retcode = ERR_DISK_TOO_SMALL;
1851 goto fail;
1852 }
1853
1854 if (new_disk_conf->meta_dev_idx < 0) {
1855 max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
1856 /* at least one MB, otherwise it does not make sense */
1857 min_md_device_sectors = (2<<10);
1858 } else {
1859 max_possible_sectors = DRBD_MAX_SECTORS;
1860 min_md_device_sectors = MD_128MB_SECT * (new_disk_conf->meta_dev_idx + 1);
1861 }
1862
1863 if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
1864 retcode = ERR_MD_DISK_TOO_SMALL;
1865 drbd_warn(device, "refusing attach: md-device too small, "
1866 "at least %llu sectors needed for this meta-disk type\n",
1867 (unsigned long long) min_md_device_sectors);
1868 goto fail;
1869 }
1870
1871 /* Make sure the new disk is big enough
1872 * (we may currently be R_PRIMARY with no local disk...) */
1873 if (drbd_get_max_capacity(nbc) < get_capacity(device->vdisk)) {
1874 retcode = ERR_DISK_TOO_SMALL;
1875 goto fail;
1876 }
1877
1878 nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
1879
1880 if (nbc->known_size > max_possible_sectors) {
1881 drbd_warn(device, "==> truncating very big lower level device "
1882 "to currently maximum possible %llu sectors <==\n",
1883 (unsigned long long) max_possible_sectors);
1884 if (new_disk_conf->meta_dev_idx >= 0)
1885 drbd_warn(device, "==>> using internal or flexible "
1886 "meta data may help <<==\n");
1887 }
1888
1889 drbd_suspend_io(device);
1890 /* also wait for the last barrier ack. */
1891 /* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
1892 * We need a way to either ignore barrier acks for barriers sent before a device
1893 * was attached, or a way to wait for all pending barrier acks to come in.
1894 * As barriers are counted per resource,
1895 * we'd need to suspend io on all devices of a resource.
1896 */
1897 wait_event(device->misc_wait, !atomic_read(&device->ap_pending_cnt) || drbd_suspended(device));
1898 /* and for any other previously queued work */
1899 drbd_flush_workqueue(&connection->sender_work);
1900
1901 rv = _drbd_request_state(device, NS(disk, D_ATTACHING), CS_VERBOSE);
1902 retcode = (enum drbd_ret_code)rv;
1903 drbd_resume_io(device);
1904 if (rv < SS_SUCCESS)
1905 goto fail;
1906
1907 if (!get_ldev_if_state(device, D_ATTACHING))
1908 goto force_diskless;
1909
1910 if (!device->bitmap) {
1911 if (drbd_bm_init(device)) {
1912 retcode = ERR_NOMEM;
1913 goto force_diskless_dec;
1914 }
1915 }
1916
1917 if (device->state.pdsk != D_UP_TO_DATE && device->ed_uuid &&
1918 (device->state.role == R_PRIMARY || device->state.peer == R_PRIMARY) &&
1919 (device->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
1920 drbd_err(device, "Can only attach to data with current UUID=%016llX\n",
1921 (unsigned long long)device->ed_uuid);
1922 retcode = ERR_DATA_NOT_CURRENT;
1923 goto force_diskless_dec;
1924 }
1925
1926 /* Since we are diskless, fix the activity log first... */
1927 if (drbd_check_al_size(device, new_disk_conf)) {
1928 retcode = ERR_NOMEM;
1929 goto force_diskless_dec;
1930 }
1931
1932 /* Prevent shrinking of consistent devices ! */
1933 {
1934 unsigned long long nsz = drbd_new_dev_size(device, nbc, nbc->disk_conf->disk_size, 0);
1935 unsigned long long eff = nbc->md.la_size_sect;
1936 if (drbd_md_test_flag(nbc, MDF_CONSISTENT) && nsz < eff) {
1937 if (nsz == nbc->disk_conf->disk_size) {
1938 drbd_warn(device, "truncating a consistent device during attach (%llu < %llu)\n", nsz, eff);
1939 } else {
1940 drbd_warn(device, "refusing to truncate a consistent device (%llu < %llu)\n", nsz, eff);
1941 drbd_msg_sprintf_info(adm_ctx.reply_skb,
1942 "To-be-attached device has last effective > current size, and is consistent\n"
1943 "(%llu > %llu sectors). Refusing to attach.", eff, nsz);
1944 retcode = ERR_IMPLICIT_SHRINK;
1945 goto force_diskless_dec;
1946 }
1947 }
1948 }
1949
1950 lock_all_resources();
1951 retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
1952 if (retcode != NO_ERROR) {
1953 unlock_all_resources();
1954 goto force_diskless_dec;
1955 }
1956
1957 /* Reset the "barriers don't work" bits here, then force meta data to
1958 * be written, to ensure we determine if barriers are supported. */
1959 if (new_disk_conf->md_flushes)
1960 clear_bit(MD_NO_FUA, &device->flags);
1961 else
1962 set_bit(MD_NO_FUA, &device->flags);
1963
1964 /* Point of no return reached.
1965 * Devices and memory are no longer released by error cleanup below.
1966	 * The device now takes over responsibility, and the state engine should
1967 * clean it up somewhere. */
1968 D_ASSERT(device, device->ldev == NULL);
1969 device->ldev = nbc;
1970 device->resync = resync_lru;
1971 device->rs_plan_s = new_plan;
1972 nbc = NULL;
1973 resync_lru = NULL;
1974 new_disk_conf = NULL;
1975 new_plan = NULL;
1976
1977 drbd_resync_after_changed(device);
1978 drbd_bump_write_ordering(device->resource, device->ldev, WO_BDEV_FLUSH);
1979 unlock_all_resources();
1980
1981 if (drbd_md_test_flag(device->ldev, MDF_CRASHED_PRIMARY))
1982 set_bit(CRASHED_PRIMARY, &device->flags);
1983 else
1984 clear_bit(CRASHED_PRIMARY, &device->flags);
1985
1986 if (drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
1987 !(device->state.role == R_PRIMARY && device->resource->susp_nod))
1988 set_bit(CRASHED_PRIMARY, &device->flags);
1989
1990 device->send_cnt = 0;
1991 device->recv_cnt = 0;
1992 device->read_cnt = 0;
1993 device->writ_cnt = 0;
1994
1995 drbd_reconsider_queue_parameters(device, device->ldev, NULL);
1996
1997 /* If I am currently not R_PRIMARY,
1998 * but meta data primary indicator is set,
1999	 * I have just now recovered from a hard crash,
2000	 * and was R_PRIMARY before that crash.
2001 *
2002 * Now, if I had no connection before that crash
2003 * (have been degraded R_PRIMARY), chances are that
2004 * I won't find my peer now either.
2005 *
2006 * In that case, and _only_ in that case,
2007 * we use the degr-wfc-timeout instead of the default,
2008 * so we can automatically recover from a crash of a
2009 * degraded but active "cluster" after a certain timeout.
2010 */
2011 clear_bit(USE_DEGR_WFC_T, &device->flags);
2012 if (device->state.role != R_PRIMARY &&
2013 drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
2014 !drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND))
2015 set_bit(USE_DEGR_WFC_T, &device->flags);
2016
2017 dd = drbd_determine_dev_size(device, 0, NULL);
2018 if (dd <= DS_ERROR) {
2019 retcode = ERR_NOMEM_BITMAP;
2020 goto force_diskless_dec;
2021 } else if (dd == DS_GREW)
2022 set_bit(RESYNC_AFTER_NEG, &device->flags);
2023
2024 if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ||
2025 (test_bit(CRASHED_PRIMARY, &device->flags) &&
2026 drbd_md_test_flag(device->ldev, MDF_AL_DISABLED))) {
2027 drbd_info(device, "Assuming that all blocks are out of sync "
2028 "(aka FullSync)\n");
2029 if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
2030 "set_n_write from attaching", BM_LOCKED_MASK)) {
2031 retcode = ERR_IO_MD_DISK;
2032 goto force_diskless_dec;
2033 }
2034 } else {
2035 if (drbd_bitmap_io(device, &drbd_bm_read,
2036 "read from attaching", BM_LOCKED_MASK)) {
2037 retcode = ERR_IO_MD_DISK;
2038 goto force_diskless_dec;
2039 }
2040 }
2041
2042 if (_drbd_bm_total_weight(device) == drbd_bm_bits(device))
2043 drbd_suspend_al(device); /* IO is still suspended here... */
2044
2045 spin_lock_irq(&device->resource->req_lock);
2046 os = drbd_read_state(device);
2047 ns = os;
2048 /* If MDF_CONSISTENT is not set go into inconsistent state,
2049	   otherwise investigate MDF_WAS_UP_TO_DATE...
2050 If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
2051 otherwise into D_CONSISTENT state.
2052 */
2053 if (drbd_md_test_flag(device->ldev, MDF_CONSISTENT)) {
2054 if (drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE))
2055 ns.disk = D_CONSISTENT;
2056 else
2057 ns.disk = D_OUTDATED;
2058 } else {
2059 ns.disk = D_INCONSISTENT;
2060 }
2061
2062 if (drbd_md_test_flag(device->ldev, MDF_PEER_OUT_DATED))
2063 ns.pdsk = D_OUTDATED;
2064
2065 rcu_read_lock();
2066 if (ns.disk == D_CONSISTENT &&
2067 (ns.pdsk == D_OUTDATED || rcu_dereference(device->ldev->disk_conf)->fencing == FP_DONT_CARE))
2068 ns.disk = D_UP_TO_DATE;
2069
2070 /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
2071 MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
2072 this point, because drbd_request_state() modifies these
2073 flags. */
2074
2075 if (rcu_dereference(device->ldev->disk_conf)->al_updates)
2076 device->ldev->md.flags &= ~MDF_AL_DISABLED;
2077 else
2078 device->ldev->md.flags |= MDF_AL_DISABLED;
2079
2080 rcu_read_unlock();
2081
2082	/* In case we are C_CONNECTED, postpone any decision on the new disk
2083	   state until after the negotiation phase. */
2084 if (device->state.conn == C_CONNECTED) {
2085 device->new_state_tmp.i = ns.i;
2086 ns.i = os.i;
2087 ns.disk = D_NEGOTIATING;
2088
2089 /* We expect to receive up-to-date UUIDs soon.
2090 To avoid a race in receive_state, free p_uuid while
2091 holding req_lock. I.e. atomic with the state change */
2092 kfree(device->p_uuid);
2093 device->p_uuid = NULL;
2094 }
2095
2096 rv = _drbd_set_state(device, ns, CS_VERBOSE, NULL);
2097 spin_unlock_irq(&device->resource->req_lock);
2098
2099 if (rv < SS_SUCCESS)
2100 goto force_diskless_dec;
2101
2102 mod_timer(&device->request_timer, jiffies + HZ);
2103
2104 if (device->state.role == R_PRIMARY)
2105 device->ldev->md.uuid[UI_CURRENT] |= (u64)1;
2106 else
2107 device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
2108
2109 drbd_md_mark_dirty(device);
2110 drbd_md_sync(device);
2111
2112 kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
2113 put_ldev(device);
2114 conn_reconfig_done(connection);
2115 mutex_unlock(&adm_ctx.resource->adm_mutex);
2116 drbd_adm_finish(&adm_ctx, info, retcode);
2117 return 0;
2118
2119 force_diskless_dec:
2120 put_ldev(device);
2121 force_diskless:
2122 drbd_force_state(device, NS(disk, D_DISKLESS));
2123 drbd_md_sync(device);
2124 fail:
2125 conn_reconfig_done(connection);
2126 if (nbc) {
2127 close_backing_dev(device, nbc->md_bdev, nbc->md_bdev != nbc->backing_bdev);
2128 close_backing_dev(device, nbc->backing_bdev, true);
2129 kfree(nbc);
2130 }
2131 kfree(new_disk_conf);
2132 lc_destroy(resync_lru);
2133 kfree(new_plan);
2134 mutex_unlock(&adm_ctx.resource->adm_mutex);
2135 finish:
2136 drbd_adm_finish(&adm_ctx, info, retcode);
2137 return 0;
2138}
2139
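/* Detach helper: a forced detach sets the FORCE_DETACH flag and pushes
 * the disk straight to D_FAILED; the graceful variant goes through
 * drbd_request_detach_interruptible() and may be interrupted by a signal. */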
2140static int adm_detach(struct drbd_device *device, int force)
2141{
2142 if (force) {
2143 set_bit(FORCE_DETACH, &device->flags);
2144 drbd_force_state(device, NS(disk, D_FAILED));
2145 return SS_SUCCESS;
2146 }
2147
2148 return drbd_request_detach_interruptible(device);
2149}
2150
2151/* Detaching the disk is a process in multiple stages. First we need to lock
2152 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
2153 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
2154 * internal references as well.
2155	 * Only then have we finally detached. */
2156int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
2157{
2158 struct drbd_config_context adm_ctx;
2159 enum drbd_ret_code retcode;
2160 struct detach_parms parms = { };
2161 int err;
2162
2163 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2164 if (!adm_ctx.reply_skb)
2165 return retcode;
2166 if (retcode != NO_ERROR)
2167 goto out;
2168
2169 if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
2170 err = detach_parms_from_attrs(&parms, info);
2171 if (err) {
2172 retcode = ERR_MANDATORY_TAG;
2173 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2174 goto out;
2175 }
2176 }
2177
2178 mutex_lock(&adm_ctx.resource->adm_mutex);
2179 retcode = adm_detach(adm_ctx.device, parms.force_detach);
2180 mutex_unlock(&adm_ctx.resource->adm_mutex);
2181out:
2182 drbd_adm_finish(&adm_ctx, info, retcode);
2183 return 0;
2184}
2185
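/* Scan all peer devices of @connection (under rcu_read_lock) and report
 * whether any of them is currently part of a resync, paused or not. */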
2186static bool conn_resync_running(struct drbd_connection *connection)
2187{
2188 struct drbd_peer_device *peer_device;
2189 bool rv = false;
2190 int vnr;
2191
2192 rcu_read_lock();
2193 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
2194 struct drbd_device *device = peer_device->device;
2195 if (device->state.conn == C_SYNC_SOURCE ||
2196 device->state.conn == C_SYNC_TARGET ||
2197 device->state.conn == C_PAUSED_SYNC_S ||
2198 device->state.conn == C_PAUSED_SYNC_T) {
2199 rv = true;
2200 break;
2201 }
2202 }
2203 rcu_read_unlock();
2204
2205 return rv;
2206}
2207
2208static bool conn_ov_running(struct drbd_connection *connection)
2209{
2210 struct drbd_peer_device *peer_device;
2211 bool rv = false;
2212 int vnr;
2213
2214 rcu_read_lock();
2215 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
2216 struct drbd_device *device = peer_device->device;
2217 if (device->state.conn == C_VERIFY_S ||
2218 device->state.conn == C_VERIFY_T) {
2219 rv = true;
2220 break;
2221 }
2222 }
2223 rcu_read_unlock();
2224
2225 return rv;
2226}
2227
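/* Validate @new_net_conf against the current connection state and, when
 * changing the options of an established connection, against
 * @old_net_conf: protocol, two-primaries, or integrity-alg changes need
 * at least agreed protocol version 100, dual-primary requires protocol C,
 * and fencing by STONITH is incompatible with protocol A. */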
2228static enum drbd_ret_code
2229_check_net_options(struct drbd_connection *connection, struct net_conf *old_net_conf, struct net_conf *new_net_conf)
2230{
2231 struct drbd_peer_device *peer_device;
2232 int i;
2233
2234 if (old_net_conf && connection->cstate == C_WF_REPORT_PARAMS && connection->agreed_pro_version < 100) {
2235 if (new_net_conf->wire_protocol != old_net_conf->wire_protocol)
2236 return ERR_NEED_APV_100;
2237
2238 if (new_net_conf->two_primaries != old_net_conf->two_primaries)
2239 return ERR_NEED_APV_100;
2240
2241 if (strcmp(new_net_conf->integrity_alg, old_net_conf->integrity_alg))
2242 return ERR_NEED_APV_100;
2243 }
2244
2245 if (!new_net_conf->two_primaries &&
2246 conn_highest_role(connection) == R_PRIMARY &&
2247 conn_highest_peer(connection) == R_PRIMARY)
2248 return ERR_NEED_ALLOW_TWO_PRI;
2249
2250 if (new_net_conf->two_primaries &&
2251 (new_net_conf->wire_protocol != DRBD_PROT_C))
2252 return ERR_NOT_PROTO_C;
2253
2254 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2255 struct drbd_device *device = peer_device->device;
2256 if (get_ldev(device)) {
2257 enum drbd_fencing_p fp = rcu_dereference(device->ldev->disk_conf)->fencing;
2258 put_ldev(device);
2259 if (new_net_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
2260 return ERR_STONITH_AND_PROT_A;
2261 }
2262 if (device->state.role == R_PRIMARY && new_net_conf->discard_my_data)
2263 return ERR_DISCARD_IMPOSSIBLE;
2264 }
2265
2266 if (new_net_conf->on_congestion != OC_BLOCK && new_net_conf->wire_protocol != DRBD_PROT_A)
2267 return ERR_CONG_NOT_PROTO_A;
2268
2269 return NO_ERROR;
2270}
2271
2272static enum drbd_ret_code
2273check_net_options(struct drbd_connection *connection, struct net_conf *new_net_conf)
2274{
2275 enum drbd_ret_code rv;
2276 struct drbd_peer_device *peer_device;
2277 int i;
2278
2279 rcu_read_lock();
2280 rv = _check_net_options(connection, rcu_dereference(connection->net_conf), new_net_conf);
2281 rcu_read_unlock();
2282
2283 /* connection->peer_devices protected by genl_lock() here */
2284 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2285 struct drbd_device *device = peer_device->device;
2286 if (!device->bitmap) {
2287 if (drbd_bm_init(device))
2288 return ERR_NOMEM;
2289 }
2290 }
2291
2292 return rv;
2293}
2294
2295struct crypto {
2296 struct crypto_shash *verify_tfm;
2297 struct crypto_shash *csums_tfm;
2298 struct crypto_shash *cram_hmac_tfm;
2299 struct crypto_shash *integrity_tfm;
2300};
2301
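/* Allocate a synchronous hash transform for @tfm_name, or return
 * @err_alg if the algorithm is not available.  An empty name means
 * "none" and is not an error. */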
2302static int
2303alloc_shash(struct crypto_shash **tfm, char *tfm_name, int err_alg)
2304{
2305 if (!tfm_name[0])
2306 return NO_ERROR;
2307
2308 *tfm = crypto_alloc_shash(tfm_name, 0, 0);
2309 if (IS_ERR(*tfm)) {
2310 *tfm = NULL;
2311 return err_alg;
2312 }
2313
2314 return NO_ERROR;
2315}
2316
2317static enum drbd_ret_code
2318alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf)
2319{
2320 char hmac_name[CRYPTO_MAX_ALG_NAME];
2321 enum drbd_ret_code rv;
2322
2323 rv = alloc_shash(&crypto->csums_tfm, new_net_conf->csums_alg,
2324 ERR_CSUMS_ALG);
2325 if (rv != NO_ERROR)
2326 return rv;
2327 rv = alloc_shash(&crypto->verify_tfm, new_net_conf->verify_alg,
2328 ERR_VERIFY_ALG);
2329 if (rv != NO_ERROR)
2330 return rv;
2331 rv = alloc_shash(&crypto->integrity_tfm, new_net_conf->integrity_alg,
2332 ERR_INTEGRITY_ALG);
2333 if (rv != NO_ERROR)
2334 return rv;
2335 if (new_net_conf->cram_hmac_alg[0] != 0) {
2336 snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
2337 new_net_conf->cram_hmac_alg);
2338
2339 rv = alloc_shash(&crypto->cram_hmac_tfm, hmac_name,
2340 ERR_AUTH_ALG);
2341 }
2342
2343 return rv;
2344}
2345
2346static void free_crypto(struct crypto *crypto)
2347{
2348 crypto_free_shash(crypto->cram_hmac_tfm);
2349 crypto_free_shash(crypto->integrity_tfm);
2350 crypto_free_shash(crypto->csums_tfm);
2351 crypto_free_shash(crypto->verify_tfm);
2352}
2353
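/* Change the network options of an established connection.  Changing the
 * csums or verify algorithm is refused while a resync or online verify is
 * running; the remaining options take effect immediately and are
 * re-announced to the peer where the protocol version allows it. */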
2354int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
2355{
2356 struct drbd_config_context adm_ctx;
2357 enum drbd_ret_code retcode;
2358 struct drbd_connection *connection;
2359 struct net_conf *old_net_conf, *new_net_conf = NULL;
2360 int err;
2361 int ovr; /* online verify running */
2362 int rsr; /* re-sync running */
2363 struct crypto crypto = { };
2364
2365 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
2366 if (!adm_ctx.reply_skb)
2367 return retcode;
2368 if (retcode != NO_ERROR)
2369 goto finish;
2370
2371 connection = adm_ctx.connection;
2372 mutex_lock(&adm_ctx.resource->adm_mutex);
2373
2374 new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
2375 if (!new_net_conf) {
2376 retcode = ERR_NOMEM;
2377 goto out;
2378 }
2379
2380 conn_reconfig_start(connection);
2381
2382 mutex_lock(&connection->data.mutex);
2383 mutex_lock(&connection->resource->conf_update);
2384 old_net_conf = connection->net_conf;
2385
2386 if (!old_net_conf) {
2387 drbd_msg_put_info(adm_ctx.reply_skb, "net conf missing, try connect");
2388 retcode = ERR_INVALID_REQUEST;
2389 goto fail;
2390 }
2391
2392 *new_net_conf = *old_net_conf;
2393 if (should_set_defaults(info))
2394 set_net_conf_defaults(new_net_conf);
2395
2396 err = net_conf_from_attrs_for_change(new_net_conf, info);
2397 if (err && err != -ENOMSG) {
2398 retcode = ERR_MANDATORY_TAG;
2399 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2400 goto fail;
2401 }
2402
2403 retcode = check_net_options(connection, new_net_conf);
2404 if (retcode != NO_ERROR)
2405 goto fail;
2406
2407 /* re-sync running */
2408 rsr = conn_resync_running(connection);
2409 if (rsr && strcmp(new_net_conf->csums_alg, old_net_conf->csums_alg)) {
2410 retcode = ERR_CSUMS_RESYNC_RUNNING;
2411 goto fail;
2412 }
2413
2414 /* online verify running */
2415 ovr = conn_ov_running(connection);
2416 if (ovr && strcmp(new_net_conf->verify_alg, old_net_conf->verify_alg)) {
2417 retcode = ERR_VERIFY_RUNNING;
2418 goto fail;
2419 }
2420
2421 retcode = alloc_crypto(&crypto, new_net_conf);
2422 if (retcode != NO_ERROR)
2423 goto fail;
2424
2425 rcu_assign_pointer(connection->net_conf, new_net_conf);
2426
2427 if (!rsr) {
2428 crypto_free_shash(connection->csums_tfm);
2429 connection->csums_tfm = crypto.csums_tfm;
2430 crypto.csums_tfm = NULL;
2431 }
2432 if (!ovr) {
2433 crypto_free_shash(connection->verify_tfm);
2434 connection->verify_tfm = crypto.verify_tfm;
2435 crypto.verify_tfm = NULL;
2436 }
2437
2438 crypto_free_shash(connection->integrity_tfm);
2439 connection->integrity_tfm = crypto.integrity_tfm;
2440 if (connection->cstate >= C_WF_REPORT_PARAMS && connection->agreed_pro_version >= 100)
2441 /* Do this without trying to take connection->data.mutex again. */
2442 __drbd_send_protocol(connection, P_PROTOCOL_UPDATE);
2443
2444 crypto_free_shash(connection->cram_hmac_tfm);
2445 connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
2446
2447 mutex_unlock(&connection->resource->conf_update);
2448 mutex_unlock(&connection->data.mutex);
2449 kvfree_rcu(old_net_conf);
2450
2451 if (connection->cstate >= C_WF_REPORT_PARAMS) {
2452 struct drbd_peer_device *peer_device;
2453 int vnr;
2454
2455 idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
2456 drbd_send_sync_param(peer_device);
2457 }
2458
2459 goto done;
2460
2461 fail:
2462 mutex_unlock(&connection->resource->conf_update);
2463 mutex_unlock(&connection->data.mutex);
2464 free_crypto(&crypto);
2465 kfree(new_net_conf);
2466 done:
2467 conn_reconfig_done(connection);
2468 out:
2469 mutex_unlock(&adm_ctx.resource->adm_mutex);
2470 finish:
2471 drbd_adm_finish(&adm_ctx, info, retcode);
2472 return 0;
2473}
2474
2475static void connection_to_info(struct connection_info *info,
2476 struct drbd_connection *connection)
2477{
2478 info->conn_connection_state = connection->cstate;
2479 info->conn_role = conn_highest_peer(connection);
2480}
2481
2482static void peer_device_to_info(struct peer_device_info *info,
2483 struct drbd_peer_device *peer_device)
2484{
2485 struct drbd_device *device = peer_device->device;
2486
2487 info->peer_repl_state =
2488 max_t(enum drbd_conns, C_WF_REPORT_PARAMS, device->state.conn);
2489 info->peer_disk_state = device->state.pdsk;
2490 info->peer_resync_susp_user = device->state.user_isp;
2491 info->peer_resync_susp_peer = device->state.peer_isp;
2492 info->peer_resync_susp_dependency = device->state.aftr_isp;
2493}
2494
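/* Establish the network configuration of a resource: reject endpoint
 * addresses already in use by another connection, allocate the configured
 * crypto transforms, publish the new net_conf, emit the NOTIFY_CREATE
 * events, and kick the connection into C_UNCONNECTED so the receiver can
 * start connecting. */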
2495int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
2496{
2497 struct connection_info connection_info;
2498 enum drbd_notification_type flags;
2499 unsigned int peer_devices = 0;
2500 struct drbd_config_context adm_ctx;
2501 struct drbd_peer_device *peer_device;
2502 struct net_conf *old_net_conf, *new_net_conf = NULL;
2503 struct crypto crypto = { };
2504 struct drbd_resource *resource;
2505 struct drbd_connection *connection;
2506 enum drbd_ret_code retcode;
2507 enum drbd_state_rv rv;
2508 int i;
2509 int err;
2510
2511 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
2512
2513 if (!adm_ctx.reply_skb)
2514 return retcode;
2515 if (retcode != NO_ERROR)
2516 goto out;
2517 if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
2518 drbd_msg_put_info(adm_ctx.reply_skb, "connection endpoint(s) missing");
2519 retcode = ERR_INVALID_REQUEST;
2520 goto out;
2521 }
2522
2523 /* No need for _rcu here. All reconfiguration is
2524 * strictly serialized on genl_lock(). We are protected against
2525 * concurrent reconfiguration/addition/deletion */
2526 for_each_resource(resource, &drbd_resources) {
2527 for_each_connection(connection, resource) {
2528 if (nla_len(adm_ctx.my_addr) == connection->my_addr_len &&
2529 !memcmp(nla_data(adm_ctx.my_addr), &connection->my_addr,
2530 connection->my_addr_len)) {
2531 retcode = ERR_LOCAL_ADDR;
2532 goto out;
2533 }
2534
2535 if (nla_len(adm_ctx.peer_addr) == connection->peer_addr_len &&
2536 !memcmp(nla_data(adm_ctx.peer_addr), &connection->peer_addr,
2537 connection->peer_addr_len)) {
2538 retcode = ERR_PEER_ADDR;
2539 goto out;
2540 }
2541 }
2542 }
2543
2544 mutex_lock(&adm_ctx.resource->adm_mutex);
2545 connection = first_connection(adm_ctx.resource);
2546 conn_reconfig_start(connection);
2547
2548 if (connection->cstate > C_STANDALONE) {
2549 retcode = ERR_NET_CONFIGURED;
2550 goto fail;
2551 }
2552
2553 /* allocation not in the IO path, drbdsetup / netlink process context */
2554 new_net_conf = kzalloc(sizeof(*new_net_conf), GFP_KERNEL);
2555 if (!new_net_conf) {
2556 retcode = ERR_NOMEM;
2557 goto fail;
2558 }
2559
2560 set_net_conf_defaults(new_net_conf);
2561
2562 err = net_conf_from_attrs(new_net_conf, info);
2563 if (err && err != -ENOMSG) {
2564 retcode = ERR_MANDATORY_TAG;
2565 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2566 goto fail;
2567 }
2568
2569 retcode = check_net_options(connection, new_net_conf);
2570 if (retcode != NO_ERROR)
2571 goto fail;
2572
2573 retcode = alloc_crypto(&crypto, new_net_conf);
2574 if (retcode != NO_ERROR)
2575 goto fail;
2576
2577 ((char *)new_net_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
2578
2579 drbd_flush_workqueue(&connection->sender_work);
2580
2581 mutex_lock(&adm_ctx.resource->conf_update);
2582 old_net_conf = connection->net_conf;
2583 if (old_net_conf) {
2584 retcode = ERR_NET_CONFIGURED;
2585 mutex_unlock(&adm_ctx.resource->conf_update);
2586 goto fail;
2587 }
2588 rcu_assign_pointer(connection->net_conf, new_net_conf);
2589
2590 conn_free_crypto(connection);
2591 connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
2592 connection->integrity_tfm = crypto.integrity_tfm;
2593 connection->csums_tfm = crypto.csums_tfm;
2594 connection->verify_tfm = crypto.verify_tfm;
2595
2596 connection->my_addr_len = nla_len(adm_ctx.my_addr);
2597 memcpy(&connection->my_addr, nla_data(adm_ctx.my_addr), connection->my_addr_len);
2598 connection->peer_addr_len = nla_len(adm_ctx.peer_addr);
2599 memcpy(&connection->peer_addr, nla_data(adm_ctx.peer_addr), connection->peer_addr_len);
2600
2601 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2602 peer_devices++;
2603 }
2604
2605 connection_to_info(&connection_info, connection);
2606 flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
2607 mutex_lock(¬ification_mutex);
2608 notify_connection_state(NULL, 0, connection, &connection_info, NOTIFY_CREATE | flags);
2609 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2610 struct peer_device_info peer_device_info;
2611
2612 peer_device_to_info(&peer_device_info, peer_device);
2613 flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
2614 notify_peer_device_state(NULL, 0, peer_device, &peer_device_info, NOTIFY_CREATE | flags);
2615 }
2616 mutex_unlock(¬ification_mutex);
2617 mutex_unlock(&adm_ctx.resource->conf_update);
2618
2619 rcu_read_lock();
2620 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2621 struct drbd_device *device = peer_device->device;
2622 device->send_cnt = 0;
2623 device->recv_cnt = 0;
2624 }
2625 rcu_read_unlock();
2626
2627 rv = conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
2628
2629 conn_reconfig_done(connection);
2630 mutex_unlock(&adm_ctx.resource->adm_mutex);
2631 drbd_adm_finish(&adm_ctx, info, rv);
2632 return 0;
2633
2634fail:
2635 free_crypto(&crypto);
2636 kfree(new_net_conf);
2637
2638 conn_reconfig_done(connection);
2639 mutex_unlock(&adm_ctx.resource->adm_mutex);
2640out:
2641 drbd_adm_finish(&adm_ctx, info, retcode);
2642 return 0;
2643}
2644
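/* Take @connection to C_DISCONNECTING and wait until the receiver thread
 * has actually stopped.  Several state-engine refusals are handled by
 * retrying with the local disk or the peer disk marked D_OUTDATED;
 * @force turns the request into a hard state change. */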
2645static enum drbd_state_rv conn_try_disconnect(struct drbd_connection *connection, bool force)
2646{
2647 enum drbd_conns cstate;
2648 enum drbd_state_rv rv;
2649
2650repeat:
2651 rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
2652 force ? CS_HARD : 0);
2653
2654 switch (rv) {
2655 case SS_NOTHING_TO_DO:
2656 break;
2657 case SS_ALREADY_STANDALONE:
2658 return SS_SUCCESS;
2659 case SS_PRIMARY_NOP:
2660 /* Our state checking code wants to see the peer outdated. */
2661 rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0);
2662
2663 if (rv == SS_OUTDATE_WO_CONN) /* lost connection before graceful disconnect succeeded */
2664 rv = conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_VERBOSE);
2665
2666 break;
2667 case SS_CW_FAILED_BY_PEER:
2668 spin_lock_irq(&connection->resource->req_lock);
2669 cstate = connection->cstate;
2670 spin_unlock_irq(&connection->resource->req_lock);
2671 if (cstate <= C_WF_CONNECTION)
2672 goto repeat;
2673 /* The peer probably wants to see us outdated. */
2674 rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING,
2675 disk, D_OUTDATED), 0);
2676 if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
2677 rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
2678 CS_HARD);
2679 }
2680 break;
2681 default:;
2682 /* no special handling necessary */
2683 }
2684
2685 if (rv >= SS_SUCCESS) {
2686 enum drbd_state_rv rv2;
2687 /* No one else can reconfigure the network while I am here.
2688 * The state handling only uses drbd_thread_stop_nowait(),
2689	 * we really want to wait here until the receiver is gone.
2690 */
2691 drbd_thread_stop(&connection->receiver);
2692
2693 /* Race breaker. This additional state change request may be
2694 * necessary, if this was a forced disconnect during a receiver
2695 * restart. We may have "killed" the receiver thread just
2696 * after drbd_receiver() returned. Typically, we should be
2697 * C_STANDALONE already, now, and this becomes a no-op.
2698 */
2699 rv2 = conn_request_state(connection, NS(conn, C_STANDALONE),
2700 CS_VERBOSE | CS_HARD);
2701 if (rv2 < SS_SUCCESS)
2702 drbd_err(connection,
2703 "unexpected rv2=%d in conn_try_disconnect()\n",
2704 rv2);
2705 /* Unlike in DRBD 9, the state engine has generated
2706 * NOTIFY_DESTROY events before clearing connection->net_conf. */
2707 }
2708 return rv;
2709}
2710
2711int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
2712{
2713 struct drbd_config_context adm_ctx;
2714 struct disconnect_parms parms;
2715 struct drbd_connection *connection;
2716 enum drbd_state_rv rv;
2717 enum drbd_ret_code retcode;
2718 int err;
2719
2720 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
2721 if (!adm_ctx.reply_skb)
2722 return retcode;
2723 if (retcode != NO_ERROR)
2724 goto fail;
2725
2726 connection = adm_ctx.connection;
2727 memset(&parms, 0, sizeof(parms));
2728 if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
2729 err = disconnect_parms_from_attrs(&parms, info);
2730 if (err) {
2731 retcode = ERR_MANDATORY_TAG;
2732 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2733 goto fail;
2734 }
2735 }
2736
2737 mutex_lock(&adm_ctx.resource->adm_mutex);
2738 rv = conn_try_disconnect(connection, parms.force_disconnect);
2739 mutex_unlock(&adm_ctx.resource->adm_mutex);
2740 if (rv < SS_SUCCESS) {
2741 drbd_adm_finish(&adm_ctx, info, rv);
2742 return 0;
2743 }
2744 retcode = NO_ERROR;
2745 fail:
2746 drbd_adm_finish(&adm_ctx, info, retcode);
2747 return 0;
2748}
2749
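/* After an online grow, resync the newly added area.  If the roles
 * differ, the primary side becomes sync source; with equal roles the
 * connection's RESOLVE_CONFLICTS flag breaks the tie.  The sync target
 * side merely requests C_WF_SYNC_UUID and waits for the source. */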
2750void resync_after_online_grow(struct drbd_device *device)
2751{
2752 int iass; /* I am sync source */
2753
2754 drbd_info(device, "Resync of new storage after online grow\n");
2755 if (device->state.role != device->state.peer)
2756 iass = (device->state.role == R_PRIMARY);
2757 else
2758 iass = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags);
2759
2760 if (iass)
2761 drbd_start_resync(device, C_SYNC_SOURCE);
2762 else
2763 _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
2764}
2765
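/* Resize the DRBD device: optionally change the user-requested size
 * and/or the activity-log layout (al_stripes, al_stripe_size), then
 * re-determine the device size and, while connected, announce the new
 * sizes and UUIDs to the peer. */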
2766int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
2767{
2768 struct drbd_config_context adm_ctx;
2769 struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
2770 struct resize_parms rs;
2771 struct drbd_device *device;
2772 enum drbd_ret_code retcode;
2773 enum determine_dev_size dd;
2774 bool change_al_layout = false;
2775 enum dds_flags ddsf;
2776 sector_t u_size;
2777 int err;
2778
2779 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2780 if (!adm_ctx.reply_skb)
2781 return retcode;
2782 if (retcode != NO_ERROR)
2783 goto finish;
2784
2785 mutex_lock(&adm_ctx.resource->adm_mutex);
2786 device = adm_ctx.device;
2787 if (!get_ldev(device)) {
2788 retcode = ERR_NO_DISK;
2789 goto fail;
2790 }
2791
2792 memset(&rs, 0, sizeof(struct resize_parms));
2793 rs.al_stripes = device->ldev->md.al_stripes;
2794 rs.al_stripe_size = device->ldev->md.al_stripe_size_4k * 4;
2795 if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
2796 err = resize_parms_from_attrs(&rs, info);
2797 if (err) {
2798 retcode = ERR_MANDATORY_TAG;
2799 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2800 goto fail_ldev;
2801 }
2802 }
2803
2804 if (device->state.conn > C_CONNECTED) {
2805 retcode = ERR_RESIZE_RESYNC;
2806 goto fail_ldev;
2807 }
2808
2809 if (device->state.role == R_SECONDARY &&
2810 device->state.peer == R_SECONDARY) {
2811 retcode = ERR_NO_PRIMARY;
2812 goto fail_ldev;
2813 }
2814
2815 if (rs.no_resync && first_peer_device(device)->connection->agreed_pro_version < 93) {
2816 retcode = ERR_NEED_APV_93;
2817 goto fail_ldev;
2818 }
2819
2820 rcu_read_lock();
2821 u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
2822 rcu_read_unlock();
2823 if (u_size != (sector_t)rs.resize_size) {
2824 new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
2825 if (!new_disk_conf) {
2826 retcode = ERR_NOMEM;
2827 goto fail_ldev;
2828 }
2829 }
2830
2831 if (device->ldev->md.al_stripes != rs.al_stripes ||
2832 device->ldev->md.al_stripe_size_4k != rs.al_stripe_size / 4) {
2833 u32 al_size_k = rs.al_stripes * rs.al_stripe_size;
2834
2835 if (al_size_k > (16 * 1024 * 1024)) {
2836 retcode = ERR_MD_LAYOUT_TOO_BIG;
2837 goto fail_ldev;
2838 }
2839
2840 if (al_size_k < MD_32kB_SECT/2) {
2841 retcode = ERR_MD_LAYOUT_TOO_SMALL;
2842 goto fail_ldev;
2843 }
2844
2845 if (device->state.conn != C_CONNECTED && !rs.resize_force) {
2846 retcode = ERR_MD_LAYOUT_CONNECTED;
2847 goto fail_ldev;
2848 }
2849
2850 change_al_layout = true;
2851 }
2852
2853 if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev))
2854 device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
2855
2856 if (new_disk_conf) {
2857 mutex_lock(&device->resource->conf_update);
2858 old_disk_conf = device->ldev->disk_conf;
2859 *new_disk_conf = *old_disk_conf;
2860 new_disk_conf->disk_size = (sector_t)rs.resize_size;
2861 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
2862 mutex_unlock(&device->resource->conf_update);
2863 kvfree_rcu(old_disk_conf);
2864 new_disk_conf = NULL;
2865 }
2866
2867 ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
2868 dd = drbd_determine_dev_size(device, ddsf, change_al_layout ? &rs : NULL);
2869 drbd_md_sync(device);
2870 put_ldev(device);
2871 if (dd == DS_ERROR) {
2872 retcode = ERR_NOMEM_BITMAP;
2873 goto fail;
2874 } else if (dd == DS_ERROR_SPACE_MD) {
2875 retcode = ERR_MD_LAYOUT_NO_FIT;
2876 goto fail;
2877 } else if (dd == DS_ERROR_SHRINK) {
2878 retcode = ERR_IMPLICIT_SHRINK;
2879 goto fail;
2880 }
2881
2882 if (device->state.conn == C_CONNECTED) {
2883 if (dd == DS_GREW)
2884 set_bit(RESIZE_PENDING, &device->flags);
2885
2886 drbd_send_uuids(first_peer_device(device));
2887 drbd_send_sizes(first_peer_device(device), 1, ddsf);
2888 }
2889
2890 fail:
2891 mutex_unlock(&adm_ctx.resource->adm_mutex);
2892 finish:
2893 drbd_adm_finish(&adm_ctx, info, retcode);
2894 return 0;
2895
2896 fail_ldev:
2897 put_ldev(device);
2898 kfree(new_disk_conf);
2899 goto fail;
2900}
2901
2902int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
2903{
2904 struct drbd_config_context adm_ctx;
2905 enum drbd_ret_code retcode;
2906 struct res_opts res_opts;
2907 int err;
2908
2909 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
2910 if (!adm_ctx.reply_skb)
2911 return retcode;
2912 if (retcode != NO_ERROR)
2913 goto fail;
2914
2915 res_opts = adm_ctx.resource->res_opts;
2916 if (should_set_defaults(info))
2917 set_res_opts_defaults(&res_opts);
2918
2919 err = res_opts_from_attrs(&res_opts, info);
2920 if (err && err != -ENOMSG) {
2921 retcode = ERR_MANDATORY_TAG;
2922 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2923 goto fail;
2924 }
2925
2926 mutex_lock(&adm_ctx.resource->adm_mutex);
2927 err = set_resource_options(adm_ctx.resource, &res_opts);
2928 if (err) {
2929 retcode = ERR_INVALID_REQUEST;
2930 if (err == -ENOMEM)
2931 retcode = ERR_NOMEM;
2932 }
2933 mutex_unlock(&adm_ctx.resource->adm_mutex);
2934
2935fail:
2936 drbd_adm_finish(&adm_ctx, info, retcode);
2937 return 0;
2938}
2939
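/* Invalidate the local disk: while C_STANDALONE and R_SECONDARY the disk
 * is simply marked D_INCONSISTENT with all bitmap bits set; otherwise a
 * resync handshake is started with this node as sync target for a full
 * sync.  (Illustrative only: typically reached via "drbdsetup invalidate";
 * exact syntax depends on the drbd-utils version.) */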
2940int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
2941{
2942 struct drbd_config_context adm_ctx;
2943 struct drbd_device *device;
2944	int retcode; /* enum drbd_ret_code resp. enum drbd_state_rv */
2945
2946 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2947 if (!adm_ctx.reply_skb)
2948 return retcode;
2949 if (retcode != NO_ERROR)
2950 goto out;
2951
2952 device = adm_ctx.device;
2953 if (!get_ldev(device)) {
2954 retcode = ERR_NO_DISK;
2955 goto out;
2956 }
2957
2958 mutex_lock(&adm_ctx.resource->adm_mutex);
2959
2960	/* If there is still bitmap IO pending, probably because a previous
2961	 * resync just finished, wait for it before requesting a new resync.
2962	 * Also wait for its after_state_ch(). */
2963 drbd_suspend_io(device);
2964 wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
2965 drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
2966
2967 /* If we happen to be C_STANDALONE R_SECONDARY, just change to
2968 * D_INCONSISTENT, and set all bits in the bitmap. Otherwise,
2969 * try to start a resync handshake as sync target for full sync.
2970 */
2971 if (device->state.conn == C_STANDALONE && device->state.role == R_SECONDARY) {
2972 retcode = drbd_request_state(device, NS(disk, D_INCONSISTENT));
2973 if (retcode >= SS_SUCCESS) {
2974 if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
2975 "set_n_write from invalidate", BM_LOCKED_MASK))
2976 retcode = ERR_IO_MD_DISK;
2977 }
2978 } else
2979 retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_T));
2980 drbd_resume_io(device);
2981 mutex_unlock(&adm_ctx.resource->adm_mutex);
2982 put_ldev(device);
2983out:
2984 drbd_adm_finish(&adm_ctx, info, retcode);
2985 return 0;
2986}
2987
2988static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
2989 union drbd_state mask, union drbd_state val)
2990{
2991 struct drbd_config_context adm_ctx;
2992 enum drbd_ret_code retcode;
2993
2994 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2995 if (!adm_ctx.reply_skb)
2996 return retcode;
2997 if (retcode != NO_ERROR)
2998 goto out;
2999
3000 mutex_lock(&adm_ctx.resource->adm_mutex);
3001 retcode = drbd_request_state(adm_ctx.device, mask, val);
3002 mutex_unlock(&adm_ctx.resource->adm_mutex);
3003out:
3004 drbd_adm_finish(&adm_ctx, info, retcode);
3005 return 0;
3006}
3007
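/* Bitmap IO callback: set all bits in the bitmap, then suspend the
 * activity log.  Used by drbd_adm_invalidate_peer() below. */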
3008static int drbd_bmio_set_susp_al(struct drbd_device *device) __must_hold(local)
3009{
3010 int rv;
3011
3012 rv = drbd_bmio_set_n_write(device);
3013 drbd_suspend_al(device);
3014 return rv;
3015}
3016
3017int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
3018{
3019 struct drbd_config_context adm_ctx;
3020 int retcode; /* drbd_ret_code, drbd_state_rv */
3021 struct drbd_device *device;
3022
3023 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3024 if (!adm_ctx.reply_skb)
3025 return retcode;
3026 if (retcode != NO_ERROR)
3027 goto out;
3028
3029 device = adm_ctx.device;
3030 if (!get_ldev(device)) {
3031 retcode = ERR_NO_DISK;
3032 goto out;
3033 }
3034
3035 mutex_lock(&adm_ctx.resource->adm_mutex);
3036
3037	/* If there is still bitmap IO pending, probably because a previous
3038	 * resync just finished, wait for it before requesting a new resync.
3039	 * Also wait for its after_state_ch(). */
3040 drbd_suspend_io(device);
3041 wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
3042 drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
3043
3044 /* If we happen to be C_STANDALONE R_PRIMARY, just set all bits
3045 * in the bitmap. Otherwise, try to start a resync handshake
3046 * as sync source for full sync.
3047 */
3048 if (device->state.conn == C_STANDALONE && device->state.role == R_PRIMARY) {
3049		/* The peer will get a resync upon connect anyway. Just make that
3050		   into a full resync. */
3051 retcode = drbd_request_state(device, NS(pdsk, D_INCONSISTENT));
3052 if (retcode >= SS_SUCCESS) {
3053 if (drbd_bitmap_io(device, &drbd_bmio_set_susp_al,
3054 "set_n_write from invalidate_peer",
3055 BM_LOCKED_SET_ALLOWED))
3056 retcode = ERR_IO_MD_DISK;
3057 }
3058 } else
3059 retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_S));
3060 drbd_resume_io(device);
3061 mutex_unlock(&adm_ctx.resource->adm_mutex);
3062 put_ldev(device);
3063out:
3064 drbd_adm_finish(&adm_ctx, info, retcode);
3065 return 0;
3066}
3067
3068int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
3069{
3070 struct drbd_config_context adm_ctx;
3071 enum drbd_ret_code retcode;
3072
3073 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3074 if (!adm_ctx.reply_skb)
3075 return retcode;
3076 if (retcode != NO_ERROR)
3077 goto out;
3078
3079 mutex_lock(&adm_ctx.resource->adm_mutex);
3080 if (drbd_request_state(adm_ctx.device, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
3081 retcode = ERR_PAUSE_IS_SET;
3082 mutex_unlock(&adm_ctx.resource->adm_mutex);
3083out:
3084 drbd_adm_finish(&adm_ctx, info, retcode);
3085 return 0;
3086}
3087
3088int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
3089{
3090 struct drbd_config_context adm_ctx;
3091 union drbd_dev_state s;
3092 enum drbd_ret_code retcode;
3093
3094 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3095 if (!adm_ctx.reply_skb)
3096 return retcode;
3097 if (retcode != NO_ERROR)
3098 goto out;
3099
3100 mutex_lock(&adm_ctx.resource->adm_mutex);
3101 if (drbd_request_state(adm_ctx.device, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
3102 s = adm_ctx.device->state;
3103 if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
3104 retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
3105 s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
3106 } else {
3107 retcode = ERR_PAUSE_IS_CLEAR;
3108 }
3109 }
3110 mutex_unlock(&adm_ctx.resource->adm_mutex);
3111out:
3112 drbd_adm_finish(&adm_ctx, info, retcode);
3113 return 0;
3114}
3115
3116int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
3117{
3118 return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
3119}
3120
3121int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
3122{
3123 struct drbd_config_context adm_ctx;
3124 struct drbd_device *device;
3125	int retcode; /* enum drbd_ret_code resp. enum drbd_state_rv */
3126
3127 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3128 if (!adm_ctx.reply_skb)
3129 return retcode;
3130 if (retcode != NO_ERROR)
3131 goto out;
3132
3133 mutex_lock(&adm_ctx.resource->adm_mutex);
3134 device = adm_ctx.device;
3135 if (test_bit(NEW_CUR_UUID, &device->flags)) {
3136 if (get_ldev_if_state(device, D_ATTACHING)) {
3137 drbd_uuid_new_current(device);
3138 put_ldev(device);
3139 } else {
3140 /* This is effectively a multi-stage "forced down".
3141			 * The NEW_CUR_UUID bit is supposedly only set if we
3142			 * lost the replication connection, and are configured
3143 * to freeze IO and wait for some fence-peer handler.
3144 * So we still don't have a replication connection.
3145 * And now we don't have a local disk either. After
3146 * resume, we will fail all pending and new IO, because
3147 * we don't have any data anymore. Which means we will
3148 * eventually be able to terminate all users of this
3149 * device, and then take it down. By bumping the
3150 * "effective" data uuid, we make sure that you really
3151			 * need to tear down before you reconfigure; we will
3152			 * then refuse to re-connect or re-attach (because no
3153			 * matching real data uuid exists).
3154 */
3155 u64 val;
3156 get_random_bytes(&val, sizeof(u64));
3157 drbd_set_ed_uuid(device, val);
3158 drbd_warn(device, "Resumed without access to data; please tear down before attempting to re-configure.\n");
3159 }
3160 clear_bit(NEW_CUR_UUID, &device->flags);
3161 }
3162 drbd_suspend_io(device);
3163 retcode = drbd_request_state(device, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
3164 if (retcode == SS_SUCCESS) {
3165 if (device->state.conn < C_CONNECTED)
3166 tl_clear(first_peer_device(device)->connection);
3167 if (device->state.disk == D_DISKLESS || device->state.disk == D_FAILED)
3168 tl_restart(first_peer_device(device)->connection, FAIL_FROZEN_DISK_IO);
3169 }
3170 drbd_resume_io(device);
3171 mutex_unlock(&adm_ctx.resource->adm_mutex);
3172out:
3173 drbd_adm_finish(&adm_ctx, info, retcode);
3174 return 0;
3175}
3176
3177int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
3178{
3179 return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
3180}
3181
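/* Emit a DRBD_NLA_CFG_CONTEXT nest identifying the object a netlink
 * message refers to: resource name, optional volume number, and the
 * connection's endpoint addresses where known. */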
3182static int nla_put_drbd_cfg_context(struct sk_buff *skb,
3183 struct drbd_resource *resource,
3184 struct drbd_connection *connection,
3185 struct drbd_device *device)
3186{
3187 struct nlattr *nla;
3188 nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_CONTEXT);
3189 if (!nla)
3190 goto nla_put_failure;
3191 if (device &&
3192 nla_put_u32(skb, T_ctx_volume, device->vnr))
3193 goto nla_put_failure;
3194 if (nla_put_string(skb, T_ctx_resource_name, resource->name))
3195 goto nla_put_failure;
3196 if (connection) {
3197 if (connection->my_addr_len &&
3198 nla_put(skb, T_ctx_my_addr, connection->my_addr_len, &connection->my_addr))
3199 goto nla_put_failure;
3200 if (connection->peer_addr_len &&
3201 nla_put(skb, T_ctx_peer_addr, connection->peer_addr_len, &connection->peer_addr))
3202 goto nla_put_failure;
3203 }
3204 nla_nest_end(skb, nla);
3205 return 0;
3206
3207nla_put_failure:
3208 if (nla)
3209 nla_nest_cancel(skb, nla);
3210 return -EMSGSIZE;
3211}
3212
3213/*
3214 * The generic netlink dump callbacks are called outside the genl_lock(), so
3215 * they cannot use the simple attribute parsing code which uses global
3216 * attribute tables.
3217 */
3218static struct nlattr *find_cfg_context_attr(const struct nlmsghdr *nlh, int attr)
3219{
3220 const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
3221 const int maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
3222 struct nlattr *nla;
3223
3224 nla = nla_find(nlmsg_attrdata(nlh, hdrlen), nlmsg_attrlen(nlh, hdrlen),
3225 DRBD_NLA_CFG_CONTEXT);
3226 if (!nla)
3227 return NULL;
3228 return drbd_nla_find_nested(maxtype, nla, __nla_type(attr));
3229}
3230
3231static void resource_to_info(struct resource_info *, struct drbd_resource *);
3232
3233int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb)
3234{
3235 struct drbd_genlmsghdr *dh;
3236 struct drbd_resource *resource;
3237 struct resource_info resource_info;
3238 struct resource_statistics resource_statistics;
3239 int err;
3240
3241 rcu_read_lock();
3242 if (cb->args[0]) {
3243 for_each_resource_rcu(resource, &drbd_resources)
3244 if (resource == (struct drbd_resource *)cb->args[0])
3245 goto found_resource;
3246 err = 0; /* resource was probably deleted */
3247 goto out;
3248 }
3249 resource = list_entry(&drbd_resources,
3250 struct drbd_resource, resources);
3251
3252found_resource:
3253 list_for_each_entry_continue_rcu(resource, &drbd_resources, resources) {
3254 goto put_result;
3255 }
3256 err = 0;
3257 goto out;
3258
3259put_result:
3260 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3261 cb->nlh->nlmsg_seq, &drbd_genl_family,
3262 NLM_F_MULTI, DRBD_ADM_GET_RESOURCES);
3263 err = -ENOMEM;
3264 if (!dh)
3265 goto out;
3266 dh->minor = -1U;
3267 dh->ret_code = NO_ERROR;
3268 err = nla_put_drbd_cfg_context(skb, resource, NULL, NULL);
3269 if (err)
3270 goto out;
3271 err = res_opts_to_skb(skb, &resource->res_opts, !capable(CAP_SYS_ADMIN));
3272 if (err)
3273 goto out;
3274 resource_to_info(&resource_info, resource);
3275 err = resource_info_to_skb(skb, &resource_info, !capable(CAP_SYS_ADMIN));
3276 if (err)
3277 goto out;
3278 resource_statistics.res_stat_write_ordering = resource->write_ordering;
3279 err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN));
3280 if (err)
3281 goto out;
3282 cb->args[0] = (long)resource;
3283 genlmsg_end(skb, dh);
3284 err = 0;
3285
3286out:
3287 rcu_read_unlock();
3288 if (err)
3289 return err;
3290 return skb->len;
3291}
3292
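/* Fill @s with a snapshot of per-device counters.  The UUID history is
 * copied under md->uuid_lock, and on-disk flags are only reported while
 * we can hold a local-disk reference. */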
3293static void device_to_statistics(struct device_statistics *s,
3294 struct drbd_device *device)
3295{
3296 memset(s, 0, sizeof(*s));
3297 s->dev_upper_blocked = !may_inc_ap_bio(device);
3298 if (get_ldev(device)) {
3299 struct drbd_md *md = &device->ldev->md;
3300 u64 *history_uuids = (u64 *)s->history_uuids;
3301 int n;
3302
3303 spin_lock_irq(&md->uuid_lock);
3304 s->dev_current_uuid = md->uuid[UI_CURRENT];
3305 BUILD_BUG_ON(sizeof(s->history_uuids) < UI_HISTORY_END - UI_HISTORY_START + 1);
3306 for (n = 0; n < UI_HISTORY_END - UI_HISTORY_START + 1; n++)
3307 history_uuids[n] = md->uuid[UI_HISTORY_START + n];
3308 for (; n < HISTORY_UUIDS; n++)
3309 history_uuids[n] = 0;
3310 s->history_uuids_len = HISTORY_UUIDS;
3311 spin_unlock_irq(&md->uuid_lock);
3312
3313 s->dev_disk_flags = md->flags;
3314 put_ldev(device);
3315 }
3316 s->dev_size = get_capacity(device->vdisk);
3317 s->dev_read = device->read_cnt;
3318 s->dev_write = device->writ_cnt;
3319 s->dev_al_writes = device->al_writ_cnt;
3320 s->dev_bm_writes = device->bm_writ_cnt;
3321 s->dev_upper_pending = atomic_read(&device->ap_bio_cnt);
3322 s->dev_lower_pending = atomic_read(&device->local_cnt);
3323 s->dev_al_suspended = test_bit(AL_SUSPENDED, &device->flags);
3324 s->dev_exposed_data_uuid = device->ed_uuid;
3325}
3326
3327static int put_resource_in_arg0(struct netlink_callback *cb, int holder_nr)
3328{
3329 if (cb->args[0]) {
3330 struct drbd_resource *resource =
3331 (struct drbd_resource *)cb->args[0];
3332 kref_put(&resource->kref, drbd_destroy_resource);
3333 }
3334
3335 return 0;
3336}
3337
3338int drbd_adm_dump_devices_done(struct netlink_callback *cb)
{
3339	return put_resource_in_arg0(cb, 7);
3340}
3341
3342static void device_to_info(struct device_info *, struct drbd_device *);
3343
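/* Dump one device per invocation.  cb->args[0] caches the (optional)
 * resource filter, cb->args[1] the next minor to look at, so a
 * multi-message dump can resume where the previous message left off. */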
3344int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb)
3345{
3346 struct nlattr *resource_filter;
3347 struct drbd_resource *resource;
3348 struct drbd_device *device;
3349 int minor, err, retcode;
3350 struct drbd_genlmsghdr *dh;
3351 struct device_info device_info;
3352 struct device_statistics device_statistics;
3353 struct idr *idr_to_search;
3354
3355 resource = (struct drbd_resource *)cb->args[0];
3356 if (!cb->args[0] && !cb->args[1]) {
3357 resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
3358 if (resource_filter) {
3359 retcode = ERR_RES_NOT_KNOWN;
3360 resource = drbd_find_resource(nla_data(resource_filter));
3361 if (!resource)
3362 goto put_result;
3363 cb->args[0] = (long)resource;
3364 }
3365 }
3366
3367 rcu_read_lock();
3368 minor = cb->args[1];
3369 idr_to_search = resource ? &resource->devices : &drbd_devices;
3370 device = idr_get_next(idr_to_search, &minor);
3371 if (!device) {
3372 err = 0;
3373 goto out;
3374 }
3375 idr_for_each_entry_continue(idr_to_search, device, minor) {
3376 retcode = NO_ERROR;
3377 goto put_result; /* only one iteration */
3378 }
3379 err = 0;
3380 goto out; /* no more devices */
3381
3382put_result:
3383 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3384 cb->nlh->nlmsg_seq, &drbd_genl_family,
3385 NLM_F_MULTI, DRBD_ADM_GET_DEVICES);
3386 err = -ENOMEM;
3387 if (!dh)
3388 goto out;
3389 dh->ret_code = retcode;
3390 dh->minor = -1U;
3391 if (retcode == NO_ERROR) {
3392 dh->minor = device->minor;
3393 err = nla_put_drbd_cfg_context(skb, device->resource, NULL, device);
3394 if (err)
3395 goto out;
3396 if (get_ldev(device)) {
3397 struct disk_conf *disk_conf =
3398 rcu_dereference(device->ldev->disk_conf);
3399
3400 err = disk_conf_to_skb(skb, disk_conf, !capable(CAP_SYS_ADMIN));
3401 put_ldev(device);
3402 if (err)
3403 goto out;
3404 }
3405 device_to_info(&device_info, device);
3406 err = device_info_to_skb(skb, &device_info, !capable(CAP_SYS_ADMIN));
3407 if (err)
3408 goto out;
3409
3410 device_to_statistics(&device_statistics, device);
3411 err = device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN));
3412 if (err)
3413 goto out;
3414 cb->args[1] = minor + 1;
3415 }
3416 genlmsg_end(skb, dh);
3417 err = 0;
3418
3419out:
3420 rcu_read_unlock();
3421 if (err)
3422 return err;
3423 return skb->len;
3424}
3425
3426int drbd_adm_dump_connections_done(struct netlink_callback *cb)
3427{
3428 return put_resource_in_arg0(cb, 6);
3429}
3430
3431enum { SINGLE_RESOURCE, ITERATE_RESOURCES };
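/*
 * Note (derived from the code below): drbd_adm_dump_connections() stores
 * this mode in cb->args[1]: SINGLE_RESOURCE when an explicit resource
 * filter was given, ITERATE_RESOURCES when all resources are to be walked.
 * cb->args[0] holds the current resource (with a kref held), and
 * cb->args[2] the connection most recently dumped.
 */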
3432
3433int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb)
3434{
3435 struct nlattr *resource_filter;
3436 struct drbd_resource *resource = NULL, *next_resource;
3437 struct drbd_connection *connection;
3438 int err = 0, retcode;
3439 struct drbd_genlmsghdr *dh;
3440 struct connection_info connection_info;
3441 struct connection_statistics connection_statistics;
3442
3443 rcu_read_lock();
3444 resource = (struct drbd_resource *)cb->args[0];
3445 if (!cb->args[0]) {
3446 resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
3447 if (resource_filter) {
3448 retcode = ERR_RES_NOT_KNOWN;
3449 resource = drbd_find_resource(nla_data(resource_filter));
3450 if (!resource)
3451 goto put_result;
3452 cb->args[0] = (long)resource;
3453 cb->args[1] = SINGLE_RESOURCE;
3454 }
3455 }
3456 if (!resource) {
3457 if (list_empty(&drbd_resources))
3458 goto out;
3459 resource = list_first_entry(&drbd_resources, struct drbd_resource, resources);
3460 kref_get(&resource->kref);
3461 cb->args[0] = (long)resource;
3462 cb->args[1] = ITERATE_RESOURCES;
3463 }
3464
3465next_resource:
3466 rcu_read_unlock();
3467 mutex_lock(&resource->conf_update);
3468 rcu_read_lock();
3469 if (cb->args[2]) {
3470 for_each_connection_rcu(connection, resource)
3471 if (connection == (struct drbd_connection *)cb->args[2])
3472 goto found_connection;
3473 /* connection was probably deleted */
3474 goto no_more_connections;
3475 }
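	/* Make connection point to the list head (not the first entry). */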
3476 connection = list_entry(&resource->connections, struct drbd_connection, connections);
3477
3478found_connection:
3479 list_for_each_entry_continue_rcu(connection, &resource->connections, connections) {
3480 if (!has_net_conf(connection))
3481 continue;
3482 retcode = NO_ERROR;
3483 goto put_result; /* only one iteration */
3484 }
3485
3486no_more_connections:
3487 if (cb->args[1] == ITERATE_RESOURCES) {
3488 for_each_resource_rcu(next_resource, &drbd_resources) {
3489 if (next_resource == resource)
3490 goto found_resource;
3491 }
3492 /* resource was probably deleted */
3493 }
3494 goto out;
3495
3496found_resource:
3497 list_for_each_entry_continue_rcu(next_resource, &drbd_resources, resources) {
3498 mutex_unlock(&resource->conf_update);
3499 kref_put(&resource->kref, drbd_destroy_resource);
3500 resource = next_resource;
3501 kref_get(&resource->kref);
3502 cb->args[0] = (long)resource;
3503 cb->args[2] = 0;
3504 goto next_resource;
3505 }
3506 goto out; /* no more resources */
3507
3508put_result:
3509 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3510 cb->nlh->nlmsg_seq, &drbd_genl_family,
3511 NLM_F_MULTI, DRBD_ADM_GET_CONNECTIONS);
3512 err = -ENOMEM;
3513 if (!dh)
3514 goto out;
3515 dh->ret_code = retcode;
3516 dh->minor = -1U;
3517 if (retcode == NO_ERROR) {
3518 struct net_conf *net_conf;
3519
3520 err = nla_put_drbd_cfg_context(skb, resource, connection, NULL);
3521 if (err)
3522 goto out;
3523 net_conf = rcu_dereference(connection->net_conf);
3524 if (net_conf) {
3525 err = net_conf_to_skb(skb, net_conf, !capable(CAP_SYS_ADMIN));
3526 if (err)
3527 goto out;
3528 }
3529 connection_to_info(&connection_info, connection);
3530 err = connection_info_to_skb(skb, &connection_info, !capable(CAP_SYS_ADMIN));
3531 if (err)
3532 goto out;
3533 connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->flags);
3534 err = connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
3535 if (err)
3536 goto out;
3537 cb->args[2] = (long)connection;
3538 }
3539 genlmsg_end(skb, dh);
3540 err = 0;
3541
3542out:
3543 rcu_read_unlock();
3544 if (resource)
3545 mutex_unlock(&resource->conf_update);
3546 if (err)
3547 return err;
3548 return skb->len;
3549}
3550
3551enum mdf_peer_flag {
3552 MDF_PEER_CONNECTED = 1 << 0,
3553 MDF_PEER_OUTDATED = 1 << 1,
3554 MDF_PEER_FENCING = 1 << 2,
3555 MDF_PEER_FULL_SYNC = 1 << 3,
3556};
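/*
 * Note: peer_device_to_statistics() below combines these flags with '+'
 * rather than '|'.  That is safe only because each conditional contributes
 * either 0 or a distinct single bit, so addition and bitwise-or coincide;
 * e.g. for distinct bits a and b:
 *
 *	(cond_a ? a : 0) + (cond_b ? b : 0) == (cond_a ? a : 0) | (cond_b ? b : 0)
 */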
3557
3558static void peer_device_to_statistics(struct peer_device_statistics *s,
3559 struct drbd_peer_device *peer_device)
3560{
3561 struct drbd_device *device = peer_device->device;
3562
3563 memset(s, 0, sizeof(*s));
3564 s->peer_dev_received = device->recv_cnt;
3565 s->peer_dev_sent = device->send_cnt;
3566 s->peer_dev_pending = atomic_read(&device->ap_pending_cnt) +
3567 atomic_read(&device->rs_pending_cnt);
3568 s->peer_dev_unacked = atomic_read(&device->unacked_cnt);
3569 s->peer_dev_out_of_sync = drbd_bm_total_weight(device) << (BM_BLOCK_SHIFT - 9);
3570 s->peer_dev_resync_failed = device->rs_failed << (BM_BLOCK_SHIFT - 9);
3571 if (get_ldev(device)) {
3572 struct drbd_md *md = &device->ldev->md;
3573
3574 spin_lock_irq(&md->uuid_lock);
3575 s->peer_dev_bitmap_uuid = md->uuid[UI_BITMAP];
3576 spin_unlock_irq(&md->uuid_lock);
3577 s->peer_dev_flags =
3578 (drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND) ?
3579 MDF_PEER_CONNECTED : 0) +
3580 (drbd_md_test_flag(device->ldev, MDF_CONSISTENT) &&
3581 !drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE) ?
3582 MDF_PEER_OUTDATED : 0) +
3583 /* FIXME: MDF_PEER_FENCING? */
3584 (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ?
3585 MDF_PEER_FULL_SYNC : 0);
3586 put_ldev(device);
3587 }
3588}
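/*
 * Note: drbd_bm_total_weight() and rs_failed count bitmap bits; each bit
 * covers one 4 KiB block (BM_BLOCK_SHIFT == 12), so shifting by
 * BM_BLOCK_SHIFT - 9 above converts a bit count into 512-byte sectors
 * (4096 / 512 == 8 sectors per bit).
 */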
3589
3590int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb)
3591{
3592 return put_resource_in_arg0(cb, 9);
3593}
3594
3595int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb)
3596{
3597 struct nlattr *resource_filter;
3598 struct drbd_resource *resource;
3599 struct drbd_device *device;
3600 struct drbd_peer_device *peer_device = NULL;
3601 int minor, err, retcode;
3602 struct drbd_genlmsghdr *dh;
3603 struct idr *idr_to_search;
3604
3605 resource = (struct drbd_resource *)cb->args[0];
3606 if (!cb->args[0] && !cb->args[1]) {
3607 resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
3608 if (resource_filter) {
3609 retcode = ERR_RES_NOT_KNOWN;
3610 resource = drbd_find_resource(nla_data(resource_filter));
3611 if (!resource)
3612 goto put_result;
3613			cb->args[0] = (long)resource;
3614		}
3615 }
3616
3617 rcu_read_lock();
3618 minor = cb->args[1];
3619 idr_to_search = resource ? &resource->devices : &drbd_devices;
3620 device = idr_find(idr_to_search, minor);
3621 if (!device) {
3622next_device:
3623 minor++;
3624 cb->args[2] = 0;
3625 device = idr_get_next(idr_to_search, &minor);
3626 if (!device) {
3627 err = 0;
3628 goto out;
3629 }
3630 }
3631 if (cb->args[2]) {
3632 for_each_peer_device(peer_device, device)
3633 if (peer_device == (struct drbd_peer_device *)cb->args[2])
3634 goto found_peer_device;
3635 /* peer device was probably deleted */
3636 goto next_device;
3637 }
3638 /* Make peer_device point to the list head (not the first entry). */
3639 peer_device = list_entry(&device->peer_devices, struct drbd_peer_device, peer_devices);
3640
3641found_peer_device:
3642 list_for_each_entry_continue_rcu(peer_device, &device->peer_devices, peer_devices) {
3643 if (!has_net_conf(peer_device->connection))
3644 continue;
3645 retcode = NO_ERROR;
3646 goto put_result; /* only one iteration */
3647 }
3648 goto next_device;
3649
3650put_result:
3651 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3652 cb->nlh->nlmsg_seq, &drbd_genl_family,
3653 NLM_F_MULTI, DRBD_ADM_GET_PEER_DEVICES);
3654 err = -ENOMEM;
3655 if (!dh)
3656 goto out;
3657 dh->ret_code = retcode;
3658 dh->minor = -1U;
3659 if (retcode == NO_ERROR) {
3660 struct peer_device_info peer_device_info;
3661 struct peer_device_statistics peer_device_statistics;
3662
3663 dh->minor = minor;
3664 err = nla_put_drbd_cfg_context(skb, device->resource, peer_device->connection, device);
3665 if (err)
3666 goto out;
3667 peer_device_to_info(&peer_device_info, peer_device);
3668 err = peer_device_info_to_skb(skb, &peer_device_info, !capable(CAP_SYS_ADMIN));
3669 if (err)
3670 goto out;
3671 peer_device_to_statistics(&peer_device_statistics, peer_device);
3672 err = peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN));
3673 if (err)
3674 goto out;
3675 cb->args[1] = minor;
3676 cb->args[2] = (long)peer_device;
3677 }
3678 genlmsg_end(skb, dh);
3679 err = 0;
3680
3681out:
3682 rcu_read_unlock();
3683 if (err)
3684 return err;
3685 return skb->len;
3686}
3687/*
3688 * Return the connection of @resource if @resource has exactly one connection.
3689 */
3690static struct drbd_connection *the_only_connection(struct drbd_resource *resource)
3691{
3692 struct list_head *connections = &resource->connections;
3693
3694 if (list_empty(connections) || connections->next->next != connections)
3695 return NULL;
3696 return list_first_entry(&resource->connections, struct drbd_connection, connections);
3697}
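/*
 * Illustrative sketch: for a circular list_head, "exactly one entry" means
 * the list is non-empty and the first entry's ->next points back at the
 * head, which is what the check above open-codes.  The kernel also provides
 * list_is_singular(), so an equivalent (hypothetical) formulation would be:
 *
 *	if (!list_is_singular(&resource->connections))
 *		return NULL;
 *	return list_first_entry(&resource->connections,
 *				struct drbd_connection, connections);
 */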
3698
3699static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
3700 const struct sib_info *sib)
3701{
3702 struct drbd_resource *resource = device->resource;
3703 struct state_info *si = NULL; /* for sizeof(si->member); */
3704 struct nlattr *nla;
3705 int got_ldev;
3706 int err = 0;
3707 int exclude_sensitive;
3708
3709 /* If sib != NULL, this is drbd_bcast_event, which anyone can listen
3710	 * to. So we had better exclude sensitive information.
3711 *
3712 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
3713 * in the context of the requesting user process. Exclude sensitive
3714	 * information, unless current has CAP_SYS_ADMIN.
3715 *
3716 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
3717 * relies on the current implementation of netlink_dump(), which
3718 * executes the dump callback successively from netlink_recvmsg(),
3719 * always in the context of the receiving process */
3720 exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
3721
3722 got_ldev = get_ldev(device);
3723
3724 /* We need to add connection name and volume number information still.
3725 * Minor number is in drbd_genlmsghdr. */
3726 if (nla_put_drbd_cfg_context(skb, resource, the_only_connection(resource), device))
3727 goto nla_put_failure;
3728
3729 if (res_opts_to_skb(skb, &device->resource->res_opts, exclude_sensitive))
3730 goto nla_put_failure;
3731
3732 rcu_read_lock();
3733 if (got_ldev) {
3734 struct disk_conf *disk_conf;
3735
3736 disk_conf = rcu_dereference(device->ldev->disk_conf);
3737 err = disk_conf_to_skb(skb, disk_conf, exclude_sensitive);
3738 }
3739 if (!err) {
3740 struct net_conf *nc;
3741
3742 nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
3743 if (nc)
3744 err = net_conf_to_skb(skb, nc, exclude_sensitive);
3745 }
3746 rcu_read_unlock();
3747 if (err)
3748 goto nla_put_failure;
3749
3750 nla = nla_nest_start_noflag(skb, DRBD_NLA_STATE_INFO);
3751 if (!nla)
3752 goto nla_put_failure;
3753 if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
3754 nla_put_u32(skb, T_current_state, device->state.i) ||
3755 nla_put_u64_0pad(skb, T_ed_uuid, device->ed_uuid) ||
3756 nla_put_u64_0pad(skb, T_capacity, get_capacity(device->vdisk)) ||
3757 nla_put_u64_0pad(skb, T_send_cnt, device->send_cnt) ||
3758 nla_put_u64_0pad(skb, T_recv_cnt, device->recv_cnt) ||
3759 nla_put_u64_0pad(skb, T_read_cnt, device->read_cnt) ||
3760 nla_put_u64_0pad(skb, T_writ_cnt, device->writ_cnt) ||
3761 nla_put_u64_0pad(skb, T_al_writ_cnt, device->al_writ_cnt) ||
3762 nla_put_u64_0pad(skb, T_bm_writ_cnt, device->bm_writ_cnt) ||
3763 nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&device->ap_bio_cnt)) ||
3764 nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&device->ap_pending_cnt)) ||
3765 nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&device->rs_pending_cnt)))
3766 goto nla_put_failure;
3767
3768 if (got_ldev) {
3769 int err;
3770
3771 spin_lock_irq(&device->ldev->md.uuid_lock);
3772 err = nla_put(skb, T_uuids, sizeof(si->uuids), device->ldev->md.uuid);
3773 spin_unlock_irq(&device->ldev->md.uuid_lock);
3774
3775 if (err)
3776 goto nla_put_failure;
3777
3778 if (nla_put_u32(skb, T_disk_flags, device->ldev->md.flags) ||
3779 nla_put_u64_0pad(skb, T_bits_total, drbd_bm_bits(device)) ||
3780 nla_put_u64_0pad(skb, T_bits_oos,
3781 drbd_bm_total_weight(device)))
3782 goto nla_put_failure;
3783 if (C_SYNC_SOURCE <= device->state.conn &&
3784 C_PAUSED_SYNC_T >= device->state.conn) {
3785 if (nla_put_u64_0pad(skb, T_bits_rs_total,
3786 device->rs_total) ||
3787 nla_put_u64_0pad(skb, T_bits_rs_failed,
3788 device->rs_failed))
3789 goto nla_put_failure;
3790 }
3791 }
3792
3793 if (sib) {
3794 switch(sib->sib_reason) {
3795 case SIB_SYNC_PROGRESS:
3796 case SIB_GET_STATUS_REPLY:
3797 break;
3798 case SIB_STATE_CHANGE:
3799 if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
3800 nla_put_u32(skb, T_new_state, sib->ns.i))
3801 goto nla_put_failure;
3802 break;
3803 case SIB_HELPER_POST:
3804 if (nla_put_u32(skb, T_helper_exit_code,
3805 sib->helper_exit_code))
3806 goto nla_put_failure;
3807 fallthrough;
3808 case SIB_HELPER_PRE:
3809 if (nla_put_string(skb, T_helper, sib->helper_name))
3810 goto nla_put_failure;
3811 break;
3812 }
3813 }
3814 nla_nest_end(skb, nla);
3815
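	/* The "if (0)" below makes the success path skip the error
	 * assignment, while "goto nla_put_failure" still lands on it;
	 * both paths then share the put_ldev()/return tail. */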
3816 if (0)
3817nla_put_failure:
3818 err = -EMSGSIZE;
3819 if (got_ldev)
3820 put_ldev(device);
3821 return err;
3822}
3823
3824int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
3825{
3826 struct drbd_config_context adm_ctx;
3827 enum drbd_ret_code retcode;
3828 int err;
3829
3830 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3831 if (!adm_ctx.reply_skb)
3832 return retcode;
3833 if (retcode != NO_ERROR)
3834 goto out;
3835
3836 err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.device, NULL);
3837 if (err) {
3838 nlmsg_free(adm_ctx.reply_skb);
3839 return err;
3840 }
3841out:
3842 drbd_adm_finish(&adm_ctx, info, retcode);
3843 return 0;
3844}
3845
3846static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
3847{
3848 struct drbd_device *device;
3849 struct drbd_genlmsghdr *dh;
3850 struct drbd_resource *pos = (struct drbd_resource *)cb->args[0];
3851 struct drbd_resource *resource = NULL;
3852 struct drbd_resource *tmp;
3853 unsigned volume = cb->args[1];
3854
3855	 * Open-coded, deferred iteration:
3856 * for_each_resource_safe(resource, tmp, &drbd_resources) {
3857 * connection = "first connection of resource or undefined";
3858 * idr_for_each_entry(&resource->devices, device, i) {
3859 * ...
3860 * }
3861 * }
3862 * where resource is cb->args[0];
3863 * and i is cb->args[1];
3864 *
3865 * cb->args[2] indicates if we shall loop over all resources,
3866 * or just dump all volumes of a single resource.
3867 *
3868 * This may miss entries inserted after this dump started,
3869 * or entries deleted before they are reached.
3870 *
3871 * We need to make sure the device won't disappear while
3872 * we are looking at it, and revalidate our iterators
3873 * on each iteration.
3874 */
3875
3876 /* synchronize with conn_create()/drbd_destroy_connection() */
3877 rcu_read_lock();
3878 /* revalidate iterator position */
3879 for_each_resource_rcu(tmp, &drbd_resources) {
3880 if (pos == NULL) {
3881 /* first iteration */
3882 pos = tmp;
3883 resource = pos;
3884 break;
3885 }
3886 if (tmp == pos) {
3887 resource = pos;
3888 break;
3889 }
3890 }
3891 if (resource) {
3892next_resource:
3893 device = idr_get_next(&resource->devices, &volume);
3894 if (!device) {
3895 /* No more volumes to dump on this resource.
3896 * Advance resource iterator. */
3897 pos = list_entry_rcu(resource->resources.next,
3898 struct drbd_resource, resources);
3899 /* Did we dump any volume of this resource yet? */
3900 if (volume != 0) {
3901 /* If we reached the end of the list,
3902 * or only a single resource dump was requested,
3903 * we are done. */
3904 if (&pos->resources == &drbd_resources || cb->args[2])
3905 goto out;
3906 volume = 0;
3907 resource = pos;
3908 goto next_resource;
3909 }
3910 }
3911
3912 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3913 cb->nlh->nlmsg_seq, &drbd_genl_family,
3914 NLM_F_MULTI, DRBD_ADM_GET_STATUS);
3915 if (!dh)
3916 goto out;
3917
3918 if (!device) {
3919 /* This is a connection without a single volume.
3920			 * Surprisingly enough, it may have a network
3921 * configuration. */
3922 struct drbd_connection *connection;
3923
3924 dh->minor = -1U;
3925 dh->ret_code = NO_ERROR;
3926 connection = the_only_connection(resource);
3927 if (nla_put_drbd_cfg_context(skb, resource, connection, NULL))
3928 goto cancel;
3929 if (connection) {
3930 struct net_conf *nc;
3931
3932 nc = rcu_dereference(connection->net_conf);
3933 if (nc && net_conf_to_skb(skb, nc, 1) != 0)
3934 goto cancel;
3935 }
3936 goto done;
3937 }
3938
3939 D_ASSERT(device, device->vnr == volume);
3940 D_ASSERT(device, device->resource == resource);
3941
3942 dh->minor = device_to_minor(device);
3943 dh->ret_code = NO_ERROR;
3944
3945 if (nla_put_status_info(skb, device, NULL)) {
3946cancel:
3947 genlmsg_cancel(skb, dh);
3948 goto out;
3949 }
3950done:
3951 genlmsg_end(skb, dh);
3952 }
3953
3954out:
3955 rcu_read_unlock();
3956 /* where to start the next iteration */
3957 cb->args[0] = (long)pos;
3958 cb->args[1] = (pos == resource) ? volume + 1 : 0;
3959
3960	/* No more resources/volumes/minors found results in an empty skb,
3961	 * which will terminate the dump. */
3962 return skb->len;
3963}
3964
3965/*
3966 * Request status of all resources, or of all volumes within a single resource.
3967 *
3968 * This is a dump, as the answer may not fit in a single reply skb otherwise,
3969 * which means we cannot use the family->attrbuf or other such members, because
3970 * a dump is NOT protected by the genl_lock(). During a dump, we only have access
3971 * to the incoming skb, and need to open-code "parsing" of the nlattr payload.
3972 *
3973 * Once things are setup properly, we call into get_one_status().
3974 */
3975int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
3976{
3977 const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
3978 struct nlattr *nla;
3979 const char *resource_name;
3980 struct drbd_resource *resource;
3981 int maxtype;
3982
3983	/* Is this a follow-up call? */
3984 if (cb->args[0]) {
3985 /* ... of a single resource dump,
3986 * and the resource iterator has been advanced already? */
3987 if (cb->args[2] && cb->args[2] != cb->args[0])
3988 return 0; /* DONE. */
3989 goto dump;
3990 }
3991
3992 /* First call (from netlink_dump_start). We need to figure out
3993 * which resource(s) the user wants us to dump. */
3994 nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
3995 nlmsg_attrlen(cb->nlh, hdrlen),
3996 DRBD_NLA_CFG_CONTEXT);
3997
3998 /* No explicit context given. Dump all. */
3999 if (!nla)
4000 goto dump;
4001 maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
4002 nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
4003 if (IS_ERR(nla))
4004 return PTR_ERR(nla);
4005 /* context given, but no name present? */
4006 if (!nla)
4007 return -EINVAL;
4008 resource_name = nla_data(nla);
4009 if (!*resource_name)
4010 return -ENODEV;
4011 resource = drbd_find_resource(resource_name);
4012 if (!resource)
4013 return -ENODEV;
4014
4015 kref_put(&resource->kref, drbd_destroy_resource); /* get_one_status() revalidates the resource */
4016
4017	/* prime iterators, and set the "filter" mode mark:
4018	 * only dump this resource. */
4019 cb->args[0] = (long)resource;
4020 /* cb->args[1] = 0; passed in this way. */
4021 cb->args[2] = (long)resource;
4022
4023dump:
4024 return get_one_status(skb, cb);
4025}
4026
4027int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
4028{
4029 struct drbd_config_context adm_ctx;
4030 enum drbd_ret_code retcode;
4031 struct timeout_parms tp;
4032 int err;
4033
4034 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
4035 if (!adm_ctx.reply_skb)
4036 return retcode;
4037 if (retcode != NO_ERROR)
4038 goto out;
4039
4040 tp.timeout_type =
4041 adm_ctx.device->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
4042 test_bit(USE_DEGR_WFC_T, &adm_ctx.device->flags) ? UT_DEGRADED :
4043 UT_DEFAULT;
4044
4045 err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
4046 if (err) {
4047 nlmsg_free(adm_ctx.reply_skb);
4048 return err;
4049 }
4050out:
4051 drbd_adm_finish(&adm_ctx, info, retcode);
4052 return 0;
4053}
4054
4055int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
4056{
4057 struct drbd_config_context adm_ctx;
4058 struct drbd_device *device;
4059 enum drbd_ret_code retcode;
4060 struct start_ov_parms parms;
4061
4062 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
4063 if (!adm_ctx.reply_skb)
4064 return retcode;
4065 if (retcode != NO_ERROR)
4066 goto out;
4067
4068 device = adm_ctx.device;
4069
4070 /* resume from last known position, if possible */
4071 parms.ov_start_sector = device->ov_start_sector;
4072 parms.ov_stop_sector = ULLONG_MAX;
4073 if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
4074 int err = start_ov_parms_from_attrs(&parms, info);
4075 if (err) {
4076 retcode = ERR_MANDATORY_TAG;
4077 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
4078 goto out;
4079 }
4080 }
4081 mutex_lock(&adm_ctx.resource->adm_mutex);
4082
4083 /* w_make_ov_request expects position to be aligned */
4084 device->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
4085 device->ov_stop_sector = parms.ov_stop_sector;
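	/* BM_SECT_PER_BIT is the number of 512-byte sectors covered by one
	 * bitmap bit (8 for the 4 KiB bitmap granularity), so the mask above
	 * rounds ov_start_sector down to a bitmap-bit boundary. */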
4086
4087 /* If there is still bitmap IO pending, e.g. previous resync or verify
4088 * just being finished, wait for it before requesting a new resync. */
4089 drbd_suspend_io(device);
4090 wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
4091 retcode = drbd_request_state(device, NS(conn, C_VERIFY_S));
4092 drbd_resume_io(device);
4093
4094 mutex_unlock(&adm_ctx.resource->adm_mutex);
4095out:
4096 drbd_adm_finish(&adm_ctx, info, retcode);
4097 return 0;
4098}
4099
4100
4101int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
4102{
4103 struct drbd_config_context adm_ctx;
4104 struct drbd_device *device;
4105 enum drbd_ret_code retcode;
4106 int skip_initial_sync = 0;
4107 int err;
4108 struct new_c_uuid_parms args;
4109
4110 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
4111 if (!adm_ctx.reply_skb)
4112 return retcode;
4113 if (retcode != NO_ERROR)
4114 goto out_nolock;
4115
4116 device = adm_ctx.device;
4117 memset(&args, 0, sizeof(args));
4118 if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
4119 err = new_c_uuid_parms_from_attrs(&args, info);
4120 if (err) {
4121 retcode = ERR_MANDATORY_TAG;
4122 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
4123 goto out_nolock;
4124 }
4125 }
4126
4127 mutex_lock(&adm_ctx.resource->adm_mutex);
4128 mutex_lock(device->state_mutex); /* Protects us against serialized state changes. */
4129
4130 if (!get_ldev(device)) {
4131 retcode = ERR_NO_DISK;
4132 goto out;
4133 }
4134
4135	/* this is "skip initial sync"; assumed to be clean */
4136 if (device->state.conn == C_CONNECTED &&
4137 first_peer_device(device)->connection->agreed_pro_version >= 90 &&
4138 device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
4139 drbd_info(device, "Preparing to skip initial sync\n");
4140 skip_initial_sync = 1;
4141 } else if (device->state.conn != C_STANDALONE) {
4142 retcode = ERR_CONNECTED;
4143 goto out_dec;
4144 }
4145
4146 drbd_uuid_set(device, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
4147 drbd_uuid_new_current(device); /* New current, previous to UI_BITMAP */
4148
4149 if (args.clear_bm) {
4150 err = drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
4151 "clear_n_write from new_c_uuid", BM_LOCKED_MASK);
4152 if (err) {
4153 drbd_err(device, "Writing bitmap failed with %d\n", err);
4154 retcode = ERR_IO_MD_DISK;
4155 }
4156 if (skip_initial_sync) {
4157 drbd_send_uuids_skip_initial_sync(first_peer_device(device));
4158 _drbd_uuid_set(device, UI_BITMAP, 0);
4159 drbd_print_uuids(device, "cleared bitmap UUID");
4160 spin_lock_irq(&device->resource->req_lock);
4161 _drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
4162 CS_VERBOSE, NULL);
4163 spin_unlock_irq(&device->resource->req_lock);
4164 }
4165 }
4166
4167 drbd_md_sync(device);
4168out_dec:
4169 put_ldev(device);
4170out:
4171 mutex_unlock(device->state_mutex);
4172 mutex_unlock(&adm_ctx.resource->adm_mutex);
4173out_nolock:
4174 drbd_adm_finish(&adm_ctx, info, retcode);
4175 return 0;
4176}
4177
4178static enum drbd_ret_code
4179drbd_check_resource_name(struct drbd_config_context *adm_ctx)
4180{
4181 const char *name = adm_ctx->resource_name;
4182 if (!name || !name[0]) {
4183 drbd_msg_put_info(adm_ctx->reply_skb, "resource name missing");
4184 return ERR_MANDATORY_TAG;
4185 }
4186 /* if we want to use these in sysfs/configfs/debugfs some day,
4187 * we must not allow slashes */
4188 if (strchr(name, '/')) {
4189 drbd_msg_put_info(adm_ctx->reply_skb, "invalid resource name");
4190 return ERR_INVALID_REQUEST;
4191 }
4192 return NO_ERROR;
4193}
4194
4195static void resource_to_info(struct resource_info *info,
4196 struct drbd_resource *resource)
4197{
4198 info->res_role = conn_highest_role(first_connection(resource));
4199 info->res_susp = resource->susp;
4200 info->res_susp_nod = resource->susp_nod;
4201 info->res_susp_fen = resource->susp_fen;
4202}
4203
4204int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
4205{
4206 struct drbd_connection *connection;
4207 struct drbd_config_context adm_ctx;
4208 enum drbd_ret_code retcode;
4209 struct res_opts res_opts;
4210 int err;
4211
4212 retcode = drbd_adm_prepare(&adm_ctx, skb, info, 0);
4213 if (!adm_ctx.reply_skb)
4214 return retcode;
4215 if (retcode != NO_ERROR)
4216 goto out;
4217
4218 set_res_opts_defaults(&res_opts);
4219 err = res_opts_from_attrs(&res_opts, info);
4220 if (err && err != -ENOMSG) {
4221 retcode = ERR_MANDATORY_TAG;
4222 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
4223 goto out;
4224 }
4225
4226 retcode = drbd_check_resource_name(&adm_ctx);
4227 if (retcode != NO_ERROR)
4228 goto out;
4229
4230 if (adm_ctx.resource) {
4231 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
4232 retcode = ERR_INVALID_REQUEST;
4233 drbd_msg_put_info(adm_ctx.reply_skb, "resource exists");
4234 }
4235 /* else: still NO_ERROR */
4236 goto out;
4237 }
4238
4239 /* not yet safe for genl_family.parallel_ops */
4240 mutex_lock(&resources_mutex);
4241 connection = conn_create(adm_ctx.resource_name, &res_opts);
4242 mutex_unlock(&resources_mutex);
4243
4244 if (connection) {
4245 struct resource_info resource_info;
4246
4247		mutex_lock(&notification_mutex);
4248 resource_to_info(&resource_info, connection->resource);
4249 notify_resource_state(NULL, 0, connection->resource,
4250 &resource_info, NOTIFY_CREATE);
4251		mutex_unlock(&notification_mutex);
4252 } else
4253 retcode = ERR_NOMEM;
4254
4255out:
4256 drbd_adm_finish(&adm_ctx, info, retcode);
4257 return 0;
4258}
4259
4260static void device_to_info(struct device_info *info,
4261 struct drbd_device *device)
4262{
4263 info->dev_disk_state = device->state.disk;
4264}
4265
4266
4267int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info)
4268{
4269 struct drbd_config_context adm_ctx;
4270 struct drbd_genlmsghdr *dh = info->userhdr;
4271 enum drbd_ret_code retcode;
4272
4273 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
4274 if (!adm_ctx.reply_skb)
4275 return retcode;
4276 if (retcode != NO_ERROR)
4277 goto out;
4278
4279 if (dh->minor > MINORMASK) {
4280 drbd_msg_put_info(adm_ctx.reply_skb, "requested minor out of range");
4281 retcode = ERR_INVALID_REQUEST;
4282 goto out;
4283 }
4284 if (adm_ctx.volume > DRBD_VOLUME_MAX) {
4285 drbd_msg_put_info(adm_ctx.reply_skb, "requested volume id out of range");
4286 retcode = ERR_INVALID_REQUEST;
4287 goto out;
4288 }
4289
4290 /* drbd_adm_prepare made sure already
4291 * that first_peer_device(device)->connection and device->vnr match the request. */
4292 if (adm_ctx.device) {
4293 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
4294 retcode = ERR_MINOR_OR_VOLUME_EXISTS;
4295 /* else: still NO_ERROR */
4296 goto out;
4297 }
4298
4299 mutex_lock(&adm_ctx.resource->adm_mutex);
4300 retcode = drbd_create_device(&adm_ctx, dh->minor);
4301 if (retcode == NO_ERROR) {
4302 struct drbd_device *device;
4303 struct drbd_peer_device *peer_device;
4304 struct device_info info;
4305 unsigned int peer_devices = 0;
4306 enum drbd_notification_type flags;
4307
4308 device = minor_to_device(dh->minor);
4309 for_each_peer_device(peer_device, device) {
4310 if (!has_net_conf(peer_device->connection))
4311 continue;
4312 peer_devices++;
4313 }
4314
4315 device_to_info(&info, device);
4316		mutex_lock(&notification_mutex);
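		/* While peer_devices is still non-zero, more notifications
		 * follow, so flag the event NOTIFY_CONTINUES. */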
4317 flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
4318 notify_device_state(NULL, 0, device, &info, NOTIFY_CREATE | flags);
4319 for_each_peer_device(peer_device, device) {
4320 struct peer_device_info peer_device_info;
4321
4322 if (!has_net_conf(peer_device->connection))
4323 continue;
4324 peer_device_to_info(&peer_device_info, peer_device);
4325 flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
4326 notify_peer_device_state(NULL, 0, peer_device, &peer_device_info,
4327 NOTIFY_CREATE | flags);
4328 }
4329		mutex_unlock(&notification_mutex);
4330 }
4331 mutex_unlock(&adm_ctx.resource->adm_mutex);
4332out:
4333 drbd_adm_finish(&adm_ctx, info, retcode);
4334 return 0;
4335}
4336
4337static enum drbd_ret_code adm_del_minor(struct drbd_device *device)
4338{
4339 struct drbd_peer_device *peer_device;
4340
4341 if (device->state.disk == D_DISKLESS &&
4342 /* no need to be device->state.conn == C_STANDALONE &&
4343 * we may want to delete a minor from a live replication group.
4344 */
4345 device->state.role == R_SECONDARY) {
4346 struct drbd_connection *connection =
4347 first_connection(device->resource);
4348
4349 _drbd_request_state(device, NS(conn, C_WF_REPORT_PARAMS),
4350 CS_VERBOSE + CS_WAIT_COMPLETE);
4351
4352 /* If the state engine hasn't stopped the sender thread yet, we
4353 * need to flush the sender work queue before generating the
4354 * DESTROY events here. */
4355 if (get_t_state(&connection->worker) == RUNNING)
4356 drbd_flush_workqueue(&connection->sender_work);
4357
4358		mutex_lock(&notification_mutex);
4359 for_each_peer_device(peer_device, device) {
4360 if (!has_net_conf(peer_device->connection))
4361 continue;
4362 notify_peer_device_state(NULL, 0, peer_device, NULL,
4363 NOTIFY_DESTROY | NOTIFY_CONTINUES);
4364 }
4365 notify_device_state(NULL, 0, device, NULL, NOTIFY_DESTROY);
4366		mutex_unlock(&notification_mutex);
4367
4368 drbd_delete_device(device);
4369 return NO_ERROR;
4370 } else
4371 return ERR_MINOR_CONFIGURED;
4372}
4373
4374int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info)
4375{
4376 struct drbd_config_context adm_ctx;
4377 enum drbd_ret_code retcode;
4378
4379 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
4380 if (!adm_ctx.reply_skb)
4381 return retcode;
4382 if (retcode != NO_ERROR)
4383 goto out;
4384
4385 mutex_lock(&adm_ctx.resource->adm_mutex);
4386 retcode = adm_del_minor(adm_ctx.device);
4387 mutex_unlock(&adm_ctx.resource->adm_mutex);
4388out:
4389 drbd_adm_finish(&adm_ctx, info, retcode);
4390 return 0;
4391}
4392
4393static int adm_del_resource(struct drbd_resource *resource)
4394{
4395 struct drbd_connection *connection;
4396
4397 for_each_connection(connection, resource) {
4398 if (connection->cstate > C_STANDALONE)
4399 return ERR_NET_CONFIGURED;
4400 }
4401 if (!idr_is_empty(&resource->devices))
4402 return ERR_RES_IN_USE;
4403
4404 /* The state engine has stopped the sender thread, so we don't
4405 * need to flush the sender work queue before generating the
4406 * DESTROY event here. */
4407	mutex_lock(&notification_mutex);
4408 notify_resource_state(NULL, 0, resource, NULL, NOTIFY_DESTROY);
4409	mutex_unlock(&notification_mutex);
4410
4411 mutex_lock(&resources_mutex);
4412 list_del_rcu(&resource->resources);
4413 mutex_unlock(&resources_mutex);
4414 /* Make sure all threads have actually stopped: state handling only
4415 * does drbd_thread_stop_nowait(). */
4416 list_for_each_entry(connection, &resource->connections, connections)
4417 drbd_thread_stop(&connection->worker);
4418 synchronize_rcu();
4419 drbd_free_resource(resource);
4420 return NO_ERROR;
4421}
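/*
 * Illustrative sketch: the list_del_rcu() + synchronize_rcu() + free
 * sequence above is the canonical RCU removal pattern: readers traversing
 * drbd_resources under rcu_read_lock() either still see the resource or no
 * longer do, and synchronize_rcu() guarantees all pre-existing readers have
 * finished before the memory is reclaimed.  Generic (hypothetical) shape:
 *
 *	mutex_lock(&list_mutex);
 *	list_del_rcu(&obj->node);
 *	mutex_unlock(&list_mutex);
 *	synchronize_rcu();	// wait out all current RCU readers
 *	kfree(obj);
 */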
4422
4423int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
4424{
4425 struct drbd_config_context adm_ctx;
4426 struct drbd_resource *resource;
4427 struct drbd_connection *connection;
4428 struct drbd_device *device;
4429	int retcode; /* enum drbd_ret_code resp. enum drbd_state_rv */
4430 unsigned i;
4431
4432 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
4433 if (!adm_ctx.reply_skb)
4434 return retcode;
4435 if (retcode != NO_ERROR)
4436 goto finish;
4437
4438 resource = adm_ctx.resource;
4439 mutex_lock(&resource->adm_mutex);
4440 /* demote */
4441 for_each_connection(connection, resource) {
4442 struct drbd_peer_device *peer_device;
4443
4444 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
4445 retcode = drbd_set_role(peer_device->device, R_SECONDARY, 0);
4446 if (retcode < SS_SUCCESS) {
4447 drbd_msg_put_info(adm_ctx.reply_skb, "failed to demote");
4448 goto out;
4449 }
4450 }
4451
4452 retcode = conn_try_disconnect(connection, 0);
4453 if (retcode < SS_SUCCESS) {
4454 drbd_msg_put_info(adm_ctx.reply_skb, "failed to disconnect");
4455 goto out;
4456 }
4457 }
4458
4459 /* detach */
4460 idr_for_each_entry(&resource->devices, device, i) {
4461 retcode = adm_detach(device, 0);
4462 if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
4463 drbd_msg_put_info(adm_ctx.reply_skb, "failed to detach");
4464 goto out;
4465 }
4466 }
4467
4468 /* delete volumes */
4469 idr_for_each_entry(&resource->devices, device, i) {
4470 retcode = adm_del_minor(device);
4471 if (retcode != NO_ERROR) {
4472 /* "can not happen" */
4473 drbd_msg_put_info(adm_ctx.reply_skb, "failed to delete volume");
4474 goto out;
4475 }
4476 }
4477
4478 retcode = adm_del_resource(resource);
4479out:
4480 mutex_unlock(&resource->adm_mutex);
4481finish:
4482 drbd_adm_finish(&adm_ctx, info, retcode);
4483 return 0;
4484}
4485
4486int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
4487{
4488 struct drbd_config_context adm_ctx;
4489 struct drbd_resource *resource;
4490 enum drbd_ret_code retcode;
4491
4492 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
4493 if (!adm_ctx.reply_skb)
4494 return retcode;
4495 if (retcode != NO_ERROR)
4496 goto finish;
4497 resource = adm_ctx.resource;
4498
4499 mutex_lock(&resource->adm_mutex);
4500 retcode = adm_del_resource(resource);
4501 mutex_unlock(&resource->adm_mutex);
4502finish:
4503 drbd_adm_finish(&adm_ctx, info, retcode);
4504 return 0;
4505}
4506
4507void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
4508{
4509 struct sk_buff *msg;
4510 struct drbd_genlmsghdr *d_out;
4511 unsigned seq;
4512 int err = -ENOMEM;
4513
4514 seq = atomic_inc_return(&drbd_genl_seq);
4515 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4516 if (!msg)
4517 goto failed;
4518
4519 err = -EMSGSIZE;
4520 d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
4521	if (!d_out) /* cannot happen, but anyway. */
4522 goto nla_put_failure;
4523 d_out->minor = device_to_minor(device);
4524 d_out->ret_code = NO_ERROR;
4525
4526 if (nla_put_status_info(msg, device, sib))
4527 goto nla_put_failure;
4528 genlmsg_end(msg, d_out);
4529 err = drbd_genl_multicast_events(msg, GFP_NOWAIT);
4530 /* msg has been consumed or freed in netlink_broadcast() */
4531 if (err && err != -ESRCH)
4532 goto failed;
4533
4534 return;
4535
4536nla_put_failure:
4537 nlmsg_free(msg);
4538failed:
4539 drbd_err(device, "Error %d while broadcasting event. "
4540 "Event seq:%u sib_reason:%u\n",
4541 err, seq, sib->sib_reason);
4542}
4543
4544static int nla_put_notification_header(struct sk_buff *msg,
4545 enum drbd_notification_type type)
4546{
4547 struct drbd_notification_header nh = {
4548 .nh_type = type,
4549 };
4550
4551 return drbd_notification_header_to_skb(msg, &nh, true);
4552}
4553
4554int notify_resource_state(struct sk_buff *skb,
4555 unsigned int seq,
4556 struct drbd_resource *resource,
4557 struct resource_info *resource_info,
4558 enum drbd_notification_type type)
4559{
4560 struct resource_statistics resource_statistics;
4561 struct drbd_genlmsghdr *dh;
4562 bool multicast = false;
4563 int err;
4564
4565 if (!skb) {
4566		seq = atomic_inc_return(&notify_genl_seq);
4567 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4568 err = -ENOMEM;
4569 if (!skb)
4570 goto failed;
4571 multicast = true;
4572 }
4573
4574 err = -EMSGSIZE;
4575 dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_RESOURCE_STATE);
4576 if (!dh)
4577 goto nla_put_failure;
4578 dh->minor = -1U;
4579 dh->ret_code = NO_ERROR;
4580 if (nla_put_drbd_cfg_context(skb, resource, NULL, NULL) ||
4581 nla_put_notification_header(skb, type) ||
4582 ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
4583 resource_info_to_skb(skb, resource_info, true)))
4584 goto nla_put_failure;
4585 resource_statistics.res_stat_write_ordering = resource->write_ordering;
4586 err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN));
4587 if (err)
4588 goto nla_put_failure;
4589 genlmsg_end(skb, dh);
4590 if (multicast) {
4591 err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
4592 /* skb has been consumed or freed in netlink_broadcast() */
4593 if (err && err != -ESRCH)
4594 goto failed;
4595 }
4596 return 0;
4597
4598nla_put_failure:
4599 nlmsg_free(skb);
4600failed:
4601 drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
4602 err, seq);
4603 return err;
4604}
4605
4606int notify_device_state(struct sk_buff *skb,
4607 unsigned int seq,
4608 struct drbd_device *device,
4609 struct device_info *device_info,
4610 enum drbd_notification_type type)
4611{
4612 struct device_statistics device_statistics;
4613 struct drbd_genlmsghdr *dh;
4614 bool multicast = false;
4615 int err;
4616
4617 if (!skb) {
4618		seq = atomic_inc_return(&notify_genl_seq);
4619 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4620 err = -ENOMEM;
4621 if (!skb)
4622 goto failed;
4623 multicast = true;
4624 }
4625
4626 err = -EMSGSIZE;
4627 dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_DEVICE_STATE);
4628 if (!dh)
4629 goto nla_put_failure;
4630 dh->minor = device->minor;
4631 dh->ret_code = NO_ERROR;
4632 if (nla_put_drbd_cfg_context(skb, device->resource, NULL, device) ||
4633 nla_put_notification_header(skb, type) ||
4634 ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
4635 device_info_to_skb(skb, device_info, true)))
4636 goto nla_put_failure;
4637 device_to_statistics(&device_statistics, device);
4638 device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN));
4639 genlmsg_end(skb, dh);
4640 if (multicast) {
4641 err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
4642 /* skb has been consumed or freed in netlink_broadcast() */
4643 if (err && err != -ESRCH)
4644 goto failed;
4645 }
4646 return 0;
4647
4648nla_put_failure:
4649 nlmsg_free(skb);
4650failed:
4651 drbd_err(device, "Error %d while broadcasting event. Event seq:%u\n",
4652 err, seq);
4653 return err;
4654}
4655
4656int notify_connection_state(struct sk_buff *skb,
4657 unsigned int seq,
4658 struct drbd_connection *connection,
4659 struct connection_info *connection_info,
4660 enum drbd_notification_type type)
4661{
4662 struct connection_statistics connection_statistics;
4663 struct drbd_genlmsghdr *dh;
4664 bool multicast = false;
4665 int err;
4666
4667 if (!skb) {
4668		seq = atomic_inc_return(&notify_genl_seq);
4669 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4670 err = -ENOMEM;
4671 if (!skb)
4672 goto failed;
4673 multicast = true;
4674 }
4675
4676 err = -EMSGSIZE;
4677 dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_CONNECTION_STATE);
4678 if (!dh)
4679 goto nla_put_failure;
4680 dh->minor = -1U;
4681 dh->ret_code = NO_ERROR;
4682 if (nla_put_drbd_cfg_context(skb, connection->resource, connection, NULL) ||
4683 nla_put_notification_header(skb, type) ||
4684 ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
4685 connection_info_to_skb(skb, connection_info, true)))
4686 goto nla_put_failure;
4687 connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->flags);
4688 connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
4689 genlmsg_end(skb, dh);
4690 if (multicast) {
4691 err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
4692 /* skb has been consumed or freed in netlink_broadcast() */
4693 if (err && err != -ESRCH)
4694 goto failed;
4695 }
4696 return 0;
4697
4698nla_put_failure:
4699 nlmsg_free(skb);
4700failed:
4701 drbd_err(connection, "Error %d while broadcasting event. Event seq:%u\n",
4702 err, seq);
4703 return err;
4704}
4705
4706int notify_peer_device_state(struct sk_buff *skb,
4707 unsigned int seq,
4708 struct drbd_peer_device *peer_device,
4709 struct peer_device_info *peer_device_info,
4710 enum drbd_notification_type type)
4711{
4712 struct peer_device_statistics peer_device_statistics;
4713 struct drbd_resource *resource = peer_device->device->resource;
4714 struct drbd_genlmsghdr *dh;
4715 bool multicast = false;
4716 int err;
4717
4718 if (!skb) {
4719		seq = atomic_inc_return(&notify_genl_seq);
4720 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4721 err = -ENOMEM;
4722 if (!skb)
4723 goto failed;
4724 multicast = true;
4725 }
4726
4727 err = -EMSGSIZE;
4728 dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_PEER_DEVICE_STATE);
4729 if (!dh)
4730 goto nla_put_failure;
4731 dh->minor = -1U;
4732 dh->ret_code = NO_ERROR;
4733 if (nla_put_drbd_cfg_context(skb, resource, peer_device->connection, peer_device->device) ||
4734 nla_put_notification_header(skb, type) ||
4735 ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
4736 peer_device_info_to_skb(skb, peer_device_info, true)))
4737 goto nla_put_failure;
4738 peer_device_to_statistics(&peer_device_statistics, peer_device);
4739 peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN));
4740 genlmsg_end(skb, dh);
4741 if (multicast) {
4742 err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
4743 /* skb has been consumed or freed in netlink_broadcast() */
4744 if (err && err != -ESRCH)
4745 goto failed;
4746 }
4747 return 0;
4748
4749nla_put_failure:
4750 nlmsg_free(skb);
4751failed:
4752 drbd_err(peer_device, "Error %d while broadcasting event. Event seq:%u\n",
4753 err, seq);
4754 return err;
4755}
4756
4757void notify_helper(enum drbd_notification_type type,
4758 struct drbd_device *device, struct drbd_connection *connection,
4759 const char *name, int status)
4760{
4761 struct drbd_resource *resource = device ? device->resource : connection->resource;
4762 struct drbd_helper_info helper_info;
4763	unsigned int seq = atomic_inc_return(&notify_genl_seq);
4764 struct sk_buff *skb = NULL;
4765 struct drbd_genlmsghdr *dh;
4766 int err;
4767
4768 strscpy(helper_info.helper_name, name, sizeof(helper_info.helper_name));
4769 helper_info.helper_name_len = min(strlen(name), sizeof(helper_info.helper_name));
4770 helper_info.helper_status = status;
4771
4772 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4773 err = -ENOMEM;
4774 if (!skb)
4775 goto fail;
4776
4777 err = -EMSGSIZE;
4778 dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_HELPER);
4779 if (!dh)
4780 goto fail;
4781 dh->minor = device ? device->minor : -1;
4782 dh->ret_code = NO_ERROR;
4783	mutex_lock(&notification_mutex);
4784 if (nla_put_drbd_cfg_context(skb, resource, connection, device) ||
4785 nla_put_notification_header(skb, type) ||
4786 drbd_helper_info_to_skb(skb, &helper_info, true))
4787 goto unlock_fail;
4788 genlmsg_end(skb, dh);
4789 err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
4790 skb = NULL;
4791 /* skb has been consumed or freed in netlink_broadcast() */
4792 if (err && err != -ESRCH)
4793 goto unlock_fail;
4794	mutex_unlock(&notification_mutex);
4795 return;
4796
4797unlock_fail:
4798	mutex_unlock(&notification_mutex);
4799fail:
4800 nlmsg_free(skb);
4801 drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
4802 err, seq);
4803}
4804
4805static int notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
4806{
4807 struct drbd_genlmsghdr *dh;
4808 int err;
4809
4810 err = -EMSGSIZE;
4811 dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_INITIAL_STATE_DONE);
4812 if (!dh)
4813 goto nla_put_failure;
4814 dh->minor = -1U;
4815 dh->ret_code = NO_ERROR;
4816 if (nla_put_notification_header(skb, NOTIFY_EXISTS))
4817 goto nla_put_failure;
4818 genlmsg_end(skb, dh);
4819 return 0;
4820
4821nla_put_failure:
4822 nlmsg_free(skb);
4823 pr_err("Error %d sending event. Event seq:%u\n", err, seq);
4824 return err;
4825}
4826
4827static void free_state_changes(struct list_head *list)
4828{
4829 while (!list_empty(list)) {
4830 struct drbd_state_change *state_change =
4831 list_first_entry(list, struct drbd_state_change, list);
4832 list_del(&state_change->list);
4833 forget_state_change(state_change);
4834 }
4835}
4836
4837static unsigned int notifications_for_state_change(struct drbd_state_change *state_change)
4838{
4839 return 1 +
4840 state_change->n_connections +
4841 state_change->n_devices +
4842 state_change->n_devices * state_change->n_connections;
4843}
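/*
 * Worked example: for a resource with 2 connections and 3 devices this
 * yields 1 + 2 + 3 + 3 * 2 = 12 notifications: one for the resource, one
 * per connection, one per device, and one per (device, connection) pair,
 * i.e. per peer device.
 */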
4844
4845static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
4846{
4847 struct drbd_state_change *state_change = (struct drbd_state_change *)cb->args[0];
4848 unsigned int seq = cb->args[2];
4849 unsigned int n;
4850 enum drbd_notification_type flags = 0;
4851 int err = 0;
4852
4853	/* There is no need to take notification_mutex here: it doesn't
4854	   matter if the initial state events mix with later state change
4855 events; we can always tell the events apart by the NOTIFY_EXISTS
4856 flag. */
4857
4858 cb->args[5]--;
4859 if (cb->args[5] == 1) {
4860 err = notify_initial_state_done(skb, seq);
4861 goto out;
4862 }
4863 n = cb->args[4]++;
4864 if (cb->args[4] < cb->args[3])
4865 flags |= NOTIFY_CONTINUES;
4866 if (n < 1) {
4867 err = notify_resource_state_change(skb, seq, state_change->resource,
4868 NOTIFY_EXISTS | flags);
4869 goto next;
4870 }
4871 n--;
4872 if (n < state_change->n_connections) {
4873 err = notify_connection_state_change(skb, seq, &state_change->connections[n],
4874 NOTIFY_EXISTS | flags);
4875 goto next;
4876 }
4877 n -= state_change->n_connections;
4878 if (n < state_change->n_devices) {
4879 err = notify_device_state_change(skb, seq, &state_change->devices[n],
4880 NOTIFY_EXISTS | flags);
4881 goto next;
4882 }
4883 n -= state_change->n_devices;
4884 if (n < state_change->n_devices * state_change->n_connections) {
4885 err = notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
4886 NOTIFY_EXISTS | flags);
4887 goto next;
4888 }
4889
4890next:
4891 if (cb->args[4] == cb->args[3]) {
4892 struct drbd_state_change *next_state_change =
4893 list_entry(state_change->list.next,
4894 struct drbd_state_change, list);
4895 cb->args[0] = (long)next_state_change;
4896 cb->args[3] = notifications_for_state_change(next_state_change);
4897 cb->args[4] = 0;
4898 }
4899out:
4900 if (err)
4901 return err;
4902 else
4903 return skb->len;
4904}
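/*
 * Summary of the cb->args[] layout used by get_initial_state() (derived
 * from the code, not from separate documentation): args[0] points at the
 * current drbd_state_change, args[2] carries the netlink sequence number,
 * args[3] the number of notifications for the current state change,
 * args[4] the index within it, and args[5] is a countdown initialized in
 * drbd_adm_get_initial_state() below to the total number of notifications
 * plus two: one extra step for the DRBD_INITIAL_STATE_DONE message and one
 * for the final call that frees the snapshot list.
 */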
4905
4906int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
4907{
4908 struct drbd_resource *resource;
4909 LIST_HEAD(head);
4910
4911 if (cb->args[5] >= 1) {
4912 if (cb->args[5] > 1)
4913 return get_initial_state(skb, cb);
4914 if (cb->args[0]) {
4915 struct drbd_state_change *state_change =
4916 (struct drbd_state_change *)cb->args[0];
4917
4918 /* connect list to head */
4919 list_add(&head, &state_change->list);
4920 free_state_changes(&head);
4921 }
4922 return 0;
4923 }
4924
4925 cb->args[5] = 2; /* number of iterations */
4926 mutex_lock(&resources_mutex);
4927 for_each_resource(resource, &drbd_resources) {
4928 struct drbd_state_change *state_change;
4929
4930 state_change = remember_old_state(resource, GFP_KERNEL);
4931 if (!state_change) {
4932 if (!list_empty(&head))
4933 free_state_changes(&head);
4934 mutex_unlock(&resources_mutex);
4935 return -ENOMEM;
4936 }
4937 copy_old_to_new_state_change(state_change);
4938 list_add_tail(&state_change->list, &head);
4939 cb->args[5] += notifications_for_state_change(state_change);
4940 }
4941 mutex_unlock(&resources_mutex);
4942
4943 if (!list_empty(&head)) {
4944 struct drbd_state_change *state_change =
4945 list_entry(head.next, struct drbd_state_change, list);
4946 cb->args[0] = (long)state_change;
4947 cb->args[3] = notifications_for_state_change(state_change);
4948 list_del(&head); /* detach list from head */
4949 }
4950
4951 cb->args[2] = cb->nlh->nlmsg_seq;
4952 return get_initial_state(skb, cb);
4953}