// SPDX-License-Identifier: GPL-2.0-or-later
/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
#include "drbd_state_change.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>

// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_devices_done(struct netlink_callback *cb);
int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_connections_done(struct netlink_callback *cb);
int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb);
int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb);

#include <linux/drbd_genl_api.h>
#include <linux/genl_magic_func.h>

static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
static atomic_t notify_genl_seq = ATOMIC_INIT(2); /* two. */

DEFINE_MUTEX(notification_mutex);

/* used by blkdev_get_by_path() to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";

static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
	if (genlmsg_reply(skb, info))
		pr_err("error sending genl reply\n");
}

/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
 * reason it could fail was no space in skb, and there are 4k available. */
static int drbd_msg_put_info(struct sk_buff *skb, const char *info)
{
	struct nlattr *nla;
	int err = -EMSGSIZE;

	if (!info || !info[0])
		return 0;

	nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	err = nla_put_string(skb, T_info_text, info);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	} else
		nla_nest_end(skb, nla);
	return 0;
}

__printf(2, 3)
static int drbd_msg_sprintf_info(struct sk_buff *skb, const char *fmt, ...)
{
	va_list args;
	struct nlattr *nla, *txt;
	int err = -EMSGSIZE;
	int len;

	nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	txt = nla_reserve(skb, T_info_text, 256);
	if (!txt) {
		nla_nest_cancel(skb, nla);
		return err;
	}
	va_start(args, fmt);
	len = vscnprintf(nla_data(txt), 256, fmt, args);
	va_end(args);

	/* maybe: retry with larger reserve, if truncated */
	txt->nla_len = nla_attr_size(len+1);
	nlmsg_trim(skb, (char*)txt + NLA_ALIGN(txt->nla_len));
	nla_nest_end(skb, nla);

	return 0;
}

/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 *
 * At this point, we still rely on the global genl_lock().
 * If we want to avoid that, and allow "genl_family.parallel_ops", we may need
 * to add additional synchronization against object destruction/modification.
 */
#define DRBD_ADM_NEED_MINOR	1
#define DRBD_ADM_NEED_RESOURCE	2
#define DRBD_ADM_NEED_CONNECTION 4
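
/* For reference (derived from this file): each drbd_adm_* handler passes the
 * subset of these flags that matches the context it needs; e.g. both
 * drbd_adm_disk_opts() and drbd_adm_attach() below call
 * drbd_adm_prepare(..., DRBD_ADM_NEED_MINOR). */
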
static int drbd_adm_prepare(struct drbd_config_context *adm_ctx,
	struct sk_buff *skb, struct genl_info *info, unsigned flags)
{
	struct drbd_genlmsghdr *d_in = info->userhdr;
	const u8 cmd = info->genlhdr->cmd;
	int err;

	memset(adm_ctx, 0, sizeof(*adm_ctx));

	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
	if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN))
		return -EPERM;

	adm_ctx->reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!adm_ctx->reply_skb) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx->reply_dh = genlmsg_put_reply(adm_ctx->reply_skb,
					info, &drbd_genl_family, 0, cmd);
	/* put of a few bytes into a fresh skb of >= 4k will always succeed.
	 * but anyways */
	if (!adm_ctx->reply_dh) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx->reply_dh->minor = d_in->minor;
	adm_ctx->reply_dh->ret_code = NO_ERROR;

	adm_ctx->volume = VOLUME_UNSPECIFIED;
	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
		struct nlattr *nla;
		/* parse and validate only */
		err = drbd_cfg_context_from_attrs(NULL, info);
		if (err)
			goto fail;

		/* It was present, and valid,
		 * copy it over to the reply skb. */
		err = nla_put_nohdr(adm_ctx->reply_skb,
				info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
				info->attrs[DRBD_NLA_CFG_CONTEXT]);
		if (err)
			goto fail;

		/* and assign stuff to the adm_ctx */
		nla = nested_attr_tb[__nla_type(T_ctx_volume)];
		if (nla)
			adm_ctx->volume = nla_get_u32(nla);
		nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
		if (nla)
			adm_ctx->resource_name = nla_data(nla);
		adm_ctx->my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
		adm_ctx->peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
		if ((adm_ctx->my_addr &&
		     nla_len(adm_ctx->my_addr) > sizeof(adm_ctx->connection->my_addr)) ||
		    (adm_ctx->peer_addr &&
		     nla_len(adm_ctx->peer_addr) > sizeof(adm_ctx->connection->peer_addr))) {
			err = -EINVAL;
			goto fail;
		}
	}

	adm_ctx->minor = d_in->minor;
	adm_ctx->device = minor_to_device(d_in->minor);

	/* We are protected by the global genl_lock().
	 * But we may explicitly drop it/retake it in drbd_adm_set_role(),
	 * so make sure this object stays around. */
	if (adm_ctx->device)
		kref_get(&adm_ctx->device->kref);

	if (adm_ctx->resource_name) {
		adm_ctx->resource = drbd_find_resource(adm_ctx->resource_name);
	}

	if (!adm_ctx->device && (flags & DRBD_ADM_NEED_MINOR)) {
		drbd_msg_put_info(adm_ctx->reply_skb, "unknown minor");
		return ERR_MINOR_INVALID;
	}
	if (!adm_ctx->resource && (flags & DRBD_ADM_NEED_RESOURCE)) {
		drbd_msg_put_info(adm_ctx->reply_skb, "unknown resource");
		if (adm_ctx->resource_name)
			return ERR_RES_NOT_KNOWN;
		return ERR_INVALID_REQUEST;
	}

	if (flags & DRBD_ADM_NEED_CONNECTION) {
		if (adm_ctx->resource) {
			drbd_msg_put_info(adm_ctx->reply_skb, "no resource name expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx->device) {
			drbd_msg_put_info(adm_ctx->reply_skb, "no minor number expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx->my_addr && adm_ctx->peer_addr)
			adm_ctx->connection = conn_get_by_addrs(nla_data(adm_ctx->my_addr),
							  nla_len(adm_ctx->my_addr),
							  nla_data(adm_ctx->peer_addr),
							  nla_len(adm_ctx->peer_addr));
		if (!adm_ctx->connection) {
			drbd_msg_put_info(adm_ctx->reply_skb, "unknown connection");
			return ERR_INVALID_REQUEST;
		}
	}

	/* some more paranoia, if the request was over-determined */
	if (adm_ctx->device && adm_ctx->resource &&
	    adm_ctx->device->resource != adm_ctx->resource) {
		pr_warn("request: minor=%u, resource=%s; but that minor belongs to resource %s\n",
			adm_ctx->minor, adm_ctx->resource->name,
			adm_ctx->device->resource->name);
		drbd_msg_put_info(adm_ctx->reply_skb, "minor exists in different resource");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx->device &&
	    adm_ctx->volume != VOLUME_UNSPECIFIED &&
	    adm_ctx->volume != adm_ctx->device->vnr) {
		pr_warn("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
			adm_ctx->minor, adm_ctx->volume,
			adm_ctx->device->vnr, adm_ctx->device->resource->name);
		drbd_msg_put_info(adm_ctx->reply_skb, "minor exists as different volume");
		return ERR_INVALID_REQUEST;
	}

	/* still, provide adm_ctx->resource always, if possible. */
	if (!adm_ctx->resource) {
		adm_ctx->resource = adm_ctx->device ? adm_ctx->device->resource
			: adm_ctx->connection ? adm_ctx->connection->resource : NULL;
		if (adm_ctx->resource)
			kref_get(&adm_ctx->resource->kref);
	}

	return NO_ERROR;

fail:
	nlmsg_free(adm_ctx->reply_skb);
	adm_ctx->reply_skb = NULL;
	return err;
}

static int drbd_adm_finish(struct drbd_config_context *adm_ctx,
	struct genl_info *info, int retcode)
{
	if (adm_ctx->device) {
		kref_put(&adm_ctx->device->kref, drbd_destroy_device);
		adm_ctx->device = NULL;
	}
	if (adm_ctx->connection) {
		kref_put(&adm_ctx->connection->kref, &drbd_destroy_connection);
		adm_ctx->connection = NULL;
	}
	if (adm_ctx->resource) {
		kref_put(&adm_ctx->resource->kref, drbd_destroy_resource);
		adm_ctx->resource = NULL;
	}

	if (!adm_ctx->reply_skb)
		return -ENOMEM;

	adm_ctx->reply_dh->ret_code = retcode;
	drbd_adm_send_reply(adm_ctx->reply_skb, info);
	return 0;
}

static void setup_khelper_env(struct drbd_connection *connection, char **envp)
{
	char *afs;

	/* FIXME: A future version will not allow this case. */
	if (connection->my_addr_len == 0 || connection->peer_addr_len == 0)
		return;

	switch (((struct sockaddr *)&connection->peer_addr)->sa_family) {
	case AF_INET6:
		afs = "ipv6";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
			 &((struct sockaddr_in6 *)&connection->peer_addr)->sin6_addr);
		break;
	case AF_INET:
		afs = "ipv4";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
		break;
	default:
		afs = "ssocks";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
	}
	snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
}

int drbd_khelper(struct drbd_device *device, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			(char[20]) { }, /* address family */
			(char[60]) { }, /* address */
			NULL };
	char mb[14];
	char *argv[] = {drbd_usermode_helper, cmd, mb, NULL };
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct sib_info sib;
	int ret;

	if (current == connection->worker.task)
		set_bit(CALLBACK_PENDING, &connection->flags);

	snprintf(mb, 14, "minor-%d", device_to_minor(device));
	setup_khelper_env(connection, envp);

	/* The helper may take some time.
	 * write out any unsynced meta data changes now */
	drbd_md_sync(device);

	drbd_info(device, "helper command: %s %s %s\n", drbd_usermode_helper, cmd, mb);
	sib.sib_reason = SIB_HELPER_PRE;
	sib.helper_name = cmd;
	drbd_bcast_event(device, &sib);
	notify_helper(NOTIFY_CALL, device, connection, cmd, 0);
	ret = call_usermodehelper(drbd_usermode_helper, argv, envp, UMH_WAIT_PROC);
	if (ret)
		drbd_warn(device, "helper command: %s %s %s exit code %u (0x%x)\n",
			  drbd_usermode_helper, cmd, mb,
			  (ret >> 8) & 0xff, ret);
	else
		drbd_info(device, "helper command: %s %s %s exit code %u (0x%x)\n",
			  drbd_usermode_helper, cmd, mb,
			  (ret >> 8) & 0xff, ret);
	sib.sib_reason = SIB_HELPER_POST;
	sib.helper_exit_code = ret;
	drbd_bcast_event(device, &sib);
	notify_helper(NOTIFY_RESPONSE, device, connection, cmd, ret);

	if (current == connection->worker.task)
		clear_bit(CALLBACK_PENDING, &connection->flags);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
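
/* Illustrative example (assumed values, not taken from this file): with the
 * usual drbd_usermode_helper of "/sbin/drbdadm", cmd = "pri-on-incon-degr",
 * minor 0 and an IPv4 peer, the helper runs roughly as
 *
 *	HOME=/ TERM=linux PATH=/sbin:/usr/sbin:/bin:/usr/bin \
 *	DRBD_PEER_AF=ipv4 DRBD_PEER_ADDRESS=10.0.0.2 \
 *	/sbin/drbdadm pri-on-incon-degr minor-0
 *
 * and the shell-style exit status is extracted as (ret >> 8) & 0xff. */
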
enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			(char[20]) { }, /* address family */
			(char[60]) { }, /* address */
			NULL };
	char *resource_name = connection->resource->name;
	char *argv[] = {drbd_usermode_helper, cmd, resource_name, NULL };
	int ret;

	setup_khelper_env(connection, envp);
	conn_md_sync(connection);

	drbd_info(connection, "helper command: %s %s %s\n", drbd_usermode_helper, cmd, resource_name);
	/* TODO: conn_bcast_event() ?? */
	notify_helper(NOTIFY_CALL, NULL, connection, cmd, 0);

	ret = call_usermodehelper(drbd_usermode_helper, argv, envp, UMH_WAIT_PROC);
	if (ret)
		drbd_warn(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
			  drbd_usermode_helper, cmd, resource_name,
			  (ret >> 8) & 0xff, ret);
	else
		drbd_info(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
			  drbd_usermode_helper, cmd, resource_name,
			  (ret >> 8) & 0xff, ret);
	/* TODO: conn_bcast_event() ?? */
	notify_helper(NOTIFY_RESPONSE, NULL, connection, cmd, ret);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

static enum drbd_fencing_p highest_fencing_policy(struct drbd_connection *connection)
{
	enum drbd_fencing_p fp = FP_NOT_AVAIL;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		if (get_ldev_if_state(device, D_CONSISTENT)) {
			struct disk_conf *disk_conf =
				rcu_dereference(peer_device->device->ldev->disk_conf);
			fp = max_t(enum drbd_fencing_p, fp, disk_conf->fencing);
			put_ldev(device);
		}
	}
	rcu_read_unlock();

	return fp;
}

static bool resource_is_suspended(struct drbd_resource *resource)
{
	return resource->susp || resource->susp_fen || resource->susp_nod;
}

bool conn_try_outdate_peer(struct drbd_connection *connection)
{
	struct drbd_resource * const resource = connection->resource;
	unsigned int connect_cnt;
	union drbd_state mask = { };
	union drbd_state val = { };
	enum drbd_fencing_p fp;
	char *ex_to_string;
	int r;

	spin_lock_irq(&resource->req_lock);
	if (connection->cstate >= C_WF_REPORT_PARAMS) {
		drbd_err(connection, "Expected cstate < C_WF_REPORT_PARAMS\n");
		spin_unlock_irq(&resource->req_lock);
		return false;
	}

	connect_cnt = connection->connect_cnt;
	spin_unlock_irq(&resource->req_lock);

	fp = highest_fencing_policy(connection);
	switch (fp) {
	case FP_NOT_AVAIL:
		drbd_warn(connection, "Not fencing peer, I'm not even Consistent myself.\n");
		spin_lock_irq(&resource->req_lock);
		if (connection->cstate < C_WF_REPORT_PARAMS) {
			_conn_request_state(connection,
					    (union drbd_state) { { .susp_fen = 1 } },
					    (union drbd_state) { { .susp_fen = 0 } },
					    CS_VERBOSE | CS_HARD | CS_DC_SUSP);
			/* We are no longer suspended due to the fencing policy.
			 * We may still be suspended due to the on-no-data-accessible policy.
			 * If that was OND_IO_ERROR, fail pending requests. */
			if (!resource_is_suspended(resource))
				_tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
		}
		/* Else: in case we raced with a connection handshake,
		 * let the handshake figure out if we maybe can RESEND,
		 * and do not resume/fail pending requests here.
		 * Worst case is we stay suspended for now, which may be
		 * resolved by either re-establishing the replication link, or
		 * the next link failure, or eventually the administrator. */
		spin_unlock_irq(&resource->req_lock);
		return false;

	case FP_DONT_CARE:
		return true;
	default: ;
	}

	r = conn_khelper(connection, "fence-peer");

	switch ((r>>8) & 0xff) {
	case P_INCONSISTENT: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		mask.pdsk = D_MASK;
		val.pdsk = D_INCONSISTENT;
		break;
	case P_OUTDATED: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	case P_DOWN: /* peer was down */
		if (conn_highest_disk(connection) == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			mask.pdsk = D_MASK;
			val.pdsk = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
		}
		break;
	case P_PRIMARY: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		drbd_warn(connection, "Peer is primary, outdating myself.\n");
		mask.disk = D_MASK;
		val.disk = D_OUTDATED;
		break;
	case P_FENCING:
		/* THINK: do we need to handle this
		 * like case 4, or more like case 5? */
		if (fp != FP_STONITH)
			drbd_err(connection, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		drbd_err(connection, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return false; /* Eventually leave IO frozen */
	}

	drbd_info(connection, "fence-peer helper returned %d (%s)\n",
		  (r>>8) & 0xff, ex_to_string);

	/* Not using
	 *   conn_request_state(connection, mask, val, CS_VERBOSE);
	 * here, because we might have been able to re-establish the
	 * connection in the meantime. */
	spin_lock_irq(&resource->req_lock);
	if (connection->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &connection->flags)) {
		if (connection->connect_cnt != connect_cnt)
			/* In case the connection was established and dropped
			   while the fence-peer handler was running, ignore it */
			drbd_info(connection, "Ignoring fence-peer exit code\n");
		else
			_conn_request_state(connection, mask, val, CS_VERBOSE);
	}
	spin_unlock_irq(&resource->req_lock);

	return conn_highest_pdsk(connection) <= D_OUTDATED;
}
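
/* For reference, the fence-peer helper exit code convention evaluated above
 * (numeric values follow the documented DRBD fence-peer interface):
 *	3 (P_INCONSISTENT)  peer is inconsistent or worse -> pdsk = Inconsistent
 *	4 (P_OUTDATED)      peer was fenced               -> pdsk = Outdated
 *	5 (P_DOWN)          peer unreachable; Outdated only if we are UpToDate
 *	6 (P_PRIMARY)       peer is active Primary        -> outdate our own disk
 *	7 (P_FENCING)       peer was stonithed            -> pdsk = Outdated
 *	anything else       helper is considered broken; IO may stay frozen
 */
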
static int _try_outdate_peer_async(void *data)
{
	struct drbd_connection *connection = (struct drbd_connection *)data;

	conn_try_outdate_peer(connection);

	kref_put(&connection->kref, drbd_destroy_connection);
	return 0;
}

void conn_try_outdate_peer_async(struct drbd_connection *connection)
{
	struct task_struct *opa;

	kref_get(&connection->kref);
	/* We may have just sent a signal to this thread
	 * to get it out of some blocking network function.
	 * Clear signals; otherwise kthread_run(), which internally uses
	 * wait_on_completion_killable(), will mistake our pending signal
	 * for a new fatal signal and fail. */
	flush_signals(current);
	opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
	if (IS_ERR(opa)) {
		drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
		kref_put(&connection->kref, drbd_destroy_connection);
	}
}

enum drbd_state_rv
drbd_set_role(struct drbd_device *const device, enum drbd_role new_role, int force)
{
	struct drbd_peer_device *const peer_device = first_peer_device(device);
	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	struct net_conf *nc;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;

	if (new_role == R_PRIMARY) {
		struct drbd_connection *connection;

		/* Detect dead peers as soon as possible.  */

		rcu_read_lock();
		for_each_connection(connection, device->resource)
			request_ping(connection);
		rcu_read_unlock();
	}

	mutex_lock(device->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state_holding_state_mutex(device, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (device->state.disk < D_UP_TO_DATE &&
		     device->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    device->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(device, device->state.pdsk == D_UNKNOWN);

			if (conn_try_outdate_peer(connection)) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}
			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto out;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			if (!conn_try_outdate_peer(connection) && force) {
				drbd_warn(device, "Forced into split brain situation!\n");
				mask.pdsk = D_MASK;
				val.pdsk = D_OUTDATED;
			}
			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			if (try < max_tries) {
				int timeo;
				try = max_tries - 1;
				rcu_read_lock();
				nc = rcu_dereference(connection->net_conf);
				timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
				rcu_read_unlock();
				schedule_timeout_interruptible(timeo);
			}
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(device, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto out;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto out;

	if (forced)
		drbd_warn(device, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(device->misc_wait, atomic_read(&device->ap_pending_cnt) == 0);

	/* FIXME also wait for all pending P_BARRIER_ACK? */

	if (new_role == R_SECONDARY) {
		if (get_ldev(device)) {
			device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(device);
		}
	} else {
		mutex_lock(&device->resource->conf_update);
		nc = connection->net_conf;
		if (nc)
			nc->discard_my_data = 0; /* without copy; single bit op is atomic */
		mutex_unlock(&device->resource->conf_update);

		if (get_ldev(device)) {
			if (((device->state.conn < C_CONNECTED ||
			       device->state.pdsk <= D_FAILED)
			      && device->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(device);

			device->ldev->md.uuid[UI_CURRENT] |= (u64)1;
			put_ldev(device);
		}
	}

	/* writeout of activity log covered areas of the bitmap
	 * to stable storage done in after state change already */

	if (device->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(peer_device);
		drbd_send_current_state(peer_device);
	}

	drbd_md_sync(device);
	set_disk_ro(device->vdisk, new_role == R_SECONDARY);
	kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
out:
	mutex_unlock(device->state_mutex);
	return rv;
}

static const char *from_attrs_err_to_txt(int err)
{
	return	err == -ENOMSG ? "required attribute missing" :
		err == -EOPNOTSUPP ? "unknown mandatory attribute" :
		err == -EEXIST ? "can not change invariant setting" :
		"invalid attribute value";
}

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct set_role_parms parms;
	int err;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
		err = set_role_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
			goto out;
		}
	}
	genl_unlock();
	mutex_lock(&adm_ctx.resource->adm_mutex);

	if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
		retcode = (enum drbd_ret_code)drbd_set_role(adm_ctx.device,
						R_PRIMARY, parms.assume_uptodate);
	else
		retcode = (enum drbd_ret_code)drbd_set_role(adm_ctx.device,
						R_SECONDARY, 0);

	mutex_unlock(&adm_ctx.resource->adm_mutex);
	genl_lock();
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

/* Initializes the md.*_offset members, so we are able to find
 * the on disk meta data.
 *
 * We currently have two possible layouts:
 * external:
 *   |----------- md_size_sect ------------------|
 *   [ 4k superblock ][ activity log ][  Bitmap  ]
 *   | al_offset == 8 |
 *   | bm_offset = al_offset + X      |
 *  ==> bitmap sectors = md_size_sect - bm_offset
 *
 * internal:
 *            |----------- md_size_sect ------------------|
 * [data.....][  Bitmap  ][ activity log ][ 4k superblock ]
 *                        | al_offset < 0 |
 *            | bm_offset = al_offset - Y |
 *  ==> bitmap sectors = Y = al_offset - bm_offset
 *
 *  Activity log size used to be fixed 32kB,
 *  but is about to become configurable.
 */
static void drbd_md_set_sector_offsets(struct drbd_device *device,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	unsigned int al_size_sect = bdev->md.al_size_4k * 8;

	bdev->md.md_offset = drbd_md_ss(bdev);

	switch (bdev->md.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_128MB_SECT;
		bdev->md.al_offset = MD_4kB_SECT;
		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.al_offset = MD_4kB_SECT;
		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		/* al size is still fixed */
		bdev->md.al_offset = -al_size_sect;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_4kB_SECT + al_size_sect;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_4kB_SECT;
		break;
	}
}
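
/* Rough sizing example (illustrative, not from the original source): with the
 * default 4 KiB bitmap granularity, internal meta data needs about 32 KiB of
 * bitmap per 1 GiB of backing storage, plus the 4 KiB superblock and the
 * activity log.  For a 1 TiB backing device that is roughly 32 MiB of meta
 * data placed at the end of the device. */
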
/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);

	return buf;
}
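
/* Example (illustrative): with size given in KB,
 *	ppsize(buf, 1048576ULL) yields "1024 MB" (1 GiB),
 *	ppsize(buf, 4096)       yields "4096 KB" (below the 10000 threshold).
 */
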
/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
 *  remote READ does inc_ap_bio, receiver would need to receive answer
 *  packet from remote to dec_ap_bio again.
 *  receiver receive_sizes(), comes here,
 *  waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 *  R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 *  (not connected, or bad/no disk on peer):
 *  see drbd_fail_request_early, ap_bio_cnt is zero.
 *  R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 *  peer may not initiate a resize.
 */
/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes a device->flag, is triggered by drbd internals,
 * and should be short-lived. */
/* It needs to be a counter, since multiple threads might
   independently suspend and resume IO. */
void drbd_suspend_io(struct drbd_device *device)
{
	atomic_inc(&device->suspend_cnt);
	if (drbd_suspended(device))
		return;
	wait_event(device->misc_wait, !atomic_read(&device->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_device *device)
{
	if (atomic_dec_and_test(&device->suspend_cnt))
		wake_up(&device->misc_wait);
}
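
/* Typical usage pattern (as seen in drbd_determine_dev_size() below):
 *
 *	drbd_suspend_io(device);
 *	... perform meta data layout changes ...
 *	drbd_resume_io(device);
 */
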
/**
 * drbd_determine_dev_size() - Sets the right device size obeying all constraints
 * @device:	DRBD device.
 *
 * Returns 0 on success, negative return values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size
drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct resize_parms *rs) __must_hold(local)
{
	struct md_offsets_and_sizes {
		u64 last_agreed_sect;
		u64 md_offset;
		s32 al_offset;
		s32 bm_offset;
		u32 md_size_sect;

		u32 al_stripes;
		u32 al_stripe_size_4k;
	} prev;
	sector_t u_size, size;
	struct drbd_md *md = &device->ldev->md;
	void *buffer;

	int md_moved, la_size_changed;
	enum determine_dev_size rv = DS_UNCHANGED;

	/* We may change the on-disk offsets of our meta data below. Lock out
	 * anything that may cause meta data IO, to avoid acting on incomplete
	 * layout changes or scribbling over meta data that is in the process
	 * of being moved.
	 *
	 * Move is not exactly correct, btw, currently we have all our meta
	 * data in core memory, to "move" it we just write it all out, there
	 * are no reads. */
	drbd_suspend_io(device);
	buffer = drbd_md_get_buffer(device, __func__); /* Lock meta-data IO */
	if (!buffer) {
		drbd_resume_io(device);
		return DS_ERROR;
	}

	/* remember current offset and sizes */
	prev.last_agreed_sect = md->la_size_sect;
	prev.md_offset = md->md_offset;
	prev.al_offset = md->al_offset;
	prev.bm_offset = md->bm_offset;
	prev.md_size_sect = md->md_size_sect;
	prev.al_stripes = md->al_stripes;
	prev.al_stripe_size_4k = md->al_stripe_size_4k;

	if (rs) {
		/* rs is non NULL if we should change the AL layout only */
		md->al_stripes = rs->al_stripes;
		md->al_stripe_size_4k = rs->al_stripe_size / 4;
		md->al_size_4k = (u64)rs->al_stripes * rs->al_stripe_size / 4;
	}

	drbd_md_set_sector_offsets(device, device->ldev);

	rcu_read_lock();
	u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	size = drbd_new_dev_size(device, device->ldev, u_size, flags & DDSF_FORCED);

	if (size < prev.last_agreed_sect) {
		if (rs && u_size == 0) {
			/* Remove "rs &&" later. This check should always be active, but
			   right now the receiver expects the permissive behavior */
			drbd_warn(device, "Implicit shrink not allowed. "
				 "Use --size=%llus for explicit shrink.\n",
				 (unsigned long long)size);
			rv = DS_ERROR_SHRINK;
		}
		if (u_size > size)
			rv = DS_ERROR_SPACE_MD;
		if (rv != DS_UNCHANGED)
			goto err_out;
	}

	if (get_capacity(device->vdisk) != size ||
	    drbd_bm_capacity(device) != size) {
		int err;
		err = drbd_bm_resize(device, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(device);
			if (size == 0) {
				drbd_err(device, "OUT OF MEMORY! "
				    "Could not allocate bitmap!\n");
			} else {
				drbd_err(device, "BM resizing failed. "
				    "Leaving size unchanged\n");
			}
			rv = DS_ERROR;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(device, size);
		md->la_size_sect = size;
	}
	if (rv <= DS_ERROR)
		goto err_out;

	la_size_changed = (prev.last_agreed_sect != md->la_size_sect);

	md_moved = prev.md_offset    != md->md_offset
		|| prev.md_size_sect != md->md_size_sect;

	if (la_size_changed || md_moved || rs) {
		u32 prev_flags;

		/* We do some synchronous IO below, which may take some time.
		 * Clear the timer, to avoid scary "timer expired!" messages,
		 * "Superblock" is written out at least twice below, anyways. */
		del_timer(&device->md_sync_timer);

		/* We won't change the "al-extents" setting, we just may need
		 * to move the on-disk location of the activity log ringbuffer.
		 * Lock for transaction is good enough, it may well be "dirty"
		 * or even "starving". */
		wait_event(device->al_wait, lc_try_lock_for_transaction(device->act_log));

		/* mark current on-disk bitmap and activity log as unreliable */
		prev_flags = md->flags;
		md->flags |= MDF_FULL_SYNC | MDF_AL_DISABLED;
		drbd_md_write(device, buffer);

		drbd_al_initialize(device, buffer);

		drbd_info(device, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		drbd_bitmap_io(device, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
			       "size changed", BM_LOCKED_MASK);

		/* on-disk bitmap and activity log is authoritative again
		 * (unless there was an IO error meanwhile...) */
		md->flags = prev_flags;
		drbd_md_write(device, buffer);

		if (rs)
			drbd_info(device, "Changed AL layout to al-stripes = %d, al-stripe-size-kB = %d\n",
				  md->al_stripes, md->al_stripe_size_4k * 4);
	}

	if (size > prev.last_agreed_sect)
		rv = prev.last_agreed_sect ? DS_GREW : DS_GREW_FROM_ZERO;
	if (size < prev.last_agreed_sect)
		rv = DS_SHRUNK;

	if (0) {
	err_out:
		/* restore previous offset and sizes */
		md->la_size_sect = prev.last_agreed_sect;
		md->md_offset = prev.md_offset;
		md->al_offset = prev.al_offset;
		md->bm_offset = prev.bm_offset;
		md->md_size_sect = prev.md_size_sect;
		md->al_stripes = prev.al_stripes;
		md->al_stripe_size_4k = prev.al_stripe_size_4k;
		md->al_size_4k = (u64)prev.al_stripes * prev.al_stripe_size_4k;
	}
	lc_unlock(device->act_log);
	wake_up(&device->al_wait);
	drbd_md_put_buffer(device);
	drbd_resume_io(device);

	return rv;
}

sector_t
drbd_new_dev_size(struct drbd_device *device, struct drbd_backing_dev *bdev,
		  sector_t u_size, int assume_peer_has_space)
{
	sector_t p_size = device->p_size;   /* partner's disk size. */
	sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (device->state.conn < C_CONNECTED && assume_peer_has_space) {
		drbd_warn(device, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size_sect) {
			size = la_size_sect;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		drbd_err(device, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			drbd_err(device, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}
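
/* Worked example (illustrative): with a local backing device of
 * m_size = 20 GiB, a connected peer reporting p_size = 10 GiB, and no
 * user-requested size (u_size == 0), the agreed size is
 * min(p_size, m_size) = 10 GiB.  A u_size larger than that is rejected
 * with "Requested disk size is too big"; a smaller u_size wins. */
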
/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @device:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	if (device->act_log &&
	    device->act_log->nr_elements == dc->al_extents)
		return 0;

	in_use = 0;
	t = device->act_log;
	n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
		dc->al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		drbd_err(device, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&device->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				drbd_err(device, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		device->act_log = n;
	spin_unlock_irq(&device->al_lock);
	if (in_use) {
		drbd_err(device, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		lc_destroy(t);
	}
	drbd_md_mark_dirty(device); /* we changed device->act_log->nr_elements */
	return 0;
}

static void blk_queue_discard_granularity(struct request_queue *q, unsigned int granularity)
{
	q->limits.discard_granularity = granularity;
}

static unsigned int drbd_max_discard_sectors(struct drbd_connection *connection)
{
	/* when we introduced REQ_WRITE_SAME support, we also bumped
	 * our maximum supported batch bio size used for discards. */
	if (connection->agreed_features & DRBD_FF_WSAME)
		return DRBD_MAX_BBIO_SECTORS;
	/* before, with DRBD <= 8.4.6, we only allowed up to one AL_EXTENT_SIZE. */
	return AL_EXTENT_SIZE >> 9;
}

static void decide_on_discard_support(struct drbd_device *device,
		struct request_queue *q,
		struct request_queue *b,
		bool discard_zeroes_if_aligned)
{
	/* q = drbd device queue (device->rq_queue)
	 * b = backing device queue (device->ldev->backing_bdev->bd_disk->queue),
	 *     or NULL if diskless
	 */
	struct drbd_connection *connection = first_peer_device(device)->connection;
	bool can_do = b ? blk_queue_discard(b) : true;

	if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_TRIM)) {
		can_do = false;
		drbd_info(connection, "peer DRBD too old, does not support TRIM: disabling discards\n");
	}
	if (can_do) {
		/* We don't care for the granularity, really.
		 * Stacking limits below should fix it for the local
		 * device. Whether or not it is a suitable granularity
		 * on the remote device is not our problem, really. If
		 * you care, you need to use devices with similar
		 * topology on all peers. */
		blk_queue_discard_granularity(q, 512);
		q->limits.max_discard_sectors = drbd_max_discard_sectors(connection);
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
		q->limits.max_write_zeroes_sectors = drbd_max_discard_sectors(connection);
	} else {
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
		blk_queue_discard_granularity(q, 0);
		q->limits.max_discard_sectors = 0;
		q->limits.max_write_zeroes_sectors = 0;
	}
}

static void fixup_discard_if_not_supported(struct request_queue *q)
{
	/* To avoid confusion, if this queue does not support discard, clear
	 * max_discard_sectors, which is what lsblk -D reports to the user.
	 * Older kernels got this wrong in "stack limits".
	 */
	if (!blk_queue_discard(q)) {
		blk_queue_max_discard_sectors(q, 0);
		blk_queue_discard_granularity(q, 0);
	}
}

static void fixup_write_zeroes(struct drbd_device *device, struct request_queue *q)
{
	/* Fixup max_write_zeroes_sectors after blk_stack_limits():
	 * if we can handle "zeroes" efficiently on the protocol,
	 * we want to do that, even if our backend does not announce
	 * max_write_zeroes_sectors itself. */
	struct drbd_connection *connection = first_peer_device(device)->connection;
	/* If the peer announces WZEROES support, use it. Otherwise, rather
	 * send explicit zeroes than rely on some discard-zeroes-data magic. */
	if (connection->agreed_features & DRBD_FF_WZEROES)
		q->limits.max_write_zeroes_sectors = DRBD_MAX_BBIO_SECTORS;
	else
		q->limits.max_write_zeroes_sectors = 0;
}

static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev,
				   unsigned int max_bio_size, struct o_qlim *o)
{
	struct request_queue * const q = device->rq_queue;
	unsigned int max_hw_sectors = max_bio_size >> 9;
	unsigned int max_segments = 0;
	struct request_queue *b = NULL;
	struct disk_conf *dc;
	bool discard_zeroes_if_aligned = true;

	if (bdev) {
		b = bdev->backing_bdev->bd_disk->queue;

		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
		rcu_read_lock();
		dc = rcu_dereference(device->ldev->disk_conf);
		max_segments = dc->max_bio_bvecs;
		discard_zeroes_if_aligned = dc->discard_zeroes_if_aligned;
		rcu_read_unlock();

		blk_set_stacking_limits(&q->limits);
	}

	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_SIZE-1);
	decide_on_discard_support(device, q, b, discard_zeroes_if_aligned);

	if (b) {
		blk_stack_limits(&q->limits, &b->limits, 0);
		disk_update_readahead(device->vdisk);
	}
	fixup_discard_if_not_supported(q);
	fixup_write_zeroes(device, q);
}

void drbd_reconsider_queue_parameters(struct drbd_device *device, struct drbd_backing_dev *bdev, struct o_qlim *o)
{
	unsigned int now, new, local, peer;

	now = queue_max_hw_sectors(device->rq_queue) << 9;
	local = device->local_max_bio_size; /* Eventually last known value, from volatile memory */
	peer = device->peer_max_bio_size; /* Eventually last known value, from meta data */

	if (bdev) {
		local = queue_max_hw_sectors(bdev->backing_bdev->bd_disk->queue) << 9;
		device->local_max_bio_size = local;
	}
	local = min(local, DRBD_MAX_BIO_SIZE);

	/* We may ignore peer limits if the peer is modern enough.
	   Since 8.3.8 the peer can use multiple
	   BIOs for a single peer_request */
	if (device->state.conn >= C_WF_REPORT_PARAMS) {
		if (first_peer_device(device)->connection->agreed_pro_version < 94)
			peer = min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
		/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
		else if (first_peer_device(device)->connection->agreed_pro_version == 94)
			peer = DRBD_MAX_SIZE_H80_PACKET;
		else if (first_peer_device(device)->connection->agreed_pro_version < 100)
			peer = DRBD_MAX_BIO_SIZE_P95;  /* drbd 8.3.8 onwards, before 8.4.0 */
		else
			peer = DRBD_MAX_BIO_SIZE;

		/* We may later detach and re-attach on a disconnected Primary.
		 * Avoid this setting to jump back in that case.
		 * We want to store what we know the peer DRBD can handle,
		 * not what the peer IO backend can handle. */
		if (peer > device->peer_max_bio_size)
			device->peer_max_bio_size = peer;
	}
	new = min(local, peer);

	if (device->state.role == R_PRIMARY && new < now)
		drbd_err(device, "ASSERT FAILED new < now; (%u < %u)\n", new, now);

	if (new != now)
		drbd_info(device, "max BIO size = %u\n", new);

	drbd_setup_queue_param(device, bdev, new, o);
}
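
/* For reference, the peer limit applied above by agreed protocol version,
 * assuming the usual constants (32 KiB / 128 KiB / 1 MiB):
 *	< 94   : min(peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET), 32 KiB cap
 *	== 94  : DRBD_MAX_SIZE_H80_PACKET (32 KiB)
 *	95..99 : DRBD_MAX_BIO_SIZE_P95 (128 KiB; drbd 8.3.8 up to before 8.4.0)
 *	>= 100 : DRBD_MAX_BIO_SIZE (1 MiB)
 */
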
/* Starts the worker thread */
static void conn_reconfig_start(struct drbd_connection *connection)
{
	drbd_thread_start(&connection->worker);
	drbd_flush_workqueue(&connection->sender_work);
}

/* if still unconfigured, stops worker again. */
static void conn_reconfig_done(struct drbd_connection *connection)
{
	bool stop_threads;
	spin_lock_irq(&connection->resource->req_lock);
	stop_threads = conn_all_vols_unconf(connection) &&
		connection->cstate == C_STANDALONE;
	spin_unlock_irq(&connection->resource->req_lock);
	if (stop_threads) {
		/* ack_receiver thread and ack_sender workqueue are implicitly
		 * stopped by receiver in conn_disconnect() */
		drbd_thread_stop(&connection->receiver);
		drbd_thread_stop(&connection->worker);
	}
}

/* Make sure IO is suspended before calling this function. */
static void drbd_suspend_al(struct drbd_device *device)
{
	int s = 0;

	if (!lc_try_lock(device->act_log)) {
		drbd_warn(device, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	drbd_al_shrink(device);
	spin_lock_irq(&device->resource->req_lock);
	if (device->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &device->flags);
	spin_unlock_irq(&device->resource->req_lock);
	lc_unlock(device->act_log);

	if (s)
		drbd_info(device, "Suspended AL updates\n");
}

static bool should_set_defaults(struct genl_info *info)
{
	unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
	return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
}

static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
{
	/* This is limited by 16 bit "slot" numbers,
	 * and by available on-disk context storage.
	 *
	 * Also (u16)~0 is special (denotes a "free" extent).
	 *
	 * One transaction occupies one 4kB on-disk block,
	 * we have n such blocks in the on disk ring buffer,
	 * the "current" transaction may fail (n-1),
	 * and there are 919 slot numbers of context information per transaction.
	 *
	 * 72 transaction blocks amounts to more than 2**16 context slots,
	 * so cap there first.
	 */
	const unsigned int max_al_nr = DRBD_AL_EXTENTS_MAX;
	const unsigned int sufficient_on_disk =
		(max_al_nr + AL_CONTEXT_PER_TRANSACTION -1)
		/AL_CONTEXT_PER_TRANSACTION;

	unsigned int al_size_4k = bdev->md.al_size_4k;

	if (al_size_4k > sufficient_on_disk)
		return max_al_nr;

	return (al_size_4k - 1) * AL_CONTEXT_PER_TRANSACTION;
}
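
/* Worked example (illustrative): with AL_CONTEXT_PER_TRANSACTION == 919 and
 * the default 32 kB activity log (al_size_4k == 8), this yields
 * (8 - 1) * 919 = 6433 usable al-extents; only once al_size_4k exceeds
 * the ~72 blocks computed above does the DRBD_AL_EXTENTS_MAX cap apply. */
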
static bool write_ordering_changed(struct disk_conf *a, struct disk_conf *b)
{
	return	a->disk_barrier != b->disk_barrier ||
		a->disk_flushes != b->disk_flushes ||
		a->disk_drain != b->disk_drain;
}

static void sanitize_disk_conf(struct drbd_device *device, struct disk_conf *disk_conf,
			       struct drbd_backing_dev *nbc)
{
	struct request_queue * const q = nbc->backing_bdev->bd_disk->queue;

	if (disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
		disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
	if (disk_conf->al_extents > drbd_al_extents_max(nbc))
		disk_conf->al_extents = drbd_al_extents_max(nbc);

	if (!blk_queue_discard(q)) {
		if (disk_conf->rs_discard_granularity) {
			disk_conf->rs_discard_granularity = 0; /* disable feature */
			drbd_info(device, "rs_discard_granularity feature disabled\n");
		}
	}

	if (disk_conf->rs_discard_granularity) {
		int orig_value = disk_conf->rs_discard_granularity;
		int remainder;

		if (q->limits.discard_granularity > disk_conf->rs_discard_granularity)
			disk_conf->rs_discard_granularity = q->limits.discard_granularity;

		remainder = disk_conf->rs_discard_granularity % q->limits.discard_granularity;
		disk_conf->rs_discard_granularity += remainder;

		if (disk_conf->rs_discard_granularity > q->limits.max_discard_sectors << 9)
			disk_conf->rs_discard_granularity = q->limits.max_discard_sectors << 9;

		if (disk_conf->rs_discard_granularity != orig_value)
			drbd_info(device, "rs_discard_granularity changed to %d\n",
				  disk_conf->rs_discard_granularity);
	}
}

static int disk_opts_check_al_size(struct drbd_device *device, struct disk_conf *dc)
{
	int err = -EBUSY;

	if (device->act_log &&
	    device->act_log->nr_elements == dc->al_extents)
		return 0;

	drbd_suspend_io(device);
	/* If IO completion is currently blocked, we would likely wait
	 * "forever" for the activity log to become unused. So we don't. */
	if (atomic_read(&device->ap_bio_cnt))
		goto out;

	wait_event(device->al_wait, lc_try_lock(device->act_log));
	drbd_al_shrink(device);
	err = drbd_check_al_size(device, dc);
	lc_unlock(device->act_log);
	wake_up(&device->al_wait);
out:
	drbd_resume_io(device);
	return err;
}

int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;
	struct drbd_device *device;
	struct disk_conf *new_disk_conf, *old_disk_conf;
	struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
	int err;
	unsigned int fifo_size;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;

	device = adm_ctx.device;
	mutex_lock(&adm_ctx.resource->adm_mutex);

	/* we also need a disk
	 * to change the options on */
	if (!get_ldev(device)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
	if (!new_disk_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	mutex_lock(&device->resource->conf_update);
	old_disk_conf = device->ldev->disk_conf;
	*new_disk_conf = *old_disk_conf;
	if (should_set_defaults(info))
		set_disk_conf_defaults(new_disk_conf);

	err = disk_conf_from_attrs_for_change(new_disk_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
		goto fail_unlock;
	}

	if (!expect(new_disk_conf->resync_rate >= 1))
		new_disk_conf->resync_rate = 1;

	sanitize_disk_conf(device, new_disk_conf, device->ldev);

	if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
		new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;

	fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
	if (fifo_size != device->rs_plan_s->size) {
		new_plan = fifo_alloc(fifo_size);
		if (!new_plan) {
			drbd_err(device, "kmalloc of fifo_buffer failed");
			retcode = ERR_NOMEM;
			goto fail_unlock;
		}
	}

	err = disk_opts_check_al_size(device, new_disk_conf);
	if (err) {
		/* Could be just "busy". Ignore?
		 * Introduce dedicated error code? */
		drbd_msg_put_info(adm_ctx.reply_skb,
			"Try again without changing current al-extents setting");
		retcode = ERR_NOMEM;
		goto fail_unlock;
	}

	lock_all_resources();
	retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
	if (retcode == NO_ERROR) {
		rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
		drbd_resync_after_changed(device);
	}
	unlock_all_resources();

	if (retcode != NO_ERROR)
		goto fail_unlock;

	if (new_plan) {
		old_plan = device->rs_plan_s;
		rcu_assign_pointer(device->rs_plan_s, new_plan);
	}

	mutex_unlock(&device->resource->conf_update);

	if (new_disk_conf->al_updates)
		device->ldev->md.flags &= ~MDF_AL_DISABLED;
	else
		device->ldev->md.flags |= MDF_AL_DISABLED;

	if (new_disk_conf->md_flushes)
		clear_bit(MD_NO_FUA, &device->flags);
	else
		set_bit(MD_NO_FUA, &device->flags);

	if (write_ordering_changed(old_disk_conf, new_disk_conf))
		drbd_bump_write_ordering(device->resource, NULL, WO_BDEV_FLUSH);

	if (old_disk_conf->discard_zeroes_if_aligned !=
	    new_disk_conf->discard_zeroes_if_aligned)
		drbd_reconsider_queue_parameters(device, device->ldev, NULL);

	drbd_md_sync(device);

	if (device->state.conn >= C_CONNECTED) {
		struct drbd_peer_device *peer_device;

		for_each_peer_device(peer_device, device)
			drbd_send_sync_param(peer_device);
	}

	synchronize_rcu();
	kfree(old_disk_conf);
	kfree(old_plan);
	mod_timer(&device->request_timer, jiffies + HZ);
	goto success;

fail_unlock:
	mutex_unlock(&device->resource->conf_update);
 fail:
	kfree(new_disk_conf);
	kfree(new_plan);
success:
	put_ldev(device);
 out:
	mutex_unlock(&adm_ctx.resource->adm_mutex);
 finish:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

static struct block_device *open_backing_dev(struct drbd_device *device,
		const char *bdev_path, void *claim_ptr, bool do_bd_link)
{
	struct block_device *bdev;
	int err = 0;

	bdev = blkdev_get_by_path(bdev_path,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL, claim_ptr);
	if (IS_ERR(bdev)) {
		drbd_err(device, "open(\"%s\") failed with %ld\n",
				bdev_path, PTR_ERR(bdev));
		return bdev;
	}

	if (!do_bd_link)
		return bdev;

	err = bd_link_disk_holder(bdev, device->vdisk);
	if (err) {
		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		drbd_err(device, "bd_link_disk_holder(\"%s\", ...) failed with %d\n",
				bdev_path, err);
		bdev = ERR_PTR(err);
	}
	return bdev;
}

static int open_backing_devices(struct drbd_device *device,
		struct disk_conf *new_disk_conf,
		struct drbd_backing_dev *nbc)
{
	struct block_device *bdev;

	bdev = open_backing_dev(device, new_disk_conf->backing_dev, device, true);
	if (IS_ERR(bdev))
		return ERR_OPEN_DISK;
	nbc->backing_bdev = bdev;

	/*
	 * meta_dev_idx >= 0: external fixed size, possibly multiple
	 * drbd sharing one meta device.  TODO in that case, paranoia
	 * check that [md_bdev, meta_dev_idx] is not yet used by some
	 * other drbd minor!  (if you use drbd.conf + drbdadm, that
	 * should check it for you already; but if you don't, or
	 * someone fooled it, we need to double check here)
	 */
	bdev = open_backing_dev(device, new_disk_conf->meta_dev,
		/* claim ptr: device, if claimed exclusively; shared drbd_m_holder,
		 * if potentially shared with other drbd minors */
			(new_disk_conf->meta_dev_idx < 0) ? (void*)device : (void*)drbd_m_holder,
		/* avoid double bd_claim_by_disk() for the same (source,target) tuple,
		 * as would happen with internal metadata. */
			(new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_FLEX_INT &&
			 new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_INTERNAL));
	if (IS_ERR(bdev))
		return ERR_OPEN_MD_DISK;
	nbc->md_bdev = bdev;
	return NO_ERROR;
}

static void close_backing_dev(struct drbd_device *device, struct block_device *bdev,
	bool do_bd_unlink)
{
	if (!bdev)
		return;
	if (do_bd_unlink)
		bd_unlink_disk_holder(bdev, device->vdisk);
	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
}

void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev)
{
	if (ldev == NULL)
		return;

	close_backing_dev(device, ldev->md_bdev, ldev->md_bdev != ldev->backing_bdev);
	close_backing_dev(device, ldev->backing_bdev, true);

	kfree(ldev->disk_conf);
	kfree(ldev);
}

1716 int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1718 struct drbd_config_context adm_ctx;
1719 struct drbd_device *device;
1720 struct drbd_peer_device *peer_device;
1721 struct drbd_connection *connection;
1723 enum drbd_ret_code retcode;
1724 enum determine_dev_size dd;
1725 sector_t max_possible_sectors;
1726 sector_t min_md_device_sectors;
1727 struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
1728 struct disk_conf *new_disk_conf = NULL;
1729 struct lru_cache *resync_lru = NULL;
1730 struct fifo_buffer *new_plan = NULL;
1731 union drbd_state ns, os;
1732 enum drbd_state_rv rv;
1733 struct net_conf *nc;
1735 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
1736 if (!adm_ctx.reply_skb)
1738 if (retcode != NO_ERROR)
1741 device = adm_ctx.device;
1742 mutex_lock(&adm_ctx.resource->adm_mutex);
1743 peer_device = first_peer_device(device);
1744 connection = peer_device->connection;
1745 conn_reconfig_start(connection);
1747 /* if you want to reconfigure, please tear down first */
1748 if (device->state.disk > D_DISKLESS) {
1749 retcode = ERR_DISK_CONFIGURED;
1752 /* It may just now have detached because of IO error. Make sure
1753 * drbd_ldev_destroy is done already, we may end up here very fast,
1754 * e.g. if someone calls attach from the on-io-error handler,
1755 * to realize a "hot spare" feature (not that I'd recommend that) */
1756 wait_event(device->misc_wait, !test_bit(GOING_DISKLESS, &device->flags));
1758 /* make sure there is no leftover from previous force-detach attempts */
1759 clear_bit(FORCE_DETACH, &device->flags);
1760 clear_bit(WAS_IO_ERROR, &device->flags);
1761 clear_bit(WAS_READ_ERROR, &device->flags);
1763 /* and no leftover from previously aborted resync or verify, either */
1764 device->rs_total = 0;
1765 device->rs_failed = 0;
1766 atomic_set(&device->rs_pending_cnt, 0);
1768 /* allocation not in the IO path, drbdsetup context */
1769 nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
1771 retcode = ERR_NOMEM;
1774 spin_lock_init(&nbc->md.uuid_lock);
1776 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
1777 if (!new_disk_conf) {
1778 retcode = ERR_NOMEM;
1781 nbc->disk_conf = new_disk_conf;
1783 set_disk_conf_defaults(new_disk_conf);
1784 err = disk_conf_from_attrs(new_disk_conf, info);
1786 retcode = ERR_MANDATORY_TAG;
1787 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
1791 if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
1792 new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
1794 new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
1796 retcode = ERR_NOMEM;
1800 if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
1801 retcode = ERR_MD_IDX_INVALID;
1806 nc = rcu_dereference(connection->net_conf);
1808 if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
1810 retcode = ERR_STONITH_AND_PROT_A;
1816 retcode = open_backing_devices(device, new_disk_conf, nbc);
1817 if (retcode != NO_ERROR)
1820 if ((nbc->backing_bdev == nbc->md_bdev) !=
1821 (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1822 new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
1823 retcode = ERR_MD_IDX_INVALID;
1827 resync_lru = lc_create("resync", drbd_bm_ext_cache,
1828 1, 61, sizeof(struct bm_extent),
1829 offsetof(struct bm_extent, lce));
1831 retcode = ERR_NOMEM;
1835 /* Read our meta data super block early.
1836 * This also sets other on-disk offsets. */
1837 retcode = drbd_md_read(device, nbc);
1838 if (retcode != NO_ERROR)
1841 sanitize_disk_conf(device, new_disk_conf, nbc);
1843 if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
1844 drbd_err(device, "max capacity %llu smaller than disk size %llu\n",
1845 (unsigned long long) drbd_get_max_capacity(nbc),
1846 (unsigned long long) new_disk_conf->disk_size);
1847 retcode = ERR_DISK_TOO_SMALL;
1851 if (new_disk_conf->meta_dev_idx < 0) {
1852 max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
1853 /* at least one MB, otherwise it does not make sense */
1854 min_md_device_sectors = (2<<10);
1856 max_possible_sectors = DRBD_MAX_SECTORS;
1857 min_md_device_sectors = MD_128MB_SECT * (new_disk_conf->meta_dev_idx + 1);
1860 if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
1861 retcode = ERR_MD_DISK_TOO_SMALL;
1862 drbd_warn(device, "refusing attach: md-device too small, "
1863 "at least %llu sectors needed for this meta-disk type\n",
1864 (unsigned long long) min_md_device_sectors);
1868 /* Make sure the new disk is big enough
1869 * (we may currently be R_PRIMARY with no local disk...) */
1870 if (drbd_get_max_capacity(nbc) < get_capacity(device->vdisk)) {
1871 retcode = ERR_DISK_TOO_SMALL;
1875 nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
1877 if (nbc->known_size > max_possible_sectors) {
1878 drbd_warn(device, "==> truncating very big lower level device "
1879 "to currently maximum possible %llu sectors <==\n",
1880 (unsigned long long) max_possible_sectors);
1881 if (new_disk_conf->meta_dev_idx >= 0)
1882 drbd_warn(device, "==>> using internal or flexible "
1883 "meta data may help <<==\n");
1886 drbd_suspend_io(device);
1887 /* also wait for the last barrier ack. */
1888 /* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
1889 * We need a way to either ignore barrier acks for barriers sent before a device
1890 * was attached, or a way to wait for all pending barrier acks to come in.
1891 * As barriers are counted per resource,
1892 * we'd need to suspend io on all devices of a resource.
1894 wait_event(device->misc_wait, !atomic_read(&device->ap_pending_cnt) || drbd_suspended(device));
1895 /* and for any other previously queued work */
1896 drbd_flush_workqueue(&connection->sender_work);
1898 rv = _drbd_request_state(device, NS(disk, D_ATTACHING), CS_VERBOSE);
1899 retcode = (enum drbd_ret_code)rv;
1900 drbd_resume_io(device);
1901 if (rv < SS_SUCCESS)
1904 if (!get_ldev_if_state(device, D_ATTACHING))
1905 goto force_diskless;
1907 if (!device->bitmap) {
1908 if (drbd_bm_init(device)) {
1909 retcode = ERR_NOMEM;
1910 goto force_diskless_dec;
1914 if (device->state.pdsk != D_UP_TO_DATE && device->ed_uuid &&
1915 (device->state.role == R_PRIMARY || device->state.peer == R_PRIMARY) &&
1916 (device->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
1917 drbd_err(device, "Can only attach to data with current UUID=%016llX\n",
1918 (unsigned long long)device->ed_uuid);
1919 retcode = ERR_DATA_NOT_CURRENT;
1920 goto force_diskless_dec;
1923 /* Since we are diskless, fix the activity log first... */
1924 if (drbd_check_al_size(device, new_disk_conf)) {
1925 retcode = ERR_NOMEM;
1926 goto force_diskless_dec;
1929 /* Prevent shrinking of consistent devices ! */
1931 unsigned long long nsz = drbd_new_dev_size(device, nbc, nbc->disk_conf->disk_size, 0);
1932 unsigned long long eff = nbc->md.la_size_sect;
1933 if (drbd_md_test_flag(nbc, MDF_CONSISTENT) && nsz < eff) {
1934 if (nsz == nbc->disk_conf->disk_size) {
1935 drbd_warn(device, "truncating a consistent device during attach (%llu < %llu)\n", nsz, eff);
1937 drbd_warn(device, "refusing to truncate a consistent device (%llu < %llu)\n", nsz, eff);
1938 drbd_msg_sprintf_info(adm_ctx.reply_skb,
1939 "To-be-attached device has last effective > current size, and is consistent\n"
1940 "(%llu > %llu sectors). Refusing to attach.", eff, nsz);
1941 retcode = ERR_IMPLICIT_SHRINK;
1942 goto force_diskless_dec;
1947 lock_all_resources();
1948 retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
1949 if (retcode != NO_ERROR) {
1950 unlock_all_resources();
1951 goto force_diskless_dec;
1954 /* Reset the "barriers don't work" bits here, then force meta data to
1955 * be written, to ensure we determine if barriers are supported. */
1956 if (new_disk_conf->md_flushes)
1957 clear_bit(MD_NO_FUA, &device->flags);
1959 set_bit(MD_NO_FUA, &device->flags);
1961 /* Point of no return reached.
1962 * Devices and memory are no longer released by error cleanup below.
1963 * From now on the device takes over responsibility, and the state engine should
1964 * clean it up somewhere. */
1965 D_ASSERT(device, device->ldev == NULL);
1967 device->resync = resync_lru;
1968 device->rs_plan_s = new_plan;
1971 new_disk_conf = NULL;
1974 drbd_resync_after_changed(device);
1975 drbd_bump_write_ordering(device->resource, device->ldev, WO_BDEV_FLUSH);
1976 unlock_all_resources();
1978 if (drbd_md_test_flag(device->ldev, MDF_CRASHED_PRIMARY))
1979 set_bit(CRASHED_PRIMARY, &device->flags);
1981 clear_bit(CRASHED_PRIMARY, &device->flags);
1983 if (drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
1984 !(device->state.role == R_PRIMARY && device->resource->susp_nod))
1985 set_bit(CRASHED_PRIMARY, &device->flags);
1987 device->send_cnt = 0;
1988 device->recv_cnt = 0;
1989 device->read_cnt = 0;
1990 device->writ_cnt = 0;
1992 drbd_reconsider_queue_parameters(device, device->ldev, NULL);
1994 /* If I am currently not R_PRIMARY,
1995 * but meta data primary indicator is set,
1996 * I just now recover from a hard crash,
1997 * and have been R_PRIMARY before that crash.
1999 * Now, if I had no connection before that crash
2000 * (have been degraded R_PRIMARY), chances are that
2001 * I won't find my peer now either.
2003 * In that case, and _only_ in that case,
2004 * we use the degr-wfc-timeout instead of the default,
2005 * so we can automatically recover from a crash of a
2006 * degraded but active "cluster" after a certain timeout.
2008 clear_bit(USE_DEGR_WFC_T, &device->flags);
2009 if (device->state.role != R_PRIMARY &&
2010 drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
2011 !drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND))
2012 set_bit(USE_DEGR_WFC_T, &device->flags);
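/*
 * Worked example of the rule above (illustrative): a node that crashed
 * while it was a degraded Primary comes back with MDF_PRIMARY_IND set
 * and MDF_CONNECTED_IND clear, so USE_DEGR_WFC_T gets set and the
 * shorter degr-wfc-timeout applies while waiting for the peer.  A node
 * that crashed while it was still connected keeps MDF_CONNECTED_IND,
 * and the default wfc-timeout is used instead.
 */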
2014 dd = drbd_determine_dev_size(device, 0, NULL);
2015 if (dd <= DS_ERROR) {
2016 retcode = ERR_NOMEM_BITMAP;
2017 goto force_diskless_dec;
2018 } else if (dd == DS_GREW)
2019 set_bit(RESYNC_AFTER_NEG, &device->flags);
2021 if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ||
2022 (test_bit(CRASHED_PRIMARY, &device->flags) &&
2023 drbd_md_test_flag(device->ldev, MDF_AL_DISABLED))) {
2024 drbd_info(device, "Assuming that all blocks are out of sync "
2025 "(aka FullSync)\n");
2026 if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
2027 "set_n_write from attaching", BM_LOCKED_MASK)) {
2028 retcode = ERR_IO_MD_DISK;
2029 goto force_diskless_dec;
2032 if (drbd_bitmap_io(device, &drbd_bm_read,
2033 "read from attaching", BM_LOCKED_MASK)) {
2034 retcode = ERR_IO_MD_DISK;
2035 goto force_diskless_dec;
2039 if (_drbd_bm_total_weight(device) == drbd_bm_bits(device))
2040 drbd_suspend_al(device); /* IO is still suspended here... */
2042 spin_lock_irq(&device->resource->req_lock);
2043 os = drbd_read_state(device);
2045 /* If MDF_CONSISTENT is not set go into inconsistent state,
2046 otherwise investigate MDF_WAS_UP_TO_DATE...
2047 If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
2048 otherwise into D_CONSISTENT state.
2050 if (drbd_md_test_flag(device->ldev, MDF_CONSISTENT)) {
2051 if (drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE))
2052 ns.disk = D_CONSISTENT;
2054 ns.disk = D_OUTDATED;
2056 ns.disk = D_INCONSISTENT;
2059 if (drbd_md_test_flag(device->ldev, MDF_PEER_OUT_DATED))
2060 ns.pdsk = D_OUTDATED;
2063 if (ns.disk == D_CONSISTENT &&
2064 (ns.pdsk == D_OUTDATED || rcu_dereference(device->ldev->disk_conf)->fencing == FP_DONT_CARE))
2065 ns.disk = D_UP_TO_DATE;
2067 /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
2068 MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
2069 this point, because drbd_request_state() modifies these
2072 if (rcu_dereference(device->ldev->disk_conf)->al_updates)
2073 device->ldev->md.flags &= ~MDF_AL_DISABLED;
2075 device->ldev->md.flags |= MDF_AL_DISABLED;
2079 /* In case we are C_CONNECTED postpone any decision on the new disk
2080 state after the negotiation phase. */
2081 if (device->state.conn == C_CONNECTED) {
2082 device->new_state_tmp.i = ns.i;
2084 ns.disk = D_NEGOTIATING;
2086 /* We expect to receive up-to-date UUIDs soon.
2087 To avoid a race in receive_state, free p_uuid while
2088 holding req_lock. I.e. atomic with the state change */
2089 kfree(device->p_uuid);
2090 device->p_uuid = NULL;
2093 rv = _drbd_set_state(device, ns, CS_VERBOSE, NULL);
2094 spin_unlock_irq(&device->resource->req_lock);
2096 if (rv < SS_SUCCESS)
2097 goto force_diskless_dec;
2099 mod_timer(&device->request_timer, jiffies + HZ);
2101 if (device->state.role == R_PRIMARY)
2102 device->ldev->md.uuid[UI_CURRENT] |= (u64)1;
2104 device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
2106 drbd_md_mark_dirty(device);
2107 drbd_md_sync(device);
2109 kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
2111 conn_reconfig_done(connection);
2112 mutex_unlock(&adm_ctx.resource->adm_mutex);
2113 drbd_adm_finish(&adm_ctx, info, retcode);
2119 drbd_force_state(device, NS(disk, D_DISKLESS));
2120 drbd_md_sync(device);
2122 conn_reconfig_done(connection);
2124 close_backing_dev(device, nbc->md_bdev, nbc->md_bdev != nbc->backing_bdev);
2125 close_backing_dev(device, nbc->backing_bdev, true);
2128 kfree(new_disk_conf);
2129 lc_destroy(resync_lru);
2131 mutex_unlock(&adm_ctx.resource->adm_mutex);
2133 drbd_adm_finish(&adm_ctx, info, retcode);
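/*
 * Usage sketch (illustrative, not part of this file): the attach handler
 * above is reached from userspace through generic netlink, typically via
 * drbdsetup.  With internal meta data on minor 0 the call looks roughly
 * like
 *
 *   drbdsetup attach 0 /dev/sdb1 /dev/sdb1 internal
 *
 * (exact CLI syntax depends on the drbd-utils version).  Whatever
 * retcode the error paths above produced is reported back to the caller
 * through drbd_adm_finish().
 */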
2137 static int adm_detach(struct drbd_device *device, int force)
2140 set_bit(FORCE_DETACH, &device->flags);
2141 drbd_force_state(device, NS(disk, D_FAILED));
2145 return drbd_request_detach_interruptible(device);
2148 /* Detaching the disk is a process in multiple stages. First we need to lock
2149 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
2150 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
2151 * internal references as well.
2152 * Only then we have finally detached. */
2153 int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
2155 struct drbd_config_context adm_ctx;
2156 enum drbd_ret_code retcode;
2157 struct detach_parms parms = { };
2160 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2161 if (!adm_ctx.reply_skb)
2163 if (retcode != NO_ERROR)
2166 if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
2167 err = detach_parms_from_attrs(&parms, info);
2169 retcode = ERR_MANDATORY_TAG;
2170 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2175 mutex_lock(&adm_ctx.resource->adm_mutex);
2176 retcode = adm_detach(adm_ctx.device, parms.force_detach);
2177 mutex_unlock(&adm_ctx.resource->adm_mutex);
2179 drbd_adm_finish(&adm_ctx, info, retcode);
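/*
 * Usage sketch (illustrative): a graceful detach waits for all internal
 * references as described above and may be interrupted,
 *
 *   drbdsetup detach 0
 *
 * while a forced detach sets FORCE_DETACH and pushes the disk to
 * D_FAILED immediately (exact CLI syntax depends on the drbd-utils
 * version).
 */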
2183 static bool conn_resync_running(struct drbd_connection *connection)
2185 struct drbd_peer_device *peer_device;
2190 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
2191 struct drbd_device *device = peer_device->device;
2192 if (device->state.conn == C_SYNC_SOURCE ||
2193 device->state.conn == C_SYNC_TARGET ||
2194 device->state.conn == C_PAUSED_SYNC_S ||
2195 device->state.conn == C_PAUSED_SYNC_T) {
2205 static bool conn_ov_running(struct drbd_connection *connection)
2207 struct drbd_peer_device *peer_device;
2212 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
2213 struct drbd_device *device = peer_device->device;
2214 if (device->state.conn == C_VERIFY_S ||
2215 device->state.conn == C_VERIFY_T) {
2225 static enum drbd_ret_code
2226 _check_net_options(struct drbd_connection *connection, struct net_conf *old_net_conf, struct net_conf *new_net_conf)
2228 struct drbd_peer_device *peer_device;
2231 if (old_net_conf && connection->cstate == C_WF_REPORT_PARAMS && connection->agreed_pro_version < 100) {
2232 if (new_net_conf->wire_protocol != old_net_conf->wire_protocol)
2233 return ERR_NEED_APV_100;
2235 if (new_net_conf->two_primaries != old_net_conf->two_primaries)
2236 return ERR_NEED_APV_100;
2238 if (strcmp(new_net_conf->integrity_alg, old_net_conf->integrity_alg))
2239 return ERR_NEED_APV_100;
2242 if (!new_net_conf->two_primaries &&
2243 conn_highest_role(connection) == R_PRIMARY &&
2244 conn_highest_peer(connection) == R_PRIMARY)
2245 return ERR_NEED_ALLOW_TWO_PRI;
2247 if (new_net_conf->two_primaries &&
2248 (new_net_conf->wire_protocol != DRBD_PROT_C))
2249 return ERR_NOT_PROTO_C;
2251 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2252 struct drbd_device *device = peer_device->device;
2253 if (get_ldev(device)) {
2254 enum drbd_fencing_p fp = rcu_dereference(device->ldev->disk_conf)->fencing;
2256 if (new_net_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
2257 return ERR_STONITH_AND_PROT_A;
2259 if (device->state.role == R_PRIMARY && new_net_conf->discard_my_data)
2260 return ERR_DISCARD_IMPOSSIBLE;
2263 if (new_net_conf->on_congestion != OC_BLOCK && new_net_conf->wire_protocol != DRBD_PROT_A)
2264 return ERR_CONG_NOT_PROTO_A;
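/*
 * Examples of combinations rejected above (illustrative, not an
 * exhaustive list):
 *
 *   two_primaries && wire_protocol != DRBD_PROT_C   -> ERR_NOT_PROTO_C
 *   wire_protocol == DRBD_PROT_A && fencing == FP_STONITH
 *                                                   -> ERR_STONITH_AND_PROT_A
 *   on_congestion != OC_BLOCK && wire_protocol != DRBD_PROT_A
 *                                                   -> ERR_CONG_NOT_PROTO_A
 *
 * Changing the wire protocol, two-primaries, or the integrity algorithm
 * on an established connection additionally requires
 * agreed_pro_version >= 100.
 */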
2269 static enum drbd_ret_code
2270 check_net_options(struct drbd_connection *connection, struct net_conf *new_net_conf)
2272 enum drbd_ret_code rv;
2273 struct drbd_peer_device *peer_device;
2277 rv = _check_net_options(connection, rcu_dereference(connection->net_conf), new_net_conf);
2280 /* connection->peer_devices protected by genl_lock() here */
2281 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2282 struct drbd_device *device = peer_device->device;
2283 if (!device->bitmap) {
2284 if (drbd_bm_init(device))
2293 struct crypto_shash *verify_tfm;
2294 struct crypto_shash *csums_tfm;
2295 struct crypto_shash *cram_hmac_tfm;
2296 struct crypto_shash *integrity_tfm;
2300 alloc_shash(struct crypto_shash **tfm, char *tfm_name, int err_alg)
2305 *tfm = crypto_alloc_shash(tfm_name, 0, 0);
2314 static enum drbd_ret_code
2315 alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf)
2317 char hmac_name[CRYPTO_MAX_ALG_NAME];
2318 enum drbd_ret_code rv;
2320 rv = alloc_shash(&crypto->csums_tfm, new_net_conf->csums_alg,
2324 rv = alloc_shash(&crypto->verify_tfm, new_net_conf->verify_alg,
2328 rv = alloc_shash(&crypto->integrity_tfm, new_net_conf->integrity_alg,
2332 if (new_net_conf->cram_hmac_alg[0] != 0) {
2333 snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
2334 new_net_conf->cram_hmac_alg);
2336 rv = alloc_shash(&crypto->cram_hmac_tfm, hmac_name,
2343 static void free_crypto(struct crypto *crypto)
2345 crypto_free_shash(crypto->cram_hmac_tfm);
2346 crypto_free_shash(crypto->integrity_tfm);
2347 crypto_free_shash(crypto->csums_tfm);
2348 crypto_free_shash(crypto->verify_tfm);
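/*
 * Lifecycle sketch for struct crypto (illustrative): the transforms are
 * allocated up front and, on success, handed over to the connection;
 * the error paths call free_crypto() on whatever is still owned
 * locally.  crypto_free_shash() tolerates NULL, so the cleanup needs no
 * conditionals:
 *
 *   struct crypto crypto = { };
 *   retcode = alloc_crypto(&crypto, new_net_conf);
 *   if (retcode != NO_ERROR)
 *           goto fail;
 *   connection->csums_tfm = crypto.csums_tfm;
 *   crypto.csums_tfm = NULL;        // ownership transferred
 *   ...
 *  fail:
 *   free_crypto(&crypto);           // frees only what was not handed over
 */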
2351 int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
2353 struct drbd_config_context adm_ctx;
2354 enum drbd_ret_code retcode;
2355 struct drbd_connection *connection;
2356 struct net_conf *old_net_conf, *new_net_conf = NULL;
2358 int ovr; /* online verify running */
2359 int rsr; /* re-sync running */
2360 struct crypto crypto = { };
2362 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
2363 if (!adm_ctx.reply_skb)
2365 if (retcode != NO_ERROR)
2368 connection = adm_ctx.connection;
2369 mutex_lock(&adm_ctx.resource->adm_mutex);
2371 new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
2372 if (!new_net_conf) {
2373 retcode = ERR_NOMEM;
2377 conn_reconfig_start(connection);
2379 mutex_lock(&connection->data.mutex);
2380 mutex_lock(&connection->resource->conf_update);
2381 old_net_conf = connection->net_conf;
2383 if (!old_net_conf) {
2384 drbd_msg_put_info(adm_ctx.reply_skb, "net conf missing, try connect");
2385 retcode = ERR_INVALID_REQUEST;
2389 *new_net_conf = *old_net_conf;
2390 if (should_set_defaults(info))
2391 set_net_conf_defaults(new_net_conf);
2393 err = net_conf_from_attrs_for_change(new_net_conf, info);
2394 if (err && err != -ENOMSG) {
2395 retcode = ERR_MANDATORY_TAG;
2396 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2400 retcode = check_net_options(connection, new_net_conf);
2401 if (retcode != NO_ERROR)
2404 /* re-sync running */
2405 rsr = conn_resync_running(connection);
2406 if (rsr && strcmp(new_net_conf->csums_alg, old_net_conf->csums_alg)) {
2407 retcode = ERR_CSUMS_RESYNC_RUNNING;
2411 /* online verify running */
2412 ovr = conn_ov_running(connection);
2413 if (ovr && strcmp(new_net_conf->verify_alg, old_net_conf->verify_alg)) {
2414 retcode = ERR_VERIFY_RUNNING;
2418 retcode = alloc_crypto(&crypto, new_net_conf);
2419 if (retcode != NO_ERROR)
2422 rcu_assign_pointer(connection->net_conf, new_net_conf);
2425 crypto_free_shash(connection->csums_tfm);
2426 connection->csums_tfm = crypto.csums_tfm;
2427 crypto.csums_tfm = NULL;
2430 crypto_free_shash(connection->verify_tfm);
2431 connection->verify_tfm = crypto.verify_tfm;
2432 crypto.verify_tfm = NULL;
2435 crypto_free_shash(connection->integrity_tfm);
2436 connection->integrity_tfm = crypto.integrity_tfm;
2437 if (connection->cstate >= C_WF_REPORT_PARAMS && connection->agreed_pro_version >= 100)
2438 /* Do this without trying to take connection->data.mutex again. */
2439 __drbd_send_protocol(connection, P_PROTOCOL_UPDATE);
2441 crypto_free_shash(connection->cram_hmac_tfm);
2442 connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
2444 mutex_unlock(&connection->resource->conf_update);
2445 mutex_unlock(&connection->data.mutex);
2447 kfree(old_net_conf);
2449 if (connection->cstate >= C_WF_REPORT_PARAMS) {
2450 struct drbd_peer_device *peer_device;
2453 idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
2454 drbd_send_sync_param(peer_device);
2460 mutex_unlock(&connection->resource->conf_update);
2461 mutex_unlock(&connection->data.mutex);
2462 free_crypto(&crypto);
2463 kfree(new_net_conf);
2465 conn_reconfig_done(connection);
2467 mutex_unlock(&adm_ctx.resource->adm_mutex);
2469 drbd_adm_finish(&adm_ctx, info, retcode);
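/*
 * Usage sketch (illustrative): changing options on an established
 * connection, e.g. the online-verify algorithm, goes through this
 * handler; something like
 *
 *   drbdsetup net-options <resource> --verify-alg=sha1
 *
 * (exact CLI syntax depends on the drbd-utils version).  Note the
 * ordering above: the new net_conf is published with
 * rcu_assign_pointer() before the old one is freed, and the crypto
 * transforms are swapped while both the data mutex and conf_update
 * are held.
 */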
2473 static void connection_to_info(struct connection_info *info,
2474 struct drbd_connection *connection)
2476 info->conn_connection_state = connection->cstate;
2477 info->conn_role = conn_highest_peer(connection);
2480 static void peer_device_to_info(struct peer_device_info *info,
2481 struct drbd_peer_device *peer_device)
2483 struct drbd_device *device = peer_device->device;
2485 info->peer_repl_state =
2486 max_t(enum drbd_conns, C_WF_REPORT_PARAMS, device->state.conn);
2487 info->peer_disk_state = device->state.pdsk;
2488 info->peer_resync_susp_user = device->state.user_isp;
2489 info->peer_resync_susp_peer = device->state.peer_isp;
2490 info->peer_resync_susp_dependency = device->state.aftr_isp;
2493 int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
2495 struct connection_info connection_info;
2496 enum drbd_notification_type flags;
2497 unsigned int peer_devices = 0;
2498 struct drbd_config_context adm_ctx;
2499 struct drbd_peer_device *peer_device;
2500 struct net_conf *old_net_conf, *new_net_conf = NULL;
2501 struct crypto crypto = { };
2502 struct drbd_resource *resource;
2503 struct drbd_connection *connection;
2504 enum drbd_ret_code retcode;
2508 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
2510 if (!adm_ctx.reply_skb)
2512 if (retcode != NO_ERROR)
2514 if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
2515 drbd_msg_put_info(adm_ctx.reply_skb, "connection endpoint(s) missing");
2516 retcode = ERR_INVALID_REQUEST;
2520 /* No need for _rcu here. All reconfiguration is
2521 * strictly serialized on genl_lock(). We are protected against
2522 * concurrent reconfiguration/addition/deletion */
2523 for_each_resource(resource, &drbd_resources) {
2524 for_each_connection(connection, resource) {
2525 if (nla_len(adm_ctx.my_addr) == connection->my_addr_len &&
2526 !memcmp(nla_data(adm_ctx.my_addr), &connection->my_addr,
2527 connection->my_addr_len)) {
2528 retcode = ERR_LOCAL_ADDR;
2532 if (nla_len(adm_ctx.peer_addr) == connection->peer_addr_len &&
2533 !memcmp(nla_data(adm_ctx.peer_addr), &connection->peer_addr,
2534 connection->peer_addr_len)) {
2535 retcode = ERR_PEER_ADDR;
2541 mutex_lock(&adm_ctx.resource->adm_mutex);
2542 connection = first_connection(adm_ctx.resource);
2543 conn_reconfig_start(connection);
2545 if (connection->cstate > C_STANDALONE) {
2546 retcode = ERR_NET_CONFIGURED;
2550 /* allocation not in the IO path, drbdsetup / netlink process context */
2551 new_net_conf = kzalloc(sizeof(*new_net_conf), GFP_KERNEL);
2552 if (!new_net_conf) {
2553 retcode = ERR_NOMEM;
2557 set_net_conf_defaults(new_net_conf);
2559 err = net_conf_from_attrs(new_net_conf, info);
2560 if (err && err != -ENOMSG) {
2561 retcode = ERR_MANDATORY_TAG;
2562 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2566 retcode = check_net_options(connection, new_net_conf);
2567 if (retcode != NO_ERROR)
2570 retcode = alloc_crypto(&crypto, new_net_conf);
2571 if (retcode != NO_ERROR)
2574 ((char *)new_net_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
2576 drbd_flush_workqueue(&connection->sender_work);
2578 mutex_lock(&adm_ctx.resource->conf_update);
2579 old_net_conf = connection->net_conf;
2581 retcode = ERR_NET_CONFIGURED;
2582 mutex_unlock(&adm_ctx.resource->conf_update);
2585 rcu_assign_pointer(connection->net_conf, new_net_conf);
2587 conn_free_crypto(connection);
2588 connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
2589 connection->integrity_tfm = crypto.integrity_tfm;
2590 connection->csums_tfm = crypto.csums_tfm;
2591 connection->verify_tfm = crypto.verify_tfm;
2593 connection->my_addr_len = nla_len(adm_ctx.my_addr);
2594 memcpy(&connection->my_addr, nla_data(adm_ctx.my_addr), connection->my_addr_len);
2595 connection->peer_addr_len = nla_len(adm_ctx.peer_addr);
2596 memcpy(&connection->peer_addr, nla_data(adm_ctx.peer_addr), connection->peer_addr_len);
2598 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2602 connection_to_info(&connection_info, connection);
2603 flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
2604 mutex_lock(¬ification_mutex);
2605 notify_connection_state(NULL, 0, connection, &connection_info, NOTIFY_CREATE | flags);
2606 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2607 struct peer_device_info peer_device_info;
2609 peer_device_to_info(&peer_device_info, peer_device);
2610 flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
2611 notify_peer_device_state(NULL, 0, peer_device, &peer_device_info, NOTIFY_CREATE | flags);
2613 mutex_unlock(¬ification_mutex);
2614 mutex_unlock(&adm_ctx.resource->conf_update);
2617 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2618 struct drbd_device *device = peer_device->device;
2619 device->send_cnt = 0;
2620 device->recv_cnt = 0;
2624 retcode = (enum drbd_ret_code)conn_request_state(connection,
2625 NS(conn, C_UNCONNECTED), CS_VERBOSE);
2627 conn_reconfig_done(connection);
2628 mutex_unlock(&adm_ctx.resource->adm_mutex);
2629 drbd_adm_finish(&adm_ctx, info, retcode);
2633 free_crypto(&crypto);
2634 kfree(new_net_conf);
2636 conn_reconfig_done(connection);
2637 mutex_unlock(&adm_ctx.resource->adm_mutex);
2639 drbd_adm_finish(&adm_ctx, info, retcode);
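/*
 * Flow sketch for the handler above (illustrative): after the
 * address-uniqueness scan over all resources, a successful connect ends
 * with
 *
 *   conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
 *
 * which sends the receiver into its connection-attempt loop; duplicate
 * endpoints are reported early as ERR_LOCAL_ADDR / ERR_PEER_ADDR.
 */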
2643 static enum drbd_state_rv conn_try_disconnect(struct drbd_connection *connection, bool force)
2645 enum drbd_conns cstate;
2646 enum drbd_state_rv rv;
2649 rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
2650 force ? CS_HARD : 0);
2653 case SS_NOTHING_TO_DO:
2655 case SS_ALREADY_STANDALONE:
2657 case SS_PRIMARY_NOP:
2658 /* Our state checking code wants to see the peer outdated. */
2659 rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0);
2661 if (rv == SS_OUTDATE_WO_CONN) /* lost connection before graceful disconnect succeeded */
2662 rv = conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_VERBOSE);
2665 case SS_CW_FAILED_BY_PEER:
2666 spin_lock_irq(&connection->resource->req_lock);
2667 cstate = connection->cstate;
2668 spin_unlock_irq(&connection->resource->req_lock);
2669 if (cstate <= C_WF_CONNECTION)
2671 /* The peer probably wants to see us outdated. */
2672 rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING,
2673 disk, D_OUTDATED), 0);
2674 if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
2675 rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
2680 /* no special handling necessary */
2683 if (rv >= SS_SUCCESS) {
2684 enum drbd_state_rv rv2;
2685 /* No one else can reconfigure the network while I am here.
2686 * The state handling only uses drbd_thread_stop_nowait(),
2687 * but here we really want to wait until the receiver thread is gone.
2689 drbd_thread_stop(&connection->receiver);
2691 /* Race breaker. This additional state change request may be
2692 * necessary, if this was a forced disconnect during a receiver
2693 * restart. We may have "killed" the receiver thread just
2694 * after drbd_receiver() returned. Typically, we should be
2695 * C_STANDALONE already, now, and this becomes a no-op.
2697 rv2 = conn_request_state(connection, NS(conn, C_STANDALONE),
2698 CS_VERBOSE | CS_HARD);
2699 if (rv2 < SS_SUCCESS)
2700 drbd_err(connection,
2701 "unexpected rv2=%d in conn_try_disconnect()\n",
2703 /* Unlike in DRBD 9, the state engine has generated
2704 * NOTIFY_DESTROY events before clearing connection->net_conf. */
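/*
 * State sketch for conn_try_disconnect() (illustrative):
 *
 *   C_CONNECTED --(request C_DISCONNECTING)--> C_DISCONNECTING
 *               --(receiver stops)-----------> C_STANDALONE
 *
 * The SS_PRIMARY_NOP / SS_CW_FAILED_BY_PEER branches retry the request
 * with pdsk resp. disk forced to D_OUTDATED, and the final CS_HARD
 * request to C_STANDALONE is only a race breaker that is normally a
 * no-op.
 */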
2709 int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
2711 struct drbd_config_context adm_ctx;
2712 struct disconnect_parms parms;
2713 struct drbd_connection *connection;
2714 enum drbd_state_rv rv;
2715 enum drbd_ret_code retcode;
2718 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
2719 if (!adm_ctx.reply_skb)
2721 if (retcode != NO_ERROR)
2724 connection = adm_ctx.connection;
2725 memset(&parms, 0, sizeof(parms));
2726 if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
2727 err = disconnect_parms_from_attrs(&parms, info);
2729 retcode = ERR_MANDATORY_TAG;
2730 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2735 mutex_lock(&adm_ctx.resource->adm_mutex);
2736 rv = conn_try_disconnect(connection, parms.force_disconnect);
2737 if (rv < SS_SUCCESS)
2738 retcode = (enum drbd_ret_code)rv;
2741 mutex_unlock(&adm_ctx.resource->adm_mutex);
2743 drbd_adm_finish(&adm_ctx, info, retcode);
2747 void resync_after_online_grow(struct drbd_device *device)
2749 int iass; /* I am sync source */
2751 drbd_info(device, "Resync of new storage after online grow\n");
2752 if (device->state.role != device->state.peer)
2753 iass = (device->state.role == R_PRIMARY);
2755 iass = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags);
2758 drbd_start_resync(device, C_SYNC_SOURCE);
2760 _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
2763 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
2765 struct drbd_config_context adm_ctx;
2766 struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
2767 struct resize_parms rs;
2768 struct drbd_device *device;
2769 enum drbd_ret_code retcode;
2770 enum determine_dev_size dd;
2771 bool change_al_layout = false;
2772 enum dds_flags ddsf;
2776 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2777 if (!adm_ctx.reply_skb)
2779 if (retcode != NO_ERROR)
2782 mutex_lock(&adm_ctx.resource->adm_mutex);
2783 device = adm_ctx.device;
2784 if (!get_ldev(device)) {
2785 retcode = ERR_NO_DISK;
2789 memset(&rs, 0, sizeof(struct resize_parms));
2790 rs.al_stripes = device->ldev->md.al_stripes;
2791 rs.al_stripe_size = device->ldev->md.al_stripe_size_4k * 4;
2792 if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
2793 err = resize_parms_from_attrs(&rs, info);
2795 retcode = ERR_MANDATORY_TAG;
2796 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2801 if (device->state.conn > C_CONNECTED) {
2802 retcode = ERR_RESIZE_RESYNC;
2806 if (device->state.role == R_SECONDARY &&
2807 device->state.peer == R_SECONDARY) {
2808 retcode = ERR_NO_PRIMARY;
2812 if (rs.no_resync && first_peer_device(device)->connection->agreed_pro_version < 93) {
2813 retcode = ERR_NEED_APV_93;
2818 u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
2820 if (u_size != (sector_t)rs.resize_size) {
2821 new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
2822 if (!new_disk_conf) {
2823 retcode = ERR_NOMEM;
2828 if (device->ldev->md.al_stripes != rs.al_stripes ||
2829 device->ldev->md.al_stripe_size_4k != rs.al_stripe_size / 4) {
2830 u32 al_size_k = rs.al_stripes * rs.al_stripe_size;
2832 if (al_size_k > (16 * 1024 * 1024)) {
2833 retcode = ERR_MD_LAYOUT_TOO_BIG;
2837 if (al_size_k < MD_32kB_SECT/2) {
2838 retcode = ERR_MD_LAYOUT_TOO_SMALL;
2842 if (device->state.conn != C_CONNECTED && !rs.resize_force) {
2843 retcode = ERR_MD_LAYOUT_CONNECTED;
2847 change_al_layout = true;
2850 if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev))
2851 device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
2853 if (new_disk_conf) {
2854 mutex_lock(&device->resource->conf_update);
2855 old_disk_conf = device->ldev->disk_conf;
2856 *new_disk_conf = *old_disk_conf;
2857 new_disk_conf->disk_size = (sector_t)rs.resize_size;
2858 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
2859 mutex_unlock(&device->resource->conf_update);
2861 kfree(old_disk_conf);
2862 new_disk_conf = NULL;
2865 ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
2866 dd = drbd_determine_dev_size(device, ddsf, change_al_layout ? &rs : NULL);
2867 drbd_md_sync(device);
2869 if (dd == DS_ERROR) {
2870 retcode = ERR_NOMEM_BITMAP;
2872 } else if (dd == DS_ERROR_SPACE_MD) {
2873 retcode = ERR_MD_LAYOUT_NO_FIT;
2875 } else if (dd == DS_ERROR_SHRINK) {
2876 retcode = ERR_IMPLICIT_SHRINK;
2880 if (device->state.conn == C_CONNECTED) {
2882 set_bit(RESIZE_PENDING, &device->flags);
2884 drbd_send_uuids(first_peer_device(device));
2885 drbd_send_sizes(first_peer_device(device), 1, ddsf);
2889 mutex_unlock(&adm_ctx.resource->adm_mutex);
2891 drbd_adm_finish(&adm_ctx, info, retcode);
2896 kfree(new_disk_conf);
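/*
 * Usage sketch (illustrative): growing a device online after the
 * backing device was enlarged, optionally with a new activity-log
 * layout, looks roughly like
 *
 *   drbdsetup resize 0 --al-stripes=4 --al-stripe-size=32
 *
 * (exact CLI syntax depends on the drbd-utils version).  Changing the
 * AL layout while connected is refused with ERR_MD_LAYOUT_CONNECTED
 * unless rs.resize_force is set, see the checks above.
 */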
2900 int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
2902 struct drbd_config_context adm_ctx;
2903 enum drbd_ret_code retcode;
2904 struct res_opts res_opts;
2907 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
2908 if (!adm_ctx.reply_skb)
2910 if (retcode != NO_ERROR)
2913 res_opts = adm_ctx.resource->res_opts;
2914 if (should_set_defaults(info))
2915 set_res_opts_defaults(&res_opts);
2917 err = res_opts_from_attrs(&res_opts, info);
2918 if (err && err != -ENOMSG) {
2919 retcode = ERR_MANDATORY_TAG;
2920 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2924 mutex_lock(&adm_ctx.resource->adm_mutex);
2925 err = set_resource_options(adm_ctx.resource, &res_opts);
2927 retcode = ERR_INVALID_REQUEST;
2929 retcode = ERR_NOMEM;
2931 mutex_unlock(&adm_ctx.resource->adm_mutex);
2934 drbd_adm_finish(&adm_ctx, info, retcode);
2938 int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
2940 struct drbd_config_context adm_ctx;
2941 struct drbd_device *device;
2942 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
2944 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2945 if (!adm_ctx.reply_skb)
2947 if (retcode != NO_ERROR)
2950 device = adm_ctx.device;
2951 if (!get_ldev(device)) {
2952 retcode = ERR_NO_DISK;
2956 mutex_lock(&adm_ctx.resource->adm_mutex);
2958 /* If there is still bitmap IO pending, probably because of a previous
2959 * resync just being finished, wait for it before requesting a new resync.
2960 * Also wait for its after_state_ch(). */
2961 drbd_suspend_io(device);
2962 wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
2963 drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
2965 /* If we happen to be C_STANDALONE R_SECONDARY, just change to
2966 * D_INCONSISTENT, and set all bits in the bitmap. Otherwise,
2967 * try to start a resync handshake as sync target for full sync.
2969 if (device->state.conn == C_STANDALONE && device->state.role == R_SECONDARY) {
2970 retcode = drbd_request_state(device, NS(disk, D_INCONSISTENT));
2971 if (retcode >= SS_SUCCESS) {
2972 if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
2973 "set_n_write from invalidate", BM_LOCKED_MASK))
2974 retcode = ERR_IO_MD_DISK;
2977 retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_T));
2978 drbd_resume_io(device);
2979 mutex_unlock(&adm_ctx.resource->adm_mutex);
2982 drbd_adm_finish(&adm_ctx, info, retcode);
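/*
 * Semantics sketch (illustrative): "invalidate" declares the local data
 * bad.  A standalone Secondary goes D_INCONSISTENT and sets all bitmap
 * bits, so the next connect does a full sync from the peer; otherwise
 * C_STARTING_SYNC_T is requested to become sync target right away.
 * Roughly,
 *
 *   drbdsetup invalidate 0
 *
 * (exact CLI syntax depends on the drbd-utils version).
 */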
2986 static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
2987 union drbd_state mask, union drbd_state val)
2989 struct drbd_config_context adm_ctx;
2990 enum drbd_ret_code retcode;
2992 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2993 if (!adm_ctx.reply_skb)
2995 if (retcode != NO_ERROR)
2998 mutex_lock(&adm_ctx.resource->adm_mutex);
2999 retcode = drbd_request_state(adm_ctx.device, mask, val);
3000 mutex_unlock(&adm_ctx.resource->adm_mutex);
3002 drbd_adm_finish(&adm_ctx, info, retcode);
3006 static int drbd_bmio_set_susp_al(struct drbd_device *device) __must_hold(local)
3010 rv = drbd_bmio_set_n_write(device);
3011 drbd_suspend_al(device);
3015 int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
3017 struct drbd_config_context adm_ctx;
3018 int retcode; /* drbd_ret_code, drbd_state_rv */
3019 struct drbd_device *device;
3021 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3022 if (!adm_ctx.reply_skb)
3024 if (retcode != NO_ERROR)
3027 device = adm_ctx.device;
3028 if (!get_ldev(device)) {
3029 retcode = ERR_NO_DISK;
3033 mutex_lock(&adm_ctx.resource->adm_mutex);
3035 /* If there is still bitmap IO pending, probably because of a previous
3036 * resync just being finished, wait for it before requesting a new resync.
3037 * Also wait for its after_state_ch(). */
3038 drbd_suspend_io(device);
3039 wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
3040 drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
3042 /* If we happen to be C_STANDALONE R_PRIMARY, just set all bits
3043 * in the bitmap. Otherwise, try to start a resync handshake
3044 * as sync source for full sync.
3046 if (device->state.conn == C_STANDALONE && device->state.role == R_PRIMARY) {
3047 /* The peer will get a resync upon connect anyway. Just make that
3048 into a full resync. */
3049 retcode = drbd_request_state(device, NS(pdsk, D_INCONSISTENT));
3050 if (retcode >= SS_SUCCESS) {
3051 if (drbd_bitmap_io(device, &drbd_bmio_set_susp_al,
3052 "set_n_write from invalidate_peer",
3053 BM_LOCKED_SET_ALLOWED))
3054 retcode = ERR_IO_MD_DISK;
3057 retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_S));
3058 drbd_resume_io(device);
3059 mutex_unlock(&adm_ctx.resource->adm_mutex);
3062 drbd_adm_finish(&adm_ctx, info, retcode);
3066 int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
3068 struct drbd_config_context adm_ctx;
3069 enum drbd_ret_code retcode;
3071 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3072 if (!adm_ctx.reply_skb)
3074 if (retcode != NO_ERROR)
3077 mutex_lock(&adm_ctx.resource->adm_mutex);
3078 if (drbd_request_state(adm_ctx.device, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
3079 retcode = ERR_PAUSE_IS_SET;
3080 mutex_unlock(&adm_ctx.resource->adm_mutex);
3082 drbd_adm_finish(&adm_ctx, info, retcode);
3086 int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
3088 struct drbd_config_context adm_ctx;
3089 union drbd_dev_state s;
3090 enum drbd_ret_code retcode;
3092 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3093 if (!adm_ctx.reply_skb)
3095 if (retcode != NO_ERROR)
3098 mutex_lock(&adm_ctx.resource->adm_mutex);
3099 if (drbd_request_state(adm_ctx.device, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
3100 s = adm_ctx.device->state;
3101 if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
3102 retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
3103 s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
3105 retcode = ERR_PAUSE_IS_CLEAR;
3108 mutex_unlock(&adm_ctx.resource->adm_mutex);
3110 drbd_adm_finish(&adm_ctx, info, retcode);
3114 int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
3116 return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
3119 int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
3121 struct drbd_config_context adm_ctx;
3122 struct drbd_device *device;
3123 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
3125 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3126 if (!adm_ctx.reply_skb)
3128 if (retcode != NO_ERROR)
3131 mutex_lock(&adm_ctx.resource->adm_mutex);
3132 device = adm_ctx.device;
3133 if (test_bit(NEW_CUR_UUID, &device->flags)) {
3134 if (get_ldev_if_state(device, D_ATTACHING)) {
3135 drbd_uuid_new_current(device);
3138 /* This is effectively a multi-stage "forced down".
3139 * The NEW_CUR_UUID bit is supposedly only set if we
3140 * lost the replication connection, and are configured
3141 * to freeze IO and wait for some fence-peer handler.
3142 * So we still don't have a replication connection.
3143 * And now we don't have a local disk either. After
3144 * resume, we will fail all pending and new IO, because
3145 * we don't have any data anymore. Which means we will
3146 * eventually be able to terminate all users of this
3147 * device, and then take it down. By bumping the
3148 * "effective" data uuid, we make sure that you really
3149 * need to tear down before you reconfigure, we will
3150 * then refuse to re-connect or re-attach (because no
3151 * matching real data uuid exists).
3154 get_random_bytes(&val, sizeof(u64));
3155 drbd_set_ed_uuid(device, val);
3156 drbd_warn(device, "Resumed without access to data; please tear down before attempting to re-configure.\n");
3158 clear_bit(NEW_CUR_UUID, &device->flags);
3160 drbd_suspend_io(device);
3161 retcode = drbd_request_state(device, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
3162 if (retcode == SS_SUCCESS) {
3163 if (device->state.conn < C_CONNECTED)
3164 tl_clear(first_peer_device(device)->connection);
3165 if (device->state.disk == D_DISKLESS || device->state.disk == D_FAILED)
3166 tl_restart(first_peer_device(device)->connection, FAIL_FROZEN_DISK_IO);
3168 drbd_resume_io(device);
3169 mutex_unlock(&adm_ctx.resource->adm_mutex);
3171 drbd_adm_finish(&adm_ctx, info, retcode);
3175 int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
3177 return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
3180 static int nla_put_drbd_cfg_context(struct sk_buff *skb,
3181 struct drbd_resource *resource,
3182 struct drbd_connection *connection,
3183 struct drbd_device *device)
3186 nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_CONTEXT);
3188 goto nla_put_failure;
3190 nla_put_u32(skb, T_ctx_volume, device->vnr))
3191 goto nla_put_failure;
3192 if (nla_put_string(skb, T_ctx_resource_name, resource->name))
3193 goto nla_put_failure;
3195 if (connection->my_addr_len &&
3196 nla_put(skb, T_ctx_my_addr, connection->my_addr_len, &connection->my_addr))
3197 goto nla_put_failure;
3198 if (connection->peer_addr_len &&
3199 nla_put(skb, T_ctx_peer_addr, connection->peer_addr_len, &connection->peer_addr))
3200 goto nla_put_failure;
3202 nla_nest_end(skb, nla);
3207 nla_nest_cancel(skb, nla);
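/*
 * Pattern sketch (illustrative): the function above follows the usual
 * netlink nesting idiom; on any overflow the whole nest is rolled back
 * so the skb stays consistent:
 *
 *   nla = nla_nest_start_noflag(skb, TYPE);
 *   if (!nla)
 *           goto nla_put_failure;
 *   if (nla_put_...(skb, ...))
 *           goto nla_put_failure;
 *   nla_nest_end(skb, nla);
 *   return 0;
 *
 *  nla_put_failure:
 *   nla_nest_cancel(skb, nla);
 *   return -EMSGSIZE;
 */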
3212 * The generic netlink dump callbacks are called outside the genl_lock(), so
3213 * they cannot use the simple attribute parsing code which uses global
3216 static struct nlattr *find_cfg_context_attr(const struct nlmsghdr *nlh, int attr)
3218 const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
3219 const int maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
3222 nla = nla_find(nlmsg_attrdata(nlh, hdrlen), nlmsg_attrlen(nlh, hdrlen),
3223 DRBD_NLA_CFG_CONTEXT);
3226 return drbd_nla_find_nested(maxtype, nla, __nla_type(attr));
3229 static void resource_to_info(struct resource_info *, struct drbd_resource *);
3231 int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb)
3233 struct drbd_genlmsghdr *dh;
3234 struct drbd_resource *resource;
3235 struct resource_info resource_info;
3236 struct resource_statistics resource_statistics;
3241 for_each_resource_rcu(resource, &drbd_resources)
3242 if (resource == (struct drbd_resource *)cb->args[0])
3243 goto found_resource;
3244 err = 0; /* resource was probably deleted */
3247 resource = list_entry(&drbd_resources,
3248 struct drbd_resource, resources);
3251 list_for_each_entry_continue_rcu(resource, &drbd_resources, resources) {
3258 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3259 cb->nlh->nlmsg_seq, &drbd_genl_family,
3260 NLM_F_MULTI, DRBD_ADM_GET_RESOURCES);
3265 dh->ret_code = NO_ERROR;
3266 err = nla_put_drbd_cfg_context(skb, resource, NULL, NULL);
3269 err = res_opts_to_skb(skb, &resource->res_opts, !capable(CAP_SYS_ADMIN));
3272 resource_to_info(&resource_info, resource);
3273 err = resource_info_to_skb(skb, &resource_info, !capable(CAP_SYS_ADMIN));
3276 resource_statistics.res_stat_write_ordering = resource->write_ordering;
3277 err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN));
3280 cb->args[0] = (long)resource;
3281 genlmsg_end(skb, dh);
3291 static void device_to_statistics(struct device_statistics *s,
3292 struct drbd_device *device)
3294 memset(s, 0, sizeof(*s));
3295 s->dev_upper_blocked = !may_inc_ap_bio(device);
3296 if (get_ldev(device)) {
3297 struct drbd_md *md = &device->ldev->md;
3298 u64 *history_uuids = (u64 *)s->history_uuids;
3301 spin_lock_irq(&md->uuid_lock);
3302 s->dev_current_uuid = md->uuid[UI_CURRENT];
3303 BUILD_BUG_ON(sizeof(s->history_uuids) < UI_HISTORY_END - UI_HISTORY_START + 1);
3304 for (n = 0; n < UI_HISTORY_END - UI_HISTORY_START + 1; n++)
3305 history_uuids[n] = md->uuid[UI_HISTORY_START + n];
3306 for (; n < HISTORY_UUIDS; n++)
3307 history_uuids[n] = 0;
3308 s->history_uuids_len = HISTORY_UUIDS;
3309 spin_unlock_irq(&md->uuid_lock);
3311 s->dev_disk_flags = md->flags;
3314 s->dev_size = get_capacity(device->vdisk);
3315 s->dev_read = device->read_cnt;
3316 s->dev_write = device->writ_cnt;
3317 s->dev_al_writes = device->al_writ_cnt;
3318 s->dev_bm_writes = device->bm_writ_cnt;
3319 s->dev_upper_pending = atomic_read(&device->ap_bio_cnt);
3320 s->dev_lower_pending = atomic_read(&device->local_cnt);
3321 s->dev_al_suspended = test_bit(AL_SUSPENDED, &device->flags);
3322 s->dev_exposed_data_uuid = device->ed_uuid;
3325 static int put_resource_in_arg0(struct netlink_callback *cb, int holder_nr)
3328 struct drbd_resource *resource =
3329 (struct drbd_resource *)cb->args[0];
3330 kref_put(&resource->kref, drbd_destroy_resource);
3336 int drbd_adm_dump_devices_done(struct netlink_callback *cb)
{
3337 return put_resource_in_arg0(cb, 7);
3340 static void device_to_info(struct device_info *, struct drbd_device *);
3342 int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb)
3344 struct nlattr *resource_filter;
3345 struct drbd_resource *resource;
3346 struct drbd_device *device;
3347 int minor, err, retcode;
3348 struct drbd_genlmsghdr *dh;
3349 struct device_info device_info;
3350 struct device_statistics device_statistics;
3351 struct idr *idr_to_search;
3353 resource = (struct drbd_resource *)cb->args[0];
3354 if (!cb->args[0] && !cb->args[1]) {
3355 resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
3356 if (resource_filter) {
3357 retcode = ERR_RES_NOT_KNOWN;
3358 resource = drbd_find_resource(nla_data(resource_filter));
3361 cb->args[0] = (long)resource;
3366 minor = cb->args[1];
3367 idr_to_search = resource ? &resource->devices : &drbd_devices;
3368 device = idr_get_next(idr_to_search, &minor);
3373 idr_for_each_entry_continue(idr_to_search, device, minor) {
3375 goto put_result; /* only one iteration */
3378 goto out; /* no more devices */
3381 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3382 cb->nlh->nlmsg_seq, &drbd_genl_family,
3383 NLM_F_MULTI, DRBD_ADM_GET_DEVICES);
3387 dh->ret_code = retcode;
3389 if (retcode == NO_ERROR) {
3390 dh->minor = device->minor;
3391 err = nla_put_drbd_cfg_context(skb, device->resource, NULL, device);
3394 if (get_ldev(device)) {
3395 struct disk_conf *disk_conf =
3396 rcu_dereference(device->ldev->disk_conf);
3398 err = disk_conf_to_skb(skb, disk_conf, !capable(CAP_SYS_ADMIN));
3403 device_to_info(&device_info, device);
3404 err = device_info_to_skb(skb, &device_info, !capable(CAP_SYS_ADMIN));
3408 device_to_statistics(&device_statistics, device);
3409 err = device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN));
3412 cb->args[1] = minor + 1;
3414 genlmsg_end(skb, dh);
3424 int drbd_adm_dump_connections_done(struct netlink_callback *cb)
3426 return put_resource_in_arg0(cb, 6);
3429 enum { SINGLE_RESOURCE, ITERATE_RESOURCES };
3431 int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb)
3433 struct nlattr *resource_filter;
3434 struct drbd_resource *resource = NULL, *next_resource;
3435 struct drbd_connection *connection;
3436 int err = 0, retcode;
3437 struct drbd_genlmsghdr *dh;
3438 struct connection_info connection_info;
3439 struct connection_statistics connection_statistics;
3442 resource = (struct drbd_resource *)cb->args[0];
3444 resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
3445 if (resource_filter) {
3446 retcode = ERR_RES_NOT_KNOWN;
3447 resource = drbd_find_resource(nla_data(resource_filter));
3450 cb->args[0] = (long)resource;
3451 cb->args[1] = SINGLE_RESOURCE;
3455 if (list_empty(&drbd_resources))
3457 resource = list_first_entry(&drbd_resources, struct drbd_resource, resources);
3458 kref_get(&resource->kref);
3459 cb->args[0] = (long)resource;
3460 cb->args[1] = ITERATE_RESOURCES;
3465 mutex_lock(&resource->conf_update);
3468 for_each_connection_rcu(connection, resource)
3469 if (connection == (struct drbd_connection *)cb->args[2])
3470 goto found_connection;
3471 /* connection was probably deleted */
3472 goto no_more_connections;
3474 connection = list_entry(&resource->connections, struct drbd_connection, connections);
3477 list_for_each_entry_continue_rcu(connection, &resource->connections, connections) {
3478 if (!has_net_conf(connection))
3481 goto put_result; /* only one iteration */
3484 no_more_connections:
3485 if (cb->args[1] == ITERATE_RESOURCES) {
3486 for_each_resource_rcu(next_resource, &drbd_resources) {
3487 if (next_resource == resource)
3488 goto found_resource;
3490 /* resource was probably deleted */
3495 list_for_each_entry_continue_rcu(next_resource, &drbd_resources, resources) {
3496 mutex_unlock(&resource->conf_update);
3497 kref_put(&resource->kref, drbd_destroy_resource);
3498 resource = next_resource;
3499 kref_get(&resource->kref);
3500 cb->args[0] = (long)resource;
3504 goto out; /* no more resources */
3507 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3508 cb->nlh->nlmsg_seq, &drbd_genl_family,
3509 NLM_F_MULTI, DRBD_ADM_GET_CONNECTIONS);
3513 dh->ret_code = retcode;
3515 if (retcode == NO_ERROR) {
3516 struct net_conf *net_conf;
3518 err = nla_put_drbd_cfg_context(skb, resource, connection, NULL);
3521 net_conf = rcu_dereference(connection->net_conf);
3523 err = net_conf_to_skb(skb, net_conf, !capable(CAP_SYS_ADMIN));
3527 connection_to_info(&connection_info, connection);
3528 err = connection_info_to_skb(skb, &connection_info, !capable(CAP_SYS_ADMIN));
3531 connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->flags);
3532 err = connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
3535 cb->args[2] = (long)connection;
3537 genlmsg_end(skb, dh);
3543 mutex_unlock(&resource->conf_update);
3549 enum mdf_peer_flag {
3550 MDF_PEER_CONNECTED = 1 << 0,
3551 MDF_PEER_OUTDATED = 1 << 1,
3552 MDF_PEER_FENCING = 1 << 2,
3553 MDF_PEER_FULL_SYNC = 1 << 3,
3556 static void peer_device_to_statistics(struct peer_device_statistics *s,
3557 struct drbd_peer_device *peer_device)
3559 struct drbd_device *device = peer_device->device;
3561 memset(s, 0, sizeof(*s));
3562 s->peer_dev_received = device->recv_cnt;
3563 s->peer_dev_sent = device->send_cnt;
3564 s->peer_dev_pending = atomic_read(&device->ap_pending_cnt) +
3565 atomic_read(&device->rs_pending_cnt);
3566 s->peer_dev_unacked = atomic_read(&device->unacked_cnt);
3567 s->peer_dev_out_of_sync = drbd_bm_total_weight(device) << (BM_BLOCK_SHIFT - 9);
3568 s->peer_dev_resync_failed = device->rs_failed << (BM_BLOCK_SHIFT - 9);
3569 if (get_ldev(device)) {
3570 struct drbd_md *md = &device->ldev->md;
3572 spin_lock_irq(&md->uuid_lock);
3573 s->peer_dev_bitmap_uuid = md->uuid[UI_BITMAP];
3574 spin_unlock_irq(&md->uuid_lock);
3576 (drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND) ?
3577 MDF_PEER_CONNECTED : 0) +
3578 (drbd_md_test_flag(device->ldev, MDF_CONSISTENT) &&
3579 !drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE) ?
3580 MDF_PEER_OUTDATED : 0) +
3581 /* FIXME: MDF_PEER_FENCING? */
3582 (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ?
3583 MDF_PEER_FULL_SYNC : 0);
3588 int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb)
3590 return put_resource_in_arg0(cb, 9);
3593 int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb)
3595 struct nlattr *resource_filter;
3596 struct drbd_resource *resource;
3597 struct drbd_device *device;
3598 struct drbd_peer_device *peer_device = NULL;
3599 int minor, err, retcode;
3600 struct drbd_genlmsghdr *dh;
3601 struct idr *idr_to_search;
3603 resource = (struct drbd_resource *)cb->args[0];
3604 if (!cb->args[0] && !cb->args[1]) {
3605 resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
3606 if (resource_filter) {
3607 retcode = ERR_RES_NOT_KNOWN;
3608 resource = drbd_find_resource(nla_data(resource_filter));
3612 cb->args[0] = (long)resource;
3616 minor = cb->args[1];
3617 idr_to_search = resource ? &resource->devices : &drbd_devices;
3618 device = idr_find(idr_to_search, minor);
3623 device = idr_get_next(idr_to_search, &minor);
3630 for_each_peer_device(peer_device, device)
3631 if (peer_device == (struct drbd_peer_device *)cb->args[2])
3632 goto found_peer_device;
3633 /* peer device was probably deleted */
3636 /* Make peer_device point to the list head (not the first entry). */
3637 peer_device = list_entry(&device->peer_devices, struct drbd_peer_device, peer_devices);
3640 list_for_each_entry_continue_rcu(peer_device, &device->peer_devices, peer_devices) {
3641 if (!has_net_conf(peer_device->connection))
3644 goto put_result; /* only one iteration */
3649 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3650 cb->nlh->nlmsg_seq, &drbd_genl_family,
3651 NLM_F_MULTI, DRBD_ADM_GET_PEER_DEVICES);
3655 dh->ret_code = retcode;
3657 if (retcode == NO_ERROR) {
3658 struct peer_device_info peer_device_info;
3659 struct peer_device_statistics peer_device_statistics;
3662 err = nla_put_drbd_cfg_context(skb, device->resource, peer_device->connection, device);
3665 peer_device_to_info(&peer_device_info, peer_device);
3666 err = peer_device_info_to_skb(skb, &peer_device_info, !capable(CAP_SYS_ADMIN));
3669 peer_device_to_statistics(&peer_device_statistics, peer_device);
3670 err = peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN));
3673 cb->args[1] = minor;
3674 cb->args[2] = (long)peer_device;
3676 genlmsg_end(skb, dh);
3686 * Return the connection of @resource if @resource has exactly one connection.
3688 static struct drbd_connection *the_only_connection(struct drbd_resource *resource)
3690 struct list_head *connections = &resource->connections;
3692 if (list_empty(connections) || connections->next->next != connections)
3694 return list_first_entry(&resource->connections, struct drbd_connection, connections);
3697 static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
3698 const struct sib_info *sib)
3700 struct drbd_resource *resource = device->resource;
3701 struct state_info *si = NULL; /* for sizeof(si->member); */
3705 int exclude_sensitive;
3707 /* If sib != NULL, this is drbd_bcast_event, which anyone can listen
3708 * to. So we had better exclude_sensitive information.
3710 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
3711 * in the context of the requesting user process. Exclude sensitive
3712 * information, unless current has CAP_SYS_ADMIN.
3714 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
3715 * relies on the current implementation of netlink_dump(), which
3716 * executes the dump callback successively from netlink_recvmsg(),
3717 * always in the context of the receiving process */
3718 exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
3720 got_ldev = get_ldev(device);
3722 /* We need to add connection name and volume number information still.
3723 * Minor number is in drbd_genlmsghdr. */
3724 if (nla_put_drbd_cfg_context(skb, resource, the_only_connection(resource), device))
3725 goto nla_put_failure;
3727 if (res_opts_to_skb(skb, &device->resource->res_opts, exclude_sensitive))
3728 goto nla_put_failure;
3732 struct disk_conf *disk_conf;
3734 disk_conf = rcu_dereference(device->ldev->disk_conf);
3735 err = disk_conf_to_skb(skb, disk_conf, exclude_sensitive);
3738 struct net_conf *nc;
3740 nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
3742 err = net_conf_to_skb(skb, nc, exclude_sensitive);
3746 goto nla_put_failure;
3748 nla = nla_nest_start_noflag(skb, DRBD_NLA_STATE_INFO);
3750 goto nla_put_failure;
3751 if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
3752 nla_put_u32(skb, T_current_state, device->state.i) ||
3753 nla_put_u64_0pad(skb, T_ed_uuid, device->ed_uuid) ||
3754 nla_put_u64_0pad(skb, T_capacity, get_capacity(device->vdisk)) ||
3755 nla_put_u64_0pad(skb, T_send_cnt, device->send_cnt) ||
3756 nla_put_u64_0pad(skb, T_recv_cnt, device->recv_cnt) ||
3757 nla_put_u64_0pad(skb, T_read_cnt, device->read_cnt) ||
3758 nla_put_u64_0pad(skb, T_writ_cnt, device->writ_cnt) ||
3759 nla_put_u64_0pad(skb, T_al_writ_cnt, device->al_writ_cnt) ||
3760 nla_put_u64_0pad(skb, T_bm_writ_cnt, device->bm_writ_cnt) ||
3761 nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&device->ap_bio_cnt)) ||
3762 nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&device->ap_pending_cnt)) ||
3763 nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&device->rs_pending_cnt)))
3764 goto nla_put_failure;
3769 spin_lock_irq(&device->ldev->md.uuid_lock);
3770 err = nla_put(skb, T_uuids, sizeof(si->uuids), device->ldev->md.uuid);
3771 spin_unlock_irq(&device->ldev->md.uuid_lock);
3774 goto nla_put_failure;
3776 if (nla_put_u32(skb, T_disk_flags, device->ldev->md.flags) ||
3777 nla_put_u64_0pad(skb, T_bits_total, drbd_bm_bits(device)) ||
3778 nla_put_u64_0pad(skb, T_bits_oos,
3779 drbd_bm_total_weight(device)))
3780 goto nla_put_failure;
3781 if (C_SYNC_SOURCE <= device->state.conn &&
3782 C_PAUSED_SYNC_T >= device->state.conn) {
3783 if (nla_put_u64_0pad(skb, T_bits_rs_total,
3784 device->rs_total) ||
3785 nla_put_u64_0pad(skb, T_bits_rs_failed,
3787 goto nla_put_failure;
3792 switch(sib->sib_reason) {
3793 case SIB_SYNC_PROGRESS:
3794 case SIB_GET_STATUS_REPLY:
3796 case SIB_STATE_CHANGE:
3797 if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
3798 nla_put_u32(skb, T_new_state, sib->ns.i))
3799 goto nla_put_failure;
3801 case SIB_HELPER_POST:
3802 if (nla_put_u32(skb, T_helper_exit_code,
3803 sib->helper_exit_code))
3804 goto nla_put_failure;
3806 case SIB_HELPER_PRE:
3807 if (nla_put_string(skb, T_helper, sib->helper_name))
3808 goto nla_put_failure;
3812 nla_nest_end(skb, nla);
3822 int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
3824 struct drbd_config_context adm_ctx;
3825 enum drbd_ret_code retcode;
3828 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3829 if (!adm_ctx.reply_skb)
3831 if (retcode != NO_ERROR)
3834 err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.device, NULL);
3836 nlmsg_free(adm_ctx.reply_skb);
3840 drbd_adm_finish(&adm_ctx, info, retcode);
3844 static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
3846 struct drbd_device *device;
3847 struct drbd_genlmsghdr *dh;
3848 struct drbd_resource *pos = (struct drbd_resource *)cb->args[0];
3849 struct drbd_resource *resource = NULL;
3850 struct drbd_resource *tmp;
3851 unsigned volume = cb->args[1];
3853 /* Open coded, deferred, iteration:
3854 * for_each_resource_safe(resource, tmp, &drbd_resources) {
3855 * connection = "first connection of resource or undefined";
3856 * idr_for_each_entry(&resource->devices, device, i) {
3860 * where resource is cb->args[0];
3861 * and i is cb->args[1];
3863 * cb->args[2] indicates if we shall loop over all resources,
3864 * or just dump all volumes of a single resource.
3866 * This may miss entries inserted after this dump started,
3867 * or entries deleted before they are reached.
3869 * We need to make sure the device won't disappear while
3870 * we are looking at it, and revalidate our iterators
3871 * on each iteration.
3874 /* synchronize with conn_create()/drbd_destroy_connection() */
3876 /* revalidate iterator position */
3877 for_each_resource_rcu(tmp, &drbd_resources) {
3879 /* first iteration */
3891 device = idr_get_next(&resource->devices, &volume);
3893 /* No more volumes to dump on this resource.
3894 * Advance resource iterator. */
3895 pos = list_entry_rcu(resource->resources.next,
3896 struct drbd_resource, resources);
3897 /* Did we dump any volume of this resource yet? */
3899 /* If we reached the end of the list,
3900 * or only a single resource dump was requested,
3902 if (&pos->resources == &drbd_resources || cb->args[2])
3910 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3911 cb->nlh->nlmsg_seq, &drbd_genl_family,
3912 NLM_F_MULTI, DRBD_ADM_GET_STATUS);
3917 /* This is a connection without a single volume.
3918 * Surprisingly enough, it may have a network
3920 struct drbd_connection *connection;
3923 dh->ret_code = NO_ERROR;
3924 connection = the_only_connection(resource);
3925 if (nla_put_drbd_cfg_context(skb, resource, connection, NULL))
3928 struct net_conf *nc;
3930 nc = rcu_dereference(connection->net_conf);
3931 if (nc && net_conf_to_skb(skb, nc, 1) != 0)
3937 D_ASSERT(device, device->vnr == volume);
3938 D_ASSERT(device, device->resource == resource);
3940 dh->minor = device_to_minor(device);
3941 dh->ret_code = NO_ERROR;
3943 if (nla_put_status_info(skb, device, NULL)) {
3945 genlmsg_cancel(skb, dh);
3949 genlmsg_end(skb, dh);
3954 /* where to start the next iteration */
3955 cb->args[0] = (long)pos;
3956 cb->args[1] = (pos == resource) ? volume + 1 : 0;
3958 /* Finding no more resources/volumes/minors results in an empty skb,
3959 * which will terminate the dump. */
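/*
 * Resume sketch (illustrative): netlink dumps keep no state between
 * calls except cb->args[], so get_one_status() encodes its cursor
 * there:
 *
 *   cb->args[0] = resource to continue from
 *   cb->args[1] = next volume number within that resource
 *   cb->args[2] = non-zero: dump only this single resource
 *
 * Each invocation fills one skb and returns; an skb without entries
 * ends the dump.
 */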
3963 /*
3964 * Request status of all resources, or of all volumes within a single resource.
3965 *
3966 * This is a dump, as the answer may not fit in a single reply skb otherwise.
3967 * Which means we cannot use the family->attrbuf or other such members, because
3968 * dump is NOT protected by the genl_lock(). During dump, we only have access
3969 * to the incoming skb, and need to open-code "parsing" of the nlattr payload.
3970 *
3971 * Once things are set up properly, we call into get_one_status().
3972 */
3973 int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
3974 {
3975 const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
3976 struct nlattr *nla;
3977 const char *resource_name;
3978 struct drbd_resource *resource;
3979 int maxtype;
3981 /* Is this a follow-up call? */
3982 if (cb->args[0]) {
3983 /* ... of a single resource dump,
3984 * and the resource iterator has been advanced already? */
3985 if (cb->args[2] && cb->args[2] != cb->args[0])
3986 return 0; /* DONE. */
3987 goto dump;
3988 }
3990 /* First call (from netlink_dump_start). We need to figure out
3991 * which resource(s) the user wants us to dump. */
3992 nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
3993 nlmsg_attrlen(cb->nlh, hdrlen),
3994 DRBD_NLA_CFG_CONTEXT);
3996 /* No explicit context given. Dump all. */
3997 if (!nla)
3998 goto dump;
3999 maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
4000 nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
4001 if (IS_ERR(nla))
4002 return PTR_ERR(nla);
4003 /* context given, but no name present? */
4004 if (!nla)
4005 return -EINVAL;
4006 resource_name = nla_data(nla);
4007 if (!*resource_name)
4008 return -ENODEV;
4009 resource = drbd_find_resource(resource_name);
4010 if (!resource)
4011 return -ENODEV;
4013 kref_put(&resource->kref, drbd_destroy_resource); /* get_one_status() revalidates the resource */
4015 /* prime iterators, and set "filter" mode mark:
4016 * only dump this connection. */
4017 cb->args[0] = (long)resource;
4018 /* cb->args[1] = 0; passed in this way. */
4019 cb->args[2] = (long)resource;
4021 dump:
4022 return get_one_status(skb, cb);
4023 }
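/*
 * For illustration only, not part of the driver: userspace (e.g. drbdsetup)
 * reaches this handler by sending DRBD_ADM_GET_STATUS with
 * NLM_F_REQUEST | NLM_F_DUMP set, optionally nesting a
 * DRBD_NLA_CFG_CONTEXT with T_ctx_resource_name to restrict the dump to a
 * single resource. The kernel then invokes get_one_status() repeatedly,
 * producing one NLM_F_MULTI reply per volume.
 */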
4025 int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
4026 {
4027 struct drbd_config_context adm_ctx;
4028 enum drbd_ret_code retcode;
4029 struct timeout_parms tp;
4030 int err;
4032 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
4033 if (!adm_ctx.reply_skb)
4034 return retcode;
4035 if (retcode != NO_ERROR)
4036 goto out;
4038 tp.timeout_type =
4039 adm_ctx.device->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
4040 test_bit(USE_DEGR_WFC_T, &adm_ctx.device->flags) ? UT_DEGRADED :
4041 UT_DEFAULT;
4043 err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
4044 if (err) {
4045 nlmsg_free(adm_ctx.reply_skb);
4046 return err;
4047 }
4048 out:
4049 drbd_adm_finish(&adm_ctx, info, retcode);
4050 return 0;
4051 }
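/*
 * The timeout type tells the caller which of the configured
 * wait-for-connection timeouts applies (presumably wfc-timeout,
 * degr-wfc-timeout and outdated-wfc-timeout in drbd.conf terms):
 * UT_DEFAULT when nothing special is known, UT_DEGRADED when this node
 * was already degraded before it lost the peer, and UT_PEER_OUTDATED
 * when the peer's disk is known to be outdated.
 */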
4053 int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
4054 {
4055 struct drbd_config_context adm_ctx;
4056 struct drbd_device *device;
4057 enum drbd_ret_code retcode;
4058 struct start_ov_parms parms;
4060 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
4061 if (!adm_ctx.reply_skb)
4062 return retcode;
4063 if (retcode != NO_ERROR)
4064 goto out;
4066 device = adm_ctx.device;
4068 /* resume from last known position, if possible */
4069 parms.ov_start_sector = device->ov_start_sector;
4070 parms.ov_stop_sector = ULLONG_MAX;
4071 if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
4072 int err = start_ov_parms_from_attrs(&parms, info);
4073 if (err) {
4074 retcode = ERR_MANDATORY_TAG;
4075 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
4076 goto out;
4077 }
4078 }
4079 mutex_lock(&adm_ctx.resource->adm_mutex);
4081 /* w_make_ov_request expects position to be aligned */
4082 device->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
4083 device->ov_stop_sector = parms.ov_stop_sector;
4085 /* If there is still bitmap IO pending, e.g. previous resync or verify
4086 * just being finished, wait for it before requesting a new resync. */
4087 drbd_suspend_io(device);
4088 wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
4089 retcode = drbd_request_state(device, NS(conn, C_VERIFY_S));
4090 drbd_resume_io(device);
4092 mutex_unlock(&adm_ctx.resource->adm_mutex);
4093 out:
4094 drbd_adm_finish(&adm_ctx, info, retcode);
4095 return 0;
4096 }
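/*
 * Alignment example (assuming the default 4 KiB bitmap granularity, i.e.
 * BM_SECT_PER_BIT == 8 with 512-byte sectors): a requested start sector of
 * 1000005 is rounded down, 1000005 & ~7 == 1000000, so online verify
 * always starts on a bitmap-bit boundary, as w_make_ov_request expects.
 */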
4099 int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
4100 {
4101 struct drbd_config_context adm_ctx;
4102 struct drbd_device *device;
4103 enum drbd_ret_code retcode;
4104 int skip_initial_sync = 0;
4105 int err;
4106 struct new_c_uuid_parms args;
4108 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
4109 if (!adm_ctx.reply_skb)
4110 return retcode;
4111 if (retcode != NO_ERROR)
4112 goto out_nolock;
4114 device = adm_ctx.device;
4115 memset(&args, 0, sizeof(args));
4116 if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
4117 err = new_c_uuid_parms_from_attrs(&args, info);
4118 if (err) {
4119 retcode = ERR_MANDATORY_TAG;
4120 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
4121 goto out_nolock;
4122 }
4123 }
4125 mutex_lock(&adm_ctx.resource->adm_mutex);
4126 mutex_lock(device->state_mutex); /* Protects us against serialized state changes. */
4128 if (!get_ldev(device)) {
4129 retcode = ERR_NO_DISK;
4130 goto out;
4131 }
4133 /* this is "skip initial sync", assume to be clean */
4134 if (device->state.conn == C_CONNECTED &&
4135 first_peer_device(device)->connection->agreed_pro_version >= 90 &&
4136 device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
4137 drbd_info(device, "Preparing to skip initial sync\n");
4138 skip_initial_sync = 1;
4139 } else if (device->state.conn != C_STANDALONE) {
4140 retcode = ERR_CONNECTED;
4141 goto out_dec;
4142 }
4144 drbd_uuid_set(device, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
4145 drbd_uuid_new_current(device); /* New current, previous to UI_BITMAP */
4147 if (args.clear_bm) {
4148 err = drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
4149 "clear_n_write from new_c_uuid", BM_LOCKED_MASK);
4151 drbd_err(device, "Writing bitmap failed with %d\n", err);
4152 retcode = ERR_IO_MD_DISK;
4154 if (skip_initial_sync) {
4155 drbd_send_uuids_skip_initial_sync(first_peer_device(device));
4156 _drbd_uuid_set(device, UI_BITMAP, 0);
4157 drbd_print_uuids(device, "cleared bitmap UUID");
4158 spin_lock_irq(&device->resource->req_lock);
4159 _drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
4160 CS_VERBOSE, NULL);
4161 spin_unlock_irq(&device->resource->req_lock);
4162 }
4163 }
4165 drbd_md_sync(device);
4166 out_dec:
4167 put_ldev(device);
4168 out:
4169 mutex_unlock(device->state_mutex);
4170 mutex_unlock(&adm_ctx.resource->adm_mutex);
4171 out_nolock:
4172 drbd_adm_finish(&adm_ctx, info, retcode);
4173 return 0;
4174 }
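/*
 * Usage note (hedged, matching the checks above): this implements the
 * "skip initial sync" shortcut for freshly created, still all-zero
 * devices. With both nodes connected, an agreed protocol version >= 90,
 * a just-created current UUID, and clear_bm requested, both sides can
 * jump straight to D_UP_TO_DATE/D_UP_TO_DATE without a full initial sync.
 */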
4176 static enum drbd_ret_code
4177 drbd_check_resource_name(struct drbd_config_context *adm_ctx)
4178 {
4179 const char *name = adm_ctx->resource_name;
4180 if (!name || !name[0]) {
4181 drbd_msg_put_info(adm_ctx->reply_skb, "resource name missing");
4182 return ERR_MANDATORY_TAG;
4183 }
4184 /* if we want to use these in sysfs/configfs/debugfs some day,
4185 * we must not allow slashes */
4186 if (strchr(name, '/')) {
4187 drbd_msg_put_info(adm_ctx->reply_skb, "invalid resource name");
4188 return ERR_INVALID_REQUEST;
4189 }
4190 return NO_ERROR;
4191 }
4193 static void resource_to_info(struct resource_info *info,
4194 struct drbd_resource *resource)
4195 {
4196 info->res_role = conn_highest_role(first_connection(resource));
4197 info->res_susp = resource->susp;
4198 info->res_susp_nod = resource->susp_nod;
4199 info->res_susp_fen = resource->susp_fen;
4200 }
4202 int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
4203 {
4204 struct drbd_connection *connection;
4205 struct drbd_config_context adm_ctx;
4206 enum drbd_ret_code retcode;
4207 struct res_opts res_opts;
4208 int err;
4210 retcode = drbd_adm_prepare(&adm_ctx, skb, info, 0);
4211 if (!adm_ctx.reply_skb)
4212 return retcode;
4213 if (retcode != NO_ERROR)
4214 goto out;
4216 set_res_opts_defaults(&res_opts);
4217 err = res_opts_from_attrs(&res_opts, info);
4218 if (err && err != -ENOMSG) {
4219 retcode = ERR_MANDATORY_TAG;
4220 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
4221 goto out;
4222 }
4224 retcode = drbd_check_resource_name(&adm_ctx);
4225 if (retcode != NO_ERROR)
4226 goto out;
4228 if (adm_ctx.resource) {
4229 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
4230 retcode = ERR_INVALID_REQUEST;
4231 drbd_msg_put_info(adm_ctx.reply_skb, "resource exists");
4232 }
4233 /* else: still NO_ERROR */
4234 goto out;
4235 }
4237 /* not yet safe for genl_family.parallel_ops */
4238 mutex_lock(&resources_mutex);
4239 connection = conn_create(adm_ctx.resource_name, &res_opts);
4240 mutex_unlock(&resources_mutex);
4242 if (connection) {
4243 struct resource_info resource_info;
4245 mutex_lock(&notification_mutex);
4246 resource_to_info(&resource_info, connection->resource);
4247 notify_resource_state(NULL, 0, connection->resource,
4248 &resource_info, NOTIFY_CREATE);
4249 mutex_unlock(&notification_mutex);
4250 } else
4251 retcode = ERR_NOMEM;
4253 out:
4254 drbd_adm_finish(&adm_ctx, info, retcode);
4255 return 0;
4256 }
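/*
 * NLM_F_EXCL gives "create" semantics analogous to open(O_CREAT|O_EXCL):
 * re-creating an existing resource is only an error when the caller
 * explicitly asked for exclusive creation; otherwise it is treated as an
 * idempotent no-op and still reports NO_ERROR (see above).
 */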
4258 static void device_to_info(struct device_info *info,
4259 struct drbd_device *device)
4260 {
4261 info->dev_disk_state = device->state.disk;
4262 }
4265 int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info)
4266 {
4267 struct drbd_config_context adm_ctx;
4268 struct drbd_genlmsghdr *dh = info->userhdr;
4269 enum drbd_ret_code retcode;
4271 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
4272 if (!adm_ctx.reply_skb)
4273 return retcode;
4274 if (retcode != NO_ERROR)
4275 goto out;
4277 if (dh->minor > MINORMASK) {
4278 drbd_msg_put_info(adm_ctx.reply_skb, "requested minor out of range");
4279 retcode = ERR_INVALID_REQUEST;
4280 goto out;
4281 }
4282 if (adm_ctx.volume > DRBD_VOLUME_MAX) {
4283 drbd_msg_put_info(adm_ctx.reply_skb, "requested volume id out of range");
4284 retcode = ERR_INVALID_REQUEST;
4285 goto out;
4286 }
4288 /* drbd_adm_prepare made sure already
4289 * that first_peer_device(device)->connection and device->vnr match the request. */
4290 if (adm_ctx.device) {
4291 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
4292 retcode = ERR_MINOR_OR_VOLUME_EXISTS;
4293 /* else: still NO_ERROR */
4294 goto out;
4295 }
4297 mutex_lock(&adm_ctx.resource->adm_mutex);
4298 retcode = drbd_create_device(&adm_ctx, dh->minor);
4299 if (retcode == NO_ERROR) {
4300 struct drbd_device *device;
4301 struct drbd_peer_device *peer_device;
4302 struct device_info info;
4303 unsigned int peer_devices = 0;
4304 enum drbd_notification_type flags;
4306 device = minor_to_device(dh->minor);
4307 for_each_peer_device(peer_device, device) {
4308 if (!has_net_conf(peer_device->connection))
4309 continue;
4310 peer_devices++;
4311 }
4313 device_to_info(&info, device);
4314 mutex_lock(&notification_mutex);
4315 flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
4316 notify_device_state(NULL, 0, device, &info, NOTIFY_CREATE | flags);
4317 for_each_peer_device(peer_device, device) {
4318 struct peer_device_info peer_device_info;
4320 if (!has_net_conf(peer_device->connection))
4321 continue;
4322 peer_device_to_info(&peer_device_info, peer_device);
4323 flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
4324 notify_peer_device_state(NULL, 0, peer_device, &peer_device_info,
4325 NOTIFY_CREATE | flags);
4326 }
4327 mutex_unlock(&notification_mutex);
4328 }
4329 mutex_unlock(&adm_ctx.resource->adm_mutex);
4330 out:
4331 drbd_adm_finish(&adm_ctx, info, retcode);
4332 return 0;
4333 }
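/*
 * Worked example for the NOTIFY_CONTINUES chaining above (hypothetical
 * numbers): with peer_devices == 2, the device event and the first peer
 * device event carry NOTIFY_CONTINUES, while the second peer device event
 * does not, marking the end of the multi-part CREATE notification.
 */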
4335 static enum drbd_ret_code adm_del_minor(struct drbd_device *device)
4336 {
4337 struct drbd_peer_device *peer_device;
4339 if (device->state.disk == D_DISKLESS &&
4340 /* no need to be device->state.conn == C_STANDALONE &&
4341 * we may want to delete a minor from a live replication group.
4342 */
4343 device->state.role == R_SECONDARY) {
4344 struct drbd_connection *connection =
4345 first_connection(device->resource);
4347 _drbd_request_state(device, NS(conn, C_WF_REPORT_PARAMS),
4348 CS_VERBOSE + CS_WAIT_COMPLETE);
4350 /* If the state engine hasn't stopped the sender thread yet, we
4351 * need to flush the sender work queue before generating the
4352 * DESTROY events here. */
4353 if (get_t_state(&connection->worker) == RUNNING)
4354 drbd_flush_workqueue(&connection->sender_work);
4356 mutex_lock(&notification_mutex);
4357 for_each_peer_device(peer_device, device) {
4358 if (!has_net_conf(peer_device->connection))
4359 continue;
4360 notify_peer_device_state(NULL, 0, peer_device, NULL,
4361 NOTIFY_DESTROY | NOTIFY_CONTINUES);
4362 }
4363 notify_device_state(NULL, 0, device, NULL, NOTIFY_DESTROY);
4364 mutex_unlock(&notification_mutex);
4366 drbd_delete_device(device);
4367 return NO_ERROR;
4368 } else
4369 return ERR_MINOR_CONFIGURED;
4370 }
4372 int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info)
4373 {
4374 struct drbd_config_context adm_ctx;
4375 enum drbd_ret_code retcode;
4377 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
4378 if (!adm_ctx.reply_skb)
4379 return retcode;
4380 if (retcode != NO_ERROR)
4381 goto out;
4383 mutex_lock(&adm_ctx.resource->adm_mutex);
4384 retcode = adm_del_minor(adm_ctx.device);
4385 mutex_unlock(&adm_ctx.resource->adm_mutex);
4386 out:
4387 drbd_adm_finish(&adm_ctx, info, retcode);
4388 return 0;
4389 }
4391 static int adm_del_resource(struct drbd_resource *resource)
4392 {
4393 struct drbd_connection *connection;
4395 for_each_connection(connection, resource) {
4396 if (connection->cstate > C_STANDALONE)
4397 return ERR_NET_CONFIGURED;
4398 }
4399 if (!idr_is_empty(&resource->devices))
4400 return ERR_RES_IN_USE;
4402 /* The state engine has stopped the sender thread, so we don't
4403 * need to flush the sender work queue before generating the
4404 * DESTROY event here. */
4405 mutex_lock(&notification_mutex);
4406 notify_resource_state(NULL, 0, resource, NULL, NOTIFY_DESTROY);
4407 mutex_unlock(&notification_mutex);
4409 mutex_lock(&resources_mutex);
4410 list_del_rcu(&resource->resources);
4411 mutex_unlock(&resources_mutex);
4412 /* Make sure all threads have actually stopped: state handling only
4413 * does drbd_thread_stop_nowait(). */
4414 list_for_each_entry(connection, &resource->connections, connections)
4415 drbd_thread_stop(&connection->worker);
4417 drbd_free_resource(resource);
4418 return NO_ERROR;
4419 }
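/*
 * The teardown ordering here matters: the resource is unlinked under
 * resources_mutex first, so no new reference can be found via the global
 * list, and the per-connection worker threads are joined with
 * drbd_thread_stop() before drbd_free_resource() may release the memory
 * they were running on.
 */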
4421 int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
4422 {
4423 struct drbd_config_context adm_ctx;
4424 struct drbd_resource *resource;
4425 struct drbd_connection *connection;
4426 struct drbd_device *device;
4427 int retcode; /* enum drbd_ret_code, resp. enum drbd_state_rv */
4428 unsigned i;
4430 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
4431 if (!adm_ctx.reply_skb)
4432 return retcode;
4433 if (retcode != NO_ERROR)
4434 goto finish;
4436 resource = adm_ctx.resource;
4437 mutex_lock(&resource->adm_mutex);
4438 /* demote */
4439 for_each_connection(connection, resource) {
4440 struct drbd_peer_device *peer_device;
4442 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
4443 retcode = drbd_set_role(peer_device->device, R_SECONDARY, 0);
4444 if (retcode < SS_SUCCESS) {
4445 drbd_msg_put_info(adm_ctx.reply_skb, "failed to demote");
4446 goto out;
4447 }
4448 }
4450 retcode = conn_try_disconnect(connection, 0);
4451 if (retcode < SS_SUCCESS) {
4452 drbd_msg_put_info(adm_ctx.reply_skb, "failed to disconnect");
4453 goto out;
4454 }
4455 }
4457 /* detach */
4458 idr_for_each_entry(&resource->devices, device, i) {
4459 retcode = adm_detach(device, 0);
4460 if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
4461 drbd_msg_put_info(adm_ctx.reply_skb, "failed to detach");
4462 goto out;
4463 }
4464 }
4466 /* delete volumes */
4467 idr_for_each_entry(&resource->devices, device, i) {
4468 retcode = adm_del_minor(device);
4469 if (retcode != NO_ERROR) {
4470 /* "can not happen" */
4471 drbd_msg_put_info(adm_ctx.reply_skb, "failed to delete volume");
4472 goto out;
4473 }
4474 }
4476 retcode = adm_del_resource(resource);
4477 out:
4478 mutex_unlock(&resource->adm_mutex);
4479 finish:
4480 drbd_adm_finish(&adm_ctx, info, retcode);
4481 return 0;
4482 }
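/*
 * drbd_adm_down thus mirrors what "drbdadm down" would do step by step,
 * in strict order: demote every volume to R_SECONDARY, disconnect every
 * connection, detach every disk, delete the now diskless minors, and
 * finally delete the resource itself; the first step that fails aborts
 * the teardown with a message in the reply skb.
 */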
4484 int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
4485 {
4486 struct drbd_config_context adm_ctx;
4487 struct drbd_resource *resource;
4488 enum drbd_ret_code retcode;
4490 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
4491 if (!adm_ctx.reply_skb)
4492 return retcode;
4493 if (retcode != NO_ERROR)
4494 goto finish;
4495 resource = adm_ctx.resource;
4497 mutex_lock(&resource->adm_mutex);
4498 retcode = adm_del_resource(resource);
4499 mutex_unlock(&resource->adm_mutex);
4500 finish:
4501 drbd_adm_finish(&adm_ctx, info, retcode);
4502 return 0;
4503 }
4505 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
4506 {
4507 struct sk_buff *msg;
4508 struct drbd_genlmsghdr *d_out;
4509 unsigned seq;
4510 int err = -ENOMEM;
4512 seq = atomic_inc_return(&drbd_genl_seq);
4513 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4514 if (!msg)
4515 goto failed;
4517 err = -EMSGSIZE;
4518 d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
4519 if (!d_out) /* cannot happen, but anyways. */
4520 goto nla_put_failure;
4521 d_out->minor = device_to_minor(device);
4522 d_out->ret_code = NO_ERROR;
4524 if (nla_put_status_info(msg, device, sib))
4525 goto nla_put_failure;
4526 genlmsg_end(msg, d_out);
4527 err = drbd_genl_multicast_events(msg, GFP_NOWAIT);
4528 /* msg has been consumed or freed in netlink_broadcast() */
4529 if (err && err != -ESRCH)
4530 goto failed;
4532 return;
4534 nla_put_failure:
4535 nlmsg_free(msg);
4536 failed:
4537 drbd_err(device, "Error %d while broadcasting event. "
4538 "Event seq:%u sib_reason:%u\n",
4539 err, seq, sib->sib_reason);
4540 }
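/*
 * -ESRCH from netlink multicast simply means "no one is listening", which
 * is expected whenever no monitoring tool (e.g. "drbdsetup events2") has
 * the events multicast group open; it is therefore not treated as an
 * error here or in the notify_* helpers below.
 */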
4542 static int nla_put_notification_header(struct sk_buff *msg,
4543 enum drbd_notification_type type)
4544 {
4545 struct drbd_notification_header nh = {
4546 .nh_type = type,
4547 };
4549 return drbd_notification_header_to_skb(msg, &nh, true);
4550 }
4552 void notify_resource_state(struct sk_buff *skb,
4553 unsigned int seq,
4554 struct drbd_resource *resource,
4555 struct resource_info *resource_info,
4556 enum drbd_notification_type type)
4557 {
4558 struct resource_statistics resource_statistics;
4559 struct drbd_genlmsghdr *dh;
4560 bool multicast = false;
4561 int err;
4563 if (!skb) {
4564 seq = atomic_inc_return(&notify_genl_seq);
4565 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4566 err = -ENOMEM;
4567 if (!skb)
4568 goto failed;
4569 multicast = true;
4570 }
4572 err = -EMSGSIZE;
4573 dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_RESOURCE_STATE);
4574 if (!dh)
4575 goto nla_put_failure;
4576 dh->minor = -1U;
4577 dh->ret_code = NO_ERROR;
4578 if (nla_put_drbd_cfg_context(skb, resource, NULL, NULL) ||
4579 nla_put_notification_header(skb, type) ||
4580 ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
4581 resource_info_to_skb(skb, resource_info, true)))
4582 goto nla_put_failure;
4583 resource_statistics.res_stat_write_ordering = resource->write_ordering;
4584 err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN));
4585 if (err)
4586 goto nla_put_failure;
4587 genlmsg_end(skb, dh);
4588 if (multicast) {
4589 err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
4590 /* skb has been consumed or freed in netlink_broadcast() */
4591 if (err && err != -ESRCH)
4592 goto failed;
4593 }
4594 return;
4596 nla_put_failure:
4597 nlmsg_free(skb);
4598 failed:
4599 drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
4600 err, seq);
4601 }
4603 void notify_device_state(struct sk_buff *skb,
4604 unsigned int seq,
4605 struct drbd_device *device,
4606 struct device_info *device_info,
4607 enum drbd_notification_type type)
4608 {
4609 struct device_statistics device_statistics;
4610 struct drbd_genlmsghdr *dh;
4611 bool multicast = false;
4612 int err;
4614 if (!skb) {
4615 seq = atomic_inc_return(&notify_genl_seq);
4616 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4617 err = -ENOMEM;
4618 if (!skb)
4619 goto failed;
4620 multicast = true;
4621 }
4623 err = -EMSGSIZE;
4624 dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_DEVICE_STATE);
4625 if (!dh)
4626 goto nla_put_failure;
4627 dh->minor = device->minor;
4628 dh->ret_code = NO_ERROR;
4629 if (nla_put_drbd_cfg_context(skb, device->resource, NULL, device) ||
4630 nla_put_notification_header(skb, type) ||
4631 ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
4632 device_info_to_skb(skb, device_info, true)))
4633 goto nla_put_failure;
4634 device_to_statistics(&device_statistics, device);
4635 device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN));
4636 genlmsg_end(skb, dh);
4637 if (multicast) {
4638 err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
4639 /* skb has been consumed or freed in netlink_broadcast() */
4640 if (err && err != -ESRCH)
4641 goto failed;
4642 }
4643 return;
4645 nla_put_failure:
4646 nlmsg_free(skb);
4647 failed:
4648 drbd_err(device, "Error %d while broadcasting event. Event seq:%u\n",
4649 err, seq);
4650 }
4652 void notify_connection_state(struct sk_buff *skb,
4653 unsigned int seq,
4654 struct drbd_connection *connection,
4655 struct connection_info *connection_info,
4656 enum drbd_notification_type type)
4657 {
4658 struct connection_statistics connection_statistics;
4659 struct drbd_genlmsghdr *dh;
4660 bool multicast = false;
4661 int err;
4663 if (!skb) {
4664 seq = atomic_inc_return(&notify_genl_seq);
4665 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4666 err = -ENOMEM;
4667 if (!skb)
4668 goto failed;
4669 multicast = true;
4670 }
4672 err = -EMSGSIZE;
4673 dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_CONNECTION_STATE);
4674 if (!dh)
4675 goto nla_put_failure;
4676 dh->minor = -1U;
4677 dh->ret_code = NO_ERROR;
4678 if (nla_put_drbd_cfg_context(skb, connection->resource, connection, NULL) ||
4679 nla_put_notification_header(skb, type) ||
4680 ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
4681 connection_info_to_skb(skb, connection_info, true)))
4682 goto nla_put_failure;
4683 connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->flags);
4684 connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
4685 genlmsg_end(skb, dh);
4686 if (multicast) {
4687 err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
4688 /* skb has been consumed or freed in netlink_broadcast() */
4689 if (err && err != -ESRCH)
4690 goto failed;
4691 }
4692 return;
4694 nla_put_failure:
4695 nlmsg_free(skb);
4696 failed:
4697 drbd_err(connection, "Error %d while broadcasting event. Event seq:%u\n",
4698 err, seq);
4699 }
4701 void notify_peer_device_state(struct sk_buff *skb,
4702 unsigned int seq,
4703 struct drbd_peer_device *peer_device,
4704 struct peer_device_info *peer_device_info,
4705 enum drbd_notification_type type)
4706 {
4707 struct peer_device_statistics peer_device_statistics;
4708 struct drbd_resource *resource = peer_device->device->resource;
4709 struct drbd_genlmsghdr *dh;
4710 bool multicast = false;
4711 int err;
4713 if (!skb) {
4714 seq = atomic_inc_return(&notify_genl_seq);
4715 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4716 err = -ENOMEM;
4717 if (!skb)
4718 goto failed;
4719 multicast = true;
4720 }
4722 err = -EMSGSIZE;
4723 dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_PEER_DEVICE_STATE);
4724 if (!dh)
4725 goto nla_put_failure;
4726 dh->minor = -1U;
4727 dh->ret_code = NO_ERROR;
4728 if (nla_put_drbd_cfg_context(skb, resource, peer_device->connection, peer_device->device) ||
4729 nla_put_notification_header(skb, type) ||
4730 ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
4731 peer_device_info_to_skb(skb, peer_device_info, true)))
4732 goto nla_put_failure;
4733 peer_device_to_statistics(&peer_device_statistics, peer_device);
4734 peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN));
4735 genlmsg_end(skb, dh);
4736 if (multicast) {
4737 err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
4738 /* skb has been consumed or freed in netlink_broadcast() */
4739 if (err && err != -ESRCH)
4740 goto failed;
4741 }
4742 return;
4744 nla_put_failure:
4745 nlmsg_free(skb);
4746 failed:
4747 drbd_err(peer_device, "Error %d while broadcasting event. Event seq:%u\n",
4748 err, seq);
4749 }
4751 void notify_helper(enum drbd_notification_type type,
4752 struct drbd_device *device, struct drbd_connection *connection,
4753 const char *name, int status)
4754 {
4755 struct drbd_resource *resource = device ? device->resource : connection->resource;
4756 struct drbd_helper_info helper_info;
4757 unsigned int seq = atomic_inc_return(&notify_genl_seq);
4758 struct sk_buff *skb = NULL;
4759 struct drbd_genlmsghdr *dh;
4760 int err;
4762 strlcpy(helper_info.helper_name, name, sizeof(helper_info.helper_name));
4763 helper_info.helper_name_len = min(strlen(name), sizeof(helper_info.helper_name));
4764 helper_info.helper_status = status;
4766 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4767 err = -ENOMEM;
4768 if (!skb)
4769 goto fail;
4771 err = -EMSGSIZE;
4772 dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_HELPER);
4773 if (!dh)
4774 goto fail;
4775 dh->minor = device ? device->minor : -1;
4776 dh->ret_code = NO_ERROR;
4777 mutex_lock(&notification_mutex);
4778 if (nla_put_drbd_cfg_context(skb, resource, connection, device) ||
4779 nla_put_notification_header(skb, type) ||
4780 drbd_helper_info_to_skb(skb, &helper_info, true))
4781 goto unlock_fail;
4782 genlmsg_end(skb, dh);
4783 err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
4785 /* skb has been consumed or freed in netlink_broadcast() */
4786 if (err && err != -ESRCH)
4787 goto unlock_fail;
4788 mutex_unlock(&notification_mutex);
4789 return;
4791 unlock_fail:
4792 mutex_unlock(&notification_mutex);
4793 fail:
4794 nlmsg_free(skb);
4795 drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
4796 err, seq);
4797 }
4799 static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
4800 {
4801 struct drbd_genlmsghdr *dh;
4802 int err;
4804 err = -EMSGSIZE;
4805 dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_INITIAL_STATE_DONE);
4806 if (!dh)
4807 goto nla_put_failure;
4808 dh->minor = -1U;
4809 dh->ret_code = NO_ERROR;
4810 if (nla_put_notification_header(skb, NOTIFY_EXISTS))
4811 goto nla_put_failure;
4812 genlmsg_end(skb, dh);
4813 return;
4815 nla_put_failure:
4816 nlmsg_free(skb);
4817 pr_err("Error %d sending event. Event seq:%u\n", err, seq);
4818 }
4820 static void free_state_changes(struct list_head *list)
4821 {
4822 while (!list_empty(list)) {
4823 struct drbd_state_change *state_change =
4824 list_first_entry(list, struct drbd_state_change, list);
4825 list_del(&state_change->list);
4826 forget_state_change(state_change);
4827 }
4828 }
4830 static unsigned int notifications_for_state_change(struct drbd_state_change *state_change)
4831 {
4832 return 1 +
4833 state_change->n_connections +
4834 state_change->n_devices +
4835 state_change->n_devices * state_change->n_connections;
4836 }
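/*
 * Example (hypothetical sizes): a state change on a resource with 2
 * connections and 3 devices yields 1 + 2 + 3 + 2 * 3 = 12 notifications:
 * one for the resource, one per connection, one per device, and one per
 * (device, connection) pair.
 */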
4838 static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
4839 {
4840 struct drbd_state_change *state_change = (struct drbd_state_change *)cb->args[0];
4841 unsigned int seq = cb->args[2];
4842 unsigned int n;
4843 enum drbd_notification_type flags = 0;
4845 /* There is no need for taking notification_mutex here: it doesn't
4846 matter if the initial state events mix with later state change
4847 events; we can always tell the events apart by the NOTIFY_EXISTS
4848 flag. */
4850 cb->args[5]--;
4851 if (cb->args[5] == 1) {
4852 notify_initial_state_done(skb, seq);
4853 goto out;
4854 }
4855 n = cb->args[4]++;
4856 if (cb->args[4] < cb->args[3])
4857 flags |= NOTIFY_CONTINUES;
4858 if (n < 1) {
4859 notify_resource_state_change(skb, seq, state_change->resource,
4860 NOTIFY_EXISTS | flags);
4861 goto next;
4862 }
4863 n--;
4864 if (n < state_change->n_connections) {
4865 notify_connection_state_change(skb, seq, &state_change->connections[n],
4866 NOTIFY_EXISTS | flags);
4867 goto next;
4868 }
4869 n -= state_change->n_connections;
4870 if (n < state_change->n_devices) {
4871 notify_device_state_change(skb, seq, &state_change->devices[n],
4872 NOTIFY_EXISTS | flags);
4873 goto next;
4874 }
4875 n -= state_change->n_devices;
4876 if (n < state_change->n_devices * state_change->n_connections) {
4877 notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
4878 NOTIFY_EXISTS | flags);
4879 goto next;
4880 }
4882 next:
4883 if (cb->args[4] == cb->args[3]) {
4884 struct drbd_state_change *next_state_change =
4885 list_entry(state_change->list.next,
4886 struct drbd_state_change, list);
4887 cb->args[0] = (long)next_state_change;
4888 cb->args[3] = notifications_for_state_change(next_state_change);
4889 cb->args[4] = 0;
4890 }
4891 out:
4892 return skb->len;
4893 }
4895 int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
4896 {
4897 struct drbd_resource *resource;
4898 LIST_HEAD(head);
4900 if (cb->args[5] >= 1) {
4901 if (cb->args[5] > 1)
4902 return get_initial_state(skb, cb);
4903 if (cb->args[0]) {
4904 struct drbd_state_change *state_change =
4905 (struct drbd_state_change *)cb->args[0];
4907 /* connect list to head */
4908 list_add(&head, &state_change->list);
4909 free_state_changes(&head);
4910 }
4911 cb->args[5] = 0;
4912 return 0;
4913 }
4914 cb->args[5] = 2; /* number of iterations */
4915 mutex_lock(&resources_mutex);
4916 for_each_resource(resource, &drbd_resources) {
4917 struct drbd_state_change *state_change;
4919 state_change = remember_old_state(resource, GFP_KERNEL);
4920 if (!state_change) {
4921 if (!list_empty(&head))
4922 free_state_changes(&head);
4923 mutex_unlock(&resources_mutex);
4924 return -ENOMEM;
4925 }
4926 copy_old_to_new_state_change(state_change);
4927 list_add_tail(&state_change->list, &head);
4928 cb->args[5] += notifications_for_state_change(state_change);
4929 }
4930 mutex_unlock(&resources_mutex);
4932 if (!list_empty(&head)) {
4933 struct drbd_state_change *state_change =
4934 list_entry(head.next, struct drbd_state_change, list);
4935 cb->args[0] = (long)state_change;
4936 cb->args[3] = notifications_for_state_change(state_change);
4937 list_del(&head); /* detach list from head */
4938 }
4940 cb->args[2] = cb->nlh->nlmsg_seq;
4941 return get_initial_state(skb, cb);
4942 }
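/*
 * Summary of the cb->args slots used by this initial-state dump (as set up
 * above): args[0] points at the current drbd_state_change, args[2] caches
 * the request's nlmsg_seq, args[3] holds the notification count of the
 * current state change, args[4] the index within it, and args[5] counts
 * down the remaining events; when it reaches 1, DRBD_INITIAL_STATE_DONE is
 * emitted, and the following call frees the remembered state and ends the
 * dump.
 */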