2 Copyright (c) 2010-2012 Red Hat, Inc. <http://www.redhat.com>
3 This file is part of GlusterFS.
5 This file is licensed to you under your choice of the GNU Lesser
6 General Public License, version 3 or any later version (LGPLv3 or
7 later), or the GNU General Public License, version 2 (GPLv2), in all
8 cases as published by the Free Software Foundation.
16 #include <sys/socket.h>
18 #include <sys/types.h>
20 #include <netinet/in.h>
24 #include "cli-mem-types.h"
28 #include "common-utils.h"
30 extern struct rpc_clnt *global_rpc;
31 extern struct rpc_clnt *global_quotad_rpc;
33 extern rpc_clnt_prog_t *cli_rpc_prog;
34 extern rpc_clnt_prog_t cli_quotad_clnt;
37 cli_cmd_volume_help_cbk (struct cli_state *state, struct cli_cmd_word *in_word,
38 const char **words, int wordcount);
/*
 * Handler for "gluster volume info [<volname>|all]".
 * Chooses GET_NEXT_VOLUME (iterate over all volumes) for "info"/"info all",
 * or GET_VOLUME for a single named volume, then dispatches through the
 * proc table entry's fn().
 * NOTE(review): listing is elided (source line numbers jump); the 'ret'
 * declaration, goto/out labels and closing brace are not visible here.
 */
41 cli_cmd_volume_info_cbk (struct cli_state *state, struct cli_cmd_word *word,
42 const char **words, int wordcount)
45 rpc_clnt_procedure_t *proc = NULL;
46 call_frame_t *frame = NULL;
47 cli_cmd_volume_get_ctx_t ctx = {0,};
48 cli_local_t *local = NULL;
52 proc = &cli_rpc_prog->proctable[GLUSTER_CLI_GET_VOLUME];
54 frame = create_frame (THIS, THIS->ctx->pool);
/* wordcount==2 ("volume info") or "volume info all": walk every volume */
58 if ((wordcount == 2) || (wordcount == 3 &&
59 !strcmp (words[2], "all"))) {
60 ctx.flags = GF_CLI_GET_NEXT_VOLUME;
61 proc = &cli_rpc_prog->proctable[GLUSTER_CLI_GET_NEXT_VOLUME];
62 } else if (wordcount == 3) {
63 ctx.flags = GF_CLI_GET_VOLUME;
64 ctx.volname = (char *)words[2];
/* reject over-long volume names before issuing the RPC */
65 if (strlen (ctx.volname) > GD_VOLUME_NAME_MAX) {
66 cli_out ("Invalid volume name");
69 proc = &cli_rpc_prog->proctable[GLUSTER_CLI_GET_VOLUME];
71 cli_usage_out (word->pattern);
76 local = cli_local_get ();
81 local->get_vol.flags = ctx.flags;
83 local->get_vol.volname = gf_strdup (ctx.volname);
88 ret = proc->fn (frame, THIS, &ctx);
/* if the request was never sent and parsing was fine, report failure */
93 cli_cmd_sent_status_get (&sent);
94 if ((sent == 0) && (parse_error == 0))
95 cli_out ("Getting Volume information failed!");
98 CLI_STACK_DESTROY (frame);
/*
 * Handler for "gluster volume sync <hostname> [all|<volname>]".
 * Builds a dict with flags/volname/hostname, asks the user for
 * confirmation (unless in script mode) and dispatches GLUSTER_CLI_SYNC_VOLUME.
 * NOTE(review): elided listing — dict creation, 'ret' declaration and
 * error labels are not visible here.
 */
105 cli_cmd_sync_volume_cbk (struct cli_state *state, struct cli_cmd_word *word,
106 const char **words, int wordcount)
109 rpc_clnt_procedure_t *proc = NULL;
110 call_frame_t *frame = NULL;
114 cli_local_t *local = NULL;
115 gf_answer_t answer = GF_ANSWER_NO;
116 const char *question = "Sync volume may make data "
117 "inaccessible while the sync "
118 "is in progress. Do you want "
121 if ((wordcount < 3) || (wordcount > 4)) {
122 cli_usage_out (word->pattern);
/* "sync <host>" or "sync <host> all" → flag-driven sync of every volume */
131 if ((wordcount == 3) || !strcmp(words[3], "all")) {
132 ret = dict_set_int32 (dict, "flags", (int32_t)
135 gf_log (THIS->name, GF_LOG_ERROR, "failed to set"
140 ret = dict_set_str (dict, "volname", (char *) words[3]);
142 gf_log (THIS->name, GF_LOG_ERROR, "failed to set "
148 ret = dict_set_str (dict, "hostname", (char *) words[2]);
150 gf_log (THIS->name, GF_LOG_ERROR, "failed to set hostname");
/* interactive mode: confirm before a potentially disruptive sync */
154 if (!(state->mode & GLUSTER_MODE_SCRIPT)) {
155 answer = cli_cmd_get_confirmation (state, question);
156 if (GF_ANSWER_NO == answer) {
162 proc = &cli_rpc_prog->proctable[GLUSTER_CLI_SYNC_VOLUME];
164 frame = create_frame (THIS, THIS->ctx->pool);
168 CLI_LOCAL_INIT (local, words, frame, dict);
171 ret = proc->fn (frame, THIS, dict);
176 cli_cmd_sent_status_get (&sent);
177 if ((sent == 0) && (parse_error == 0))
178 cli_out ("Volume sync failed");
181 CLI_STACK_DESTROY (frame);
/*
 * Handler for "gluster volume create ...".
 * Parses the command line into an options dict, reads back the transport
 * type, optionally forces the op in wignore mode, then dispatches
 * GLUSTER_CLI_CREATE_VOLUME.
 * NOTE(review): elided listing — several locals declared here (brick_list,
 * brick_count, sub_count, type) have no visible uses in this view.
 */
187 cli_cmd_volume_create_cbk (struct cli_state *state, struct cli_cmd_word *word,
188 const char **words, int wordcount)
191 rpc_clnt_procedure_t *proc = NULL;
192 call_frame_t *frame = NULL;
193 dict_t *options = NULL;
196 char *brick_list = NULL;
197 int32_t brick_count = 0;
198 int32_t sub_count = 0;
199 int32_t type = GF_CLUSTER_TYPE_NONE;
200 cli_local_t *local = NULL;
201 char *trans_type = NULL;
203 proc = &cli_rpc_prog->proctable[GLUSTER_CLI_CREATE_VOLUME];
205 frame = create_frame (THIS, THIS->ctx->pool);
209 ret = cli_cmd_volume_create_parse (state, words, wordcount, &options);
212 cli_usage_out (word->pattern);
217 ret = dict_get_str (options, "transport", &trans_type);
219 gf_log("cli", GF_LOG_ERROR, "Unable to get transport type");
/* wignore mode: mark the op as forced so glusterd skips warnings */
223 if (state->mode & GLUSTER_MODE_WIGNORE) {
224 ret = dict_set_int32 (options, "force", _gf_true);
226 gf_log ("cli", GF_LOG_ERROR, "Failed to set force "
232 CLI_LOCAL_INIT (local, words, frame, options);
235 ret = proc->fn (frame, THIS, options);
240 cli_cmd_sent_status_get (&sent);
241 if ((sent == 0) && (parse_error == 0))
242 cli_out ("Volume create failed");
245 CLI_STACK_DESTROY (frame);
/*
 * Handler for "gluster volume delete <volname>".
 * Confirms with the user (a stronger warning is substituted when deleting
 * the shared-storage volume), then dispatches GLUSTER_CLI_DELETE_VOLUME.
 * NOTE(review): elided listing — dict creation and the out/error labels
 * are not visible here.
 */
252 cli_cmd_volume_delete_cbk (struct cli_state *state, struct cli_cmd_word *word,
253 const char **words, int wordcount)
256 rpc_clnt_procedure_t *proc = NULL;
257 call_frame_t *frame = NULL;
258 char *volname = NULL;
259 gf_answer_t answer = GF_ANSWER_NO;
260 const char *question = NULL;
263 cli_local_t *local = NULL;
266 question = "Deleting volume will erase all information about the volume. "
267 "Do you want to continue?";
268 proc = &cli_rpc_prog->proctable[GLUSTER_CLI_DELETE_VOLUME];
270 frame = create_frame (THIS, THIS->ctx->pool);
278 if (wordcount != 3) {
279 cli_usage_out (word->pattern);
284 volname = (char *)words[2];
286 ret = dict_set_str (dict, "volname", volname);
288 gf_log (THIS->name, GF_LOG_WARNING, "dict set failed");
/* deleting gluster_shared_storage breaks dependent features — use a
 * more specific warning text */
292 if (!strcmp (volname, GLUSTER_SHARED_STORAGE)) {
293 question = "Deleting the shared storage volume"
294 "(gluster_shared_storage), will affect features "
295 "like snapshot scheduler, geo-replication "
296 "and NFS-Ganesha. Do you still want to "
300 answer = cli_cmd_get_confirmation (state, question);
301 if (GF_ANSWER_NO == answer) {
306 CLI_LOCAL_INIT (local, words, frame, dict);
309 ret = proc->fn (frame, THIS, dict);
314 cli_cmd_sent_status_get (&sent);
315 if ((sent == 0) && (parse_error == 0))
316 cli_out ("Volume delete failed");
319 CLI_STACK_DESTROY (frame);
/*
 * Handler for "gluster volume start <volname> [force]".
 * Sets volname and flags (OP_FORCE when the optional 4th word is "force")
 * in a dict and dispatches GLUSTER_CLI_START_VOLUME.
 * NOTE(review): elided listing — dict/flags declarations and error labels
 * are not visible here.
 */
325 cli_cmd_volume_start_cbk (struct cli_state *state, struct cli_cmd_word *word,
326 const char **words, int wordcount)
329 rpc_clnt_procedure_t *proc = NULL;
330 call_frame_t *frame = NULL;
335 cli_local_t *local = NULL;
337 frame = create_frame (THIS, THIS->ctx->pool);
341 if (wordcount < 3 || wordcount > 4) {
342 cli_usage_out (word->pattern);
355 ret = dict_set_str (dict, "volname", (char *)words[2]);
357 gf_log (THIS->name, GF_LOG_ERROR, "dict set failed");
/* optional trailing "force" keyword; anything else is a usage error */
361 if (wordcount == 4) {
362 if (!strcmp("force", words[3])) {
363 flags |= GF_CLI_FLAG_OP_FORCE;
366 cli_usage_out (word->pattern);
371 ret = dict_set_int32 (dict, "flags", flags);
373 gf_log (THIS->name, GF_LOG_ERROR,
378 proc = &cli_rpc_prog->proctable[GLUSTER_CLI_START_VOLUME];
380 CLI_LOCAL_INIT (local, words, frame, dict);
383 ret = proc->fn (frame, THIS, dict);
388 cli_cmd_sent_status_get (&sent);
389 if ((sent == 0) && (parse_error == 0))
390 cli_out ("Volume start failed");
393 CLI_STACK_DESTROY (frame);
/*
 * Prompt the user with 'question' and read a y/n answer from stdin.
 * Returns GF_ANSWER_YES immediately in script mode (non-interactive).
 * Accepts "y"/"yes"/"n"/"no" case-insensitively; otherwise re-prompts.
 * NOTE(review): elided listing — the surrounding do/while re-prompt loop,
 * 'len'/'flush' declarations and the error-path return are not visible.
 */
399 cli_cmd_get_confirmation (struct cli_state *state, const char *question)
401 char answer[5] = {'\0', };
/* script mode never blocks on a prompt */
405 if (state->mode & GLUSTER_MODE_SCRIPT)
406 return GF_ANSWER_YES;
408 printf ("%s (y/n) ", question);
410 if (fgets (answer, 4, stdin) == NULL) {
411 cli_out("gluster cli read error");
/* strip the trailing newline fgets keeps, if present */
415 len = strlen (answer);
417 if (len && answer [len - 1] == '\n'){
418 answer [--len] = '\0';
/* otherwise drain the rest of the over-long input line */
422 }while (flush != '\n');
428 if (!strcasecmp (answer, "y") || !strcasecmp (answer, "yes"))
429 return GF_ANSWER_YES;
431 else if (!strcasecmp (answer, "n") || !strcasecmp (answer, "no"))
435 cli_out ("Invalid input, please enter y/n");
/*
 * Handler for "gluster volume stop <volname> [force]".
 * Builds volname/flags into a dict, confirms with the user (stronger
 * warning for the shared-storage volume), then dispatches
 * GLUSTER_CLI_STOP_VOLUME.
 * NOTE(review): elided listing — dict/flags declarations and error labels
 * are not visible here.
 */
441 cli_cmd_volume_stop_cbk (struct cli_state *state, struct cli_cmd_word *word,
442 const char **words, int wordcount)
445 rpc_clnt_procedure_t *proc = NULL;
446 call_frame_t *frame = NULL;
448 gf_answer_t answer = GF_ANSWER_NO;
452 char *volname = NULL;
453 cli_local_t *local = NULL;
455 const char *question = "Stopping volume will make its data inaccessible. "
456 "Do you want to continue?";
458 frame = create_frame (THIS, THIS->ctx->pool);
462 if (wordcount < 3 || wordcount > 4) {
463 cli_usage_out (word->pattern);
468 volname = (char*) words[2];
471 ret = dict_set_str (dict, "volname", volname);
473 gf_log (THIS->name, GF_LOG_ERROR, "dict set failed");
/* stopping gluster_shared_storage affects dependent features — warn
 * specifically */
477 if (!strcmp (volname, GLUSTER_SHARED_STORAGE)) {
478 question = "Stopping the shared storage volume"
479 "(gluster_shared_storage), will affect features "
480 "like snapshot scheduler, geo-replication "
481 "and NFS-Ganesha. Do you still want to "
485 if (wordcount == 4) {
486 if (!strcmp("force", words[3])) {
487 flags |= GF_CLI_FLAG_OP_FORCE;
490 cli_usage_out (word->pattern);
496 ret = dict_set_int32 (dict, "flags", flags);
498 gf_log (THIS->name, GF_LOG_ERROR,
503 answer = cli_cmd_get_confirmation (state, question);
505 if (GF_ANSWER_NO == answer) {
510 proc = &cli_rpc_prog->proctable[GLUSTER_CLI_STOP_VOLUME];
512 CLI_LOCAL_INIT (local, words, frame, dict);
515 ret = proc->fn (frame, THIS, dict);
520 cli_cmd_sent_status_get (&sent);
521 if ((sent == 0) && (parse_error == 0))
522 cli_out ("Volume stop on '%s' failed", volname);
525 CLI_STACK_DESTROY (frame);
/*
 * Handler for "gluster volume rename <old> <new>".
 * Puts old-volname/new-volname into a dict and dispatches
 * GLUSTER_CLI_RENAME_VOLUME.
 * NOTE(review): elided listing — unlike the sibling handlers no
 * CLI_LOCAL_INIT call is visible here; cannot tell from this view
 * whether that is intentional.
 */
532 cli_cmd_volume_rename_cbk (struct cli_state *state, struct cli_cmd_word *word,
533 const char **words, int wordcount)
536 rpc_clnt_procedure_t *proc = NULL;
537 call_frame_t *frame = NULL;
543 frame = create_frame (THIS, THIS->ctx->pool);
551 if (wordcount != 4) {
552 cli_usage_out (word->pattern);
557 ret = dict_set_str (dict, "old-volname", (char *)words[2]);
562 ret = dict_set_str (dict, "new-volname", (char *)words[3]);
567 proc = &cli_rpc_prog->proctable[GLUSTER_CLI_RENAME_VOLUME];
570 ret = proc->fn (frame, THIS, dict);
578 cli_cmd_sent_status_get (&sent);
579 if ((sent == 0) && (parse_error == 0))
580 cli_out ("Volume rename on '%s' failed", (char *)words[2]);
583 CLI_STACK_DESTROY (frame);
/*
 * Handler for "gluster volume rebalance ..." (defrag).
 * Not supported on Solaris; otherwise parses via
 * cli_cmd_volume_defrag_parse and dispatches GLUSTER_CLI_DEFRAG_VOLUME.
 * NOTE(review): elided listing — the #else/#endif of the Solaris guard
 * and error labels are not visible here.
 */
589 cli_cmd_volume_defrag_cbk (struct cli_state *state, struct cli_cmd_word *word,
590 const char **words, int wordcount)
593 rpc_clnt_procedure_t *proc = NULL;
594 call_frame_t *frame = NULL;
598 cli_local_t *local = NULL;
599 #ifdef GF_SOLARIS_HOST_OS
600 cli_out ("Command not supported on Solaris");
604 frame = create_frame (THIS, THIS->ctx->pool);
608 ret = cli_cmd_volume_defrag_parse (words, wordcount, &dict);
611 cli_usage_out (word->pattern);
615 proc = &cli_rpc_prog->proctable[GLUSTER_CLI_DEFRAG_VOLUME];
617 CLI_LOCAL_INIT (local, words, frame, dict);
620 ret = proc->fn (frame, THIS, dict);
625 cli_cmd_sent_status_get (&sent);
626 if ((sent == 0) && (parse_error == 0))
627 cli_out ("Volume rebalance failed");
630 CLI_STACK_DESTROY (frame);
/*
 * Handler for "gluster volume reset ...".
 * Parses into an options dict and dispatches GLUSTER_CLI_RESET_VOLUME.
 * NOTE(review): elided listing — 'ret' declaration and error labels are
 * not visible here.
 */
636 cli_cmd_volume_reset_cbk (struct cli_state *state, struct cli_cmd_word *word,
637 const char **words, int wordcount)
642 rpc_clnt_procedure_t *proc = NULL;
643 call_frame_t *frame = NULL;
644 dict_t *options = NULL;
645 cli_local_t *local = NULL;
647 proc = &cli_rpc_prog->proctable[GLUSTER_CLI_RESET_VOLUME];
649 frame = create_frame (THIS, THIS->ctx->pool);
653 ret = cli_cmd_volume_reset_parse (words, wordcount, &options);
655 cli_usage_out (word->pattern);
660 CLI_LOCAL_INIT (local, words, frame, options);
663 ret = proc->fn (frame, THIS, options);
668 cli_cmd_sent_status_get (&sent);
669 if ((sent == 0) && (parse_error == 0))
670 cli_out ("Volume reset failed");
673 CLI_STACK_DESTROY (frame);
/*
 * Handler for "gluster volume profile ...".
 * Parses into an options dict and dispatches GLUSTER_CLI_PROFILE_VOLUME.
 * NOTE(review): elided listing — 'ret' declaration and error labels are
 * not visible here.
 */
680 cli_cmd_volume_profile_cbk (struct cli_state *state, struct cli_cmd_word *word,
681 const char **words, int wordcount)
687 rpc_clnt_procedure_t *proc = NULL;
688 call_frame_t *frame = NULL;
689 dict_t *options = NULL;
690 cli_local_t *local = NULL;
692 ret = cli_cmd_volume_profile_parse (words, wordcount, &options);
695 cli_usage_out (word->pattern);
700 proc = &cli_rpc_prog->proctable[GLUSTER_CLI_PROFILE_VOLUME];
702 frame = create_frame (THIS, THIS->ctx->pool);
706 CLI_LOCAL_INIT (local, words, frame, options);
709 ret = proc->fn (frame, THIS, options);
714 cli_cmd_sent_status_get (&sent);
715 if ((sent == 0) && (parse_error == 0))
716 cli_out ("Volume profile failed");
719 CLI_STACK_DESTROY (frame);
/*
 * Handler for "gluster volume set <volname> <key> <value>".
 * Parses options (which may yield an op_errstr explaining the parse
 * failure) and dispatches GLUSTER_CLI_SET_VOLUME.
 * NOTE(review): elided listing — the branch structure around op_errstr
 * vs. usage output, and error labels, are not fully visible here.
 */
726 cli_cmd_volume_set_cbk (struct cli_state *state, struct cli_cmd_word *word,
727 const char **words, int wordcount)
733 rpc_clnt_procedure_t *proc = NULL;
734 call_frame_t *frame = NULL;
735 dict_t *options = NULL;
736 cli_local_t *local = NULL;
737 char *op_errstr = NULL;
739 proc = &cli_rpc_prog->proctable[GLUSTER_CLI_SET_VOLUME];
741 frame = create_frame (THIS, THIS->ctx->pool);
745 ret = cli_cmd_volume_set_parse (state, words, wordcount,
746 &options, &op_errstr);
/* parser supplied a specific error string — show it instead of usage */
749 cli_err ("%s", op_errstr);
752 cli_usage_out (word->pattern);
758 CLI_LOCAL_INIT (local, words, frame, options);
761 ret = proc->fn (frame, THIS, options);
766 cli_cmd_sent_status_get (&sent);
767 if ((sent == 0) && (parse_error == 0))
768 cli_out ("Volume set failed");
771 CLI_STACK_DESTROY (frame);
/*
 * Handler for "gluster volume add-brick ...".
 * Parses bricks into an options dict; warns interactively if the user is
 * changing the stripe count (unsupported / data-loss risk); honors
 * wignore mode by forcing; dispatches GLUSTER_CLI_ADD_BRICK.
 * NOTE(review): elided listing — 'ret' declaration, wordcount parameter
 * line and error labels are not visible here.
 */
778 cli_cmd_volume_add_brick_cbk (struct cli_state *state,
779 struct cli_cmd_word *word, const char **words,
783 rpc_clnt_procedure_t *proc = NULL;
784 call_frame_t *frame = NULL;
785 dict_t *options = NULL;
788 gf_answer_t answer = GF_ANSWER_NO;
789 cli_local_t *local = NULL;
791 const char *question = "Changing the 'stripe count' of the volume is "
792 "not a supported feature. In some cases it may result in data "
793 "loss on the volume. Also there may be issues with regular "
794 "filesystem operations on the volume after the change. Do you "
795 "really want to continue with 'stripe' count option ? ";
797 frame = create_frame (THIS, THIS->ctx->pool);
801 ret = cli_cmd_volume_add_brick_parse (words, wordcount, &options, 0);
803 cli_usage_out (word->pattern);
808 /* TODO: there are challenges in supporting changing of
809 stripe-count, until it is properly supported give warning to user */
810 if (dict_get (options, "stripe-count")) {
811 answer = cli_cmd_get_confirmation (state, question);
813 if (GF_ANSWER_NO == answer) {
819 if (state->mode & GLUSTER_MODE_WIGNORE) {
820 ret = dict_set_int32 (options, "force", _gf_true);
822 gf_log ("cli", GF_LOG_ERROR, "Failed to set force "
828 proc = &cli_rpc_prog->proctable[GLUSTER_CLI_ADD_BRICK];
830 CLI_LOCAL_INIT (local, words, frame, options);
833 ret = proc->fn (frame, THIS, options);
838 cli_cmd_sent_status_get (&sent);
839 if ((sent == 0) && (parse_error == 0))
840 cli_out ("Volume add-brick failed");
843 CLI_STACK_DESTROY (frame);
/*
 * Validate that the brick count for an attach-tier request is a multiple
 * of the replica (or disperse) count for the given cluster type; prints a
 * type-specific error otherwise.
 * A missing "replica-count" key is not an error — it defaults to 1.
 * NOTE(review): elided listing — return statements and closing brace are
 * not visible here.
 */
849 cli_tier_validate_replica_type (dict_t *dict, int type)
852 int brick_count = -1;
853 int replica_count = 1;
856 ret = dict_get_int32 (dict, "count", &brick_count);
858 gf_log ("cli", GF_LOG_ERROR, "Failed to get brick count");
862 ret = dict_get_int32 (dict, "replica-count", &replica_count);
864 gf_log ("cli", GF_LOG_DEBUG, "Failed to get replica count. "
865 "Defaulting to one");
870 * Change the calculation of sub_count once attach-tier support
872 * sub_count = disperse_count for disperse volume
876 if (brick_count % replica_count) {
877 if (type == GF_CLUSTER_TYPE_REPLICATE)
878 cli_err ("number of bricks is not a multiple of "
880 else if (type == GF_CLUSTER_TYPE_DISPERSE)
881 cli_err ("number of bricks is not a multiple of "
884 cli_err ("number of bricks given doesn't match "
/*
 * Worker for attach-tier: reuses the add-brick parser, validates the
 * replica/brick-count relationship, tags the dict with attach-tier=1 and
 * the hot tier type, then dispatches GLUSTER_CLI_ATTACH_TIER.
 * NOTE(review): elided listing — 'type'/'ret' declarations, wordcount
 * parameter line and error labels are not visible here.
 */
896 do_cli_cmd_volume_attach_tier (struct cli_state *state,
897 struct cli_cmd_word *word, const char **words,
901 rpc_clnt_procedure_t *proc = NULL;
902 call_frame_t *frame = NULL;
903 dict_t *options = NULL;
906 cli_local_t *local = NULL;
909 frame = create_frame (THIS, THIS->ctx->pool);
913 ret = cli_cmd_volume_add_brick_parse (words, wordcount, &options, &type);
915 cli_usage_out (word->pattern);
/* see cli_tier_validate_replica_type: temporary check until attach-tier
 * has its own parser */
921 * Merge this check when attach-tier has it's own cli parse function.
923 ret = cli_tier_validate_replica_type (options, type);
925 cli_usage_out (word->pattern);
930 if (state->mode & GLUSTER_MODE_WIGNORE) {
931 ret = dict_set_int32 (options, "force", _gf_true);
933 gf_log ("cli", GF_LOG_ERROR, "Failed to set force "
939 ret = dict_set_int32 (options, "attach-tier", 1);
943 ret = dict_set_int32 (options, "hot-type", type);
947 proc = &cli_rpc_prog->proctable[GLUSTER_CLI_ATTACH_TIER];
949 CLI_LOCAL_INIT (local, words, frame, options);
952 ret = proc->fn (frame, THIS, options);
957 cli_cmd_sent_status_get (&sent);
958 if ((sent == 0) && (parse_error == 0))
959 cli_out ("attach-tier failed");
962 CLI_STACK_DESTROY (frame);
/*
 * Worker for detach-tier: parses the command, forces the op with a zero
 * brick count, optionally asks for confirmation (data-loss warning), and
 * dispatches GLUSTER_CLI_DETACH_TIER.
 * NOTE(review): elided listing — wordcount parameter line, the
 * need_question out-argument of the parse call, and error labels are not
 * visible here.
 */
968 do_cli_cmd_volume_detach_tier (struct cli_state *state,
969 struct cli_cmd_word *word, const char **words,
973 rpc_clnt_procedure_t *proc = NULL;
974 call_frame_t *frame = NULL;
975 dict_t *options = NULL;
978 gf_answer_t answer = GF_ANSWER_NO;
979 cli_local_t *local = NULL;
980 int need_question = 0;
982 const char *question = "Removing tier can result in data loss. "
983 "Do you want to Continue?";
985 frame = create_frame (THIS, THIS->ctx->pool);
989 ret = cli_cmd_volume_detach_tier_parse(words, wordcount, &options,
992 cli_usage_out (word->pattern);
997 ret = dict_set_int32 (options, "force", 1);
1001 ret = dict_set_int32 (options, "count", 0);
1005 if (!(state->mode & GLUSTER_MODE_SCRIPT) && need_question) {
1006 /* we need to ask question only in case of 'commit or force' */
1007 answer = cli_cmd_get_confirmation (state, question);
1008 if (GF_ANSWER_NO == answer) {
1014 proc = &cli_rpc_prog->proctable[GLUSTER_CLI_DETACH_TIER];
1016 CLI_LOCAL_INIT (local, words, frame, options);
1019 ret = proc->fn (frame, THIS, options);
1024 cli_cmd_sent_status_get (&sent);
1025 if ((sent == 0) && (parse_error == 0))
1026 cli_out ("Volume detach-tier failed");
1029 CLI_STACK_DESTROY (frame);
/*
 * Dispatcher for tier commands: routes "detach-tier"/"attach-tier" (and
 * the "volume tier <vol> detach|attach ..." spellings, shifting the word
 * array down by one) to the do_* workers; everything else goes through
 * cli_cmd_volume_tier_parse and GLUSTER_CLI_TIER.
 * NOTE(review): elided listing — 'i'/'ret' declarations, goto targets
 * after the worker calls, and the closing brace are not visible here.
 */
1035 cli_cmd_volume_tier_cbk (struct cli_state *state,
1036 struct cli_cmd_word *word, const char **words,
1040 call_frame_t *frame = NULL;
1041 dict_t *options = NULL;
1042 char *volname = NULL;
1043 rpc_clnt_procedure_t *proc = NULL;
1044 cli_local_t *local = NULL;
1047 if (wordcount < 4) {
1048 cli_usage_out (word->pattern);
1049 if (wordcount == 3 && !strcmp(words[2], "help"))
1054 if (!strcmp(words[1], "detach-tier")) {
1055 ret = do_cli_cmd_volume_detach_tier (state, word,
/* "volume tier <vol> detach ...": drop the "detach" word and reuse the
 * detach-tier worker with wordcount-1 */
1058 } else if (!strcmp(words[3], "detach")) {
1059 for (i = 3; i < wordcount; i++)
1060 words[i] = words[i+1];
1062 ret = do_cli_cmd_volume_detach_tier (state, word,
1063 words, wordcount-1);
1066 } else if (!strcmp(words[1], "attach-tier")) {
1067 ret = do_cli_cmd_volume_attach_tier (state, word,
/* same word-shift trick for "volume tier <vol> attach ..." */
1070 } else if (!strcmp(words[3], "attach")) {
1071 for (i = 3; i < wordcount; i++)
1072 words[i] = words[i+1];
1074 ret = do_cli_cmd_volume_attach_tier (state, word,
1075 words, wordcount-1);
1079 ret = cli_cmd_volume_tier_parse (words, wordcount, &options);
1081 cli_usage_out (word->pattern);
1085 proc = &cli_rpc_prog->proctable[GLUSTER_CLI_TIER];
1087 frame = create_frame (THIS, THIS->ctx->pool);
1091 CLI_LOCAL_INIT (local, words, frame, options);
1094 ret = proc->fn (frame, THIS, options);
1099 cli_out ("Tier command failed");
/* NOTE(review): options is unref'd here rather than via CLI_STACK_DESTROY
 * as in sibling handlers — cannot confirm ownership from this elided view */
1102 dict_unref (options);
/*
 * Create the quota auxiliary glusterfs mount for 'volname' if it is not
 * already running (checked via its pidfile): mkdir the mount point, then
 * spawn a glusterfs client with the quota client-pid and a per-volume
 * log file.
 * NOTE(review): elided listing — 'ret' declaration, some runcmd arguments
 * and return paths are not visible here.
 */
1108 gf_cli_create_auxiliary_mount (char *volname)
1111 char mountdir[PATH_MAX] = {0,};
1112 char pidfile_path[PATH_MAX] = {0,};
1113 char logfile[PATH_MAX] = {0,};
1114 char qpid [16] = {0,};
1116 GLUSTERFS_GET_AUX_MOUNT_PIDFILE (pidfile_path, volname);
/* idempotent: bail out quietly if the aux mount is already up */
1118 if (gf_is_service_running (pidfile_path, NULL)) {
1119 gf_log ("cli", GF_LOG_DEBUG, "Aux mount of volume %s is running"
1120 " already", volname);
1125 GLUSTERD_GET_QUOTA_AUX_MOUNT_PATH (mountdir, volname, "/");
/* EEXIST is fine — the mount directory may persist across runs */
1126 ret = mkdir (mountdir, 0777);
1127 if (ret && errno != EEXIST) {
1128 gf_log ("cli", GF_LOG_ERROR, "Failed to create auxiliary mount "
1129 "directory %s. Reason : %s", mountdir,
1134 snprintf (logfile, PATH_MAX-1, "%s/quota-mount-%s.log",
1135 DEFAULT_LOG_FILE_DIRECTORY, volname);
1136 snprintf(qpid, 15, "%d", GF_CLIENT_PID_QUOTA_MOUNT);
1138 ret = runcmd (SBIN_DIR"/glusterfs",
1140 "--volfile-id", volname,
1143 "--client-pid", qpid,
1148 gf_log ("cli", GF_LOG_WARNING, "failed to mount glusterfs "
1149 "client. Please check the log file %s for more details",
/*
 * Pre-stage a quota operation: for op codes that operate on paths
 * (enable, limit-usage, limit-objects, remove, remove-objects, list)
 * create the auxiliary mount first.
 * NOTE(review): elided listing — switch header, default case and return
 * are not visible here.
 */
1162 cli_stage_quota_op (char *volname, int op_code)
1167 case GF_QUOTA_OPTION_TYPE_ENABLE:
1168 case GF_QUOTA_OPTION_TYPE_LIMIT_USAGE:
1169 case GF_QUOTA_OPTION_TYPE_LIMIT_OBJECTS:
1170 case GF_QUOTA_OPTION_TYPE_REMOVE:
1171 case GF_QUOTA_OPTION_TYPE_REMOVE_OBJECTS:
1172 case GF_QUOTA_OPTION_TYPE_LIST:
1173 ret = gf_cli_create_auxiliary_mount (volname);
1175 cli_err ("quota: Could not start quota "
/*
 * Fetch the volume's default soft limit via a GLUSTER_CLI_QUOTA RPC and
 * copy it ("default-soft-limit") into xdata as a dynamically allocated
 * string (freed on dict_set failure).
 * NOTE(review): elided listing — the dict_ref on @options mentioned in
 * the comment below, 'ret' declaration and out labels are not visible.
 */
1192 cli_get_soft_limit (dict_t *options, const char **words, dict_t *xdata)
1194 call_frame_t *frame = NULL;
1195 cli_local_t *local = NULL;
1196 rpc_clnt_procedure_t *proc = NULL;
1197 char *default_sl = NULL;
1198 char *default_sl_dup = NULL;
1201 frame = create_frame (THIS, THIS->ctx->pool);
1207 //We need a ref on @options to prevent CLI_STACK_DESTROY
1208 //from destroying it prematurely.
1210 CLI_LOCAL_INIT (local, words, frame, options);
1211 proc = &cli_rpc_prog->proctable[GLUSTER_CLI_QUOTA];
1212 ret = proc->fn (frame, THIS, options);
1214 ret = dict_get_str (options, "default-soft-limit", &default_sl);
1216 gf_log ("cli", GF_LOG_ERROR, "Failed to get default soft limit");
/* dup because dict_set_dynstr takes ownership of the string */
1220 default_sl_dup = gf_strdup (default_sl);
1221 if (!default_sl_dup) {
1226 ret = dict_set_dynstr (xdata, "default-soft-limit", default_sl_dup);
1228 gf_log ("cli", GF_LOG_ERROR, "Failed to set default soft limit");
1229 GF_FREE (default_sl_dup);
1234 CLI_STACK_DESTROY (frame);
1238 /* Checks if at least one limit has been set on the volume
1240 * Returns true if at least one limit is set. Returns false otherwise.
/*
 * Implementation: reads the on-disk quota.conf of the volume directly
 * (path hardcoded under GLUSTERD_DEFAULT_WORKDIR — see TODO below) and
 * scans for a gfid record whose stored type matches the requested
 * list type (usage vs objects).
 * NOTE(review): elided listing — fd/ret/gfid_type declarations, the scan
 * loop header, close(fd) and return are not visible here.
 */
1243 _limits_set_on_volume (char *volname, int type) {
1244 gf_boolean_t limits_set = _gf_false;
1246 char quota_conf_file[PATH_MAX] = {0,};
1248 char buf[16] = {0,};
1249 float version = 0.0f;
1250 char gfid_type_stored = 0;
1253 /* TODO: fix hardcoding; Need to perform an RPC call to glusterd
1254 * to fetch working directory
1256 sprintf (quota_conf_file, "%s/vols/%s/quota.conf",
1257 GLUSTERD_DEFAULT_WORKDIR,
1259 fd = open (quota_conf_file, O_RDONLY);
1263 ret = quota_conf_read_version (fd, &version);
/* LIST → usage records; otherwise (list-objects) → object records */
1267 if (type == GF_QUOTA_OPTION_TYPE_LIST)
1268 gfid_type = GF_QUOTA_CONF_TYPE_USAGE;
1270 gfid_type = GF_QUOTA_CONF_TYPE_OBJECTS;
1272 /* Try to read atleast one gfid of type 'gfid_type' */
1274 ret = quota_conf_read_gfid (fd, buf, &gfid_type_stored,
1279 if (gfid_type_stored == gfid_type) {
1280 limits_set = _gf_true;
1291 /* Checks if the mount is connected to the bricks
1293 * Returns true if connected and false if not
/*
 * Implementation: (re)creates the aux mount, then stat()s its root.
 * ENOTCONN from stat means the client is up but the bricks are not
 * reachable; any other stat error is reported generically.
 * NOTE(review): elided listing — 'ret' declaration and the final return
 * are not visible here.
 */
1296 _quota_aux_mount_online (char *volname)
1299 char mount_path[PATH_MAX + 1] = {0,};
1300 struct stat buf = {0,};
1302 GF_ASSERT (volname);
1304 /* Try to create the aux mount before checking if bricks are online */
1305 ret = gf_cli_create_auxiliary_mount (volname);
1307 cli_err ("quota: Could not start quota auxiliary mount");
1311 GLUSTERD_GET_QUOTA_AUX_MOUNT_PATH (mount_path, volname, "/");
1313 ret = sys_stat (mount_path, &buf);
1315 if (ENOTCONN == errno) {
1316 cli_err ("quota: Cannot connect to bricks. Check if "
1317 "bricks are online.");
1319 cli_err ("quota: Error on quota auxiliary mount (%s).",
/*
 * Handle "quota <vol> list [...]" / "list-objects": fetches the default
 * soft limit, verifies at least one matching limit exists and that the
 * aux mount is online, then walks every gfid recorded in the volume's
 * quota.conf, issuing one GF_AGGREGATOR_GETLIMIT request per gfid to
 * quotad and printing each result (XML list is closed at the end in XML
 * mode). Overall ret is -1 only if every per-gfid request failed.
 * NOTE(review): elided listing — fd/ret/type/count/all_failed/gfid_type
 * declarations, several out/xml_output labels and the closing brace are
 * not visible here.
 */
1328 cli_cmd_quota_handle_list_all (const char **words, dict_t *options)
1333 rpc_clnt_procedure_t *proc = NULL;
1334 cli_local_t *local = NULL;
1335 call_frame_t *frame = NULL;
1336 dict_t *xdata = NULL;
1337 char *gfid_str = NULL;
1338 char *volname = NULL;
1339 char *volname_dup = NULL;
1340 unsigned char buf[16] = {0};
1342 char quota_conf_file[PATH_MAX] = {0};
1343 gf_boolean_t xml_err_flag = _gf_false;
1344 char err_str[NAME_MAX] = {0,};
1347 float version = 0.0f;
1349 xdata = dict_new ();
1355 ret = dict_get_str (options, "volname", &volname);
1357 gf_log ("cli", GF_LOG_ERROR, "Failed to get volume name");
1361 ret = dict_get_int32 (options, "type", &type);
1363 gf_log ("cli", GF_LOG_ERROR, "Failed to get quota option type");
1367 ret = dict_set_int32 (xdata, "type", type);
1369 gf_log ("cli", GF_LOG_ERROR, "Failed to set type in xdata");
1373 ret = cli_get_soft_limit (options, words, xdata);
1375 gf_log ("cli", GF_LOG_ERROR, "Failed to fetch default "
1380 /* Check if at least one limit is set on volume. No need to check for
1381 * quota enabled as cli_get_soft_limit() handles that
1383 if (!_limits_set_on_volume (volname, type)) {
1384 snprintf (err_str, sizeof (err_str), "No%s quota configured on"
1386 (type == GF_QUOTA_OPTION_TYPE_LIST) ? "" : " inode",
1388 if (global_state->mode & GLUSTER_MODE_XML) {
1389 xml_err_flag = _gf_true;
1391 cli_out ("quota: %s", err_str);
1397 /* Check if the mount is online before doing any listing */
1398 if (!_quota_aux_mount_online (volname)) {
1403 frame = create_frame (THIS, THIS->ctx->pool);
1409 volname_dup = gf_strdup (volname);
/* dict_set_dynstr takes ownership of volname_dup on success */
1415 ret = dict_set_dynstr (xdata, "volume-uuid", volname_dup);
1417 gf_log ("cli", GF_LOG_ERROR, "Failed to set volume-uuid");
1418 GF_FREE (volname_dup);
1422 //TODO: fix hardcoding; Need to perform an RPC call to glusterd
1423 //to fetch working directory
1424 sprintf (quota_conf_file, "%s/vols/%s/quota.conf",
1425 GLUSTERD_DEFAULT_WORKDIR,
1427 fd = open (quota_conf_file, O_RDONLY);
1429 //This may because no limits were yet set on the volume
1430 gf_log ("cli", GF_LOG_TRACE, "Unable to open "
1436 ret = quota_conf_read_version (fd, &version);
1440 CLI_LOCAL_INIT (local, words, frame, xdata);
1441 proc = &cli_quotad_clnt.proctable[GF_AGGREGATOR_GETLIMIT];
1443 gfid_str = GF_CALLOC (1, gf_common_mt_char, 64);
/* one GETLIMIT request per gfid record in quota.conf */
1448 for (count = 0;; count++) {
1449 ret = quota_conf_read_gfid (fd, buf, &gfid_type, version);
1452 } else if (ret < 0) {
1453 gf_log (THIS->name, GF_LOG_CRITICAL, "Quota "
1454 "configuration store may be corrupt.");
/* skip records whose kind (usage vs objects) doesn't match the query */
1458 if ((type == GF_QUOTA_OPTION_TYPE_LIST &&
1459 gfid_type == GF_QUOTA_CONF_TYPE_OBJECTS) ||
1460 (type == GF_QUOTA_OPTION_TYPE_LIST_OBJECTS &&
1461 gfid_type == GF_QUOTA_CONF_TYPE_USAGE))
1464 uuid_utoa_r (buf, gfid_str);
1465 ret = dict_set_str (xdata, "gfid", gfid_str);
1467 gf_log ("cli", GF_LOG_ERROR, "Failed to set gfid");
1471 ret = proc->fn (frame, THIS, xdata);
1473 gf_log ("cli", GF_LOG_ERROR, "Failed to get quota "
1474 "limits for %s", uuid_utoa ((unsigned char*)buf));
1477 dict_del (xdata, "gfid");
1478 all_failed = all_failed && ret;
1481 if (global_state->mode & GLUSTER_MODE_XML) {
1482 ret = cli_xml_output_vol_quota_limit_list_end (local);
1484 gf_log ("cli", GF_LOG_ERROR, "Error in printing "
1491 ret = all_failed? -1: 0;
1499 ret = cli_xml_output_str ("volQuota", NULL, -1, 0, err_str);
1501 gf_log ("cli", GF_LOG_ERROR, "Error outputting in "
1512 gf_log ("cli", GF_LOG_ERROR, "Could not fetch and display quota"
1515 CLI_STACK_DESTROY (frame);
/*
 * Handler for "gluster volume bitrot ...".
 * Parses into an options dict and dispatches GLUSTER_CLI_BITROT.
 * NOTE(review): elided listing — ret/parse_err declarations and error
 * labels are not visible here.
 */
1520 cli_cmd_bitrot_cbk (struct cli_state *state, struct cli_cmd_word *word,
1521 const char **words, int wordcount)
1526 call_frame_t *frame = NULL;
1527 dict_t *options = NULL;
1528 cli_local_t *local = NULL;
1529 rpc_clnt_procedure_t *proc = NULL;
1532 ret = cli_cmd_bitrot_parse (words, wordcount, &options);
1534 cli_usage_out (word->pattern);
1539 frame = create_frame (THIS, THIS->ctx->pool);
1545 proc = &cli_rpc_prog->proctable[GLUSTER_CLI_BITROT];
1551 CLI_LOCAL_INIT (local, words, frame, options);
1554 ret = proc->fn (frame, THIS, options);
1559 cli_cmd_sent_status_get (&sent);
1560 if ((sent == 0) && (parse_err == 0))
1561 cli_err ("Bit rot command failed. Please check the cli "
1562 "logs for more details");
1566 CLI_STACK_DESTROY (frame);
/*
 * Top-level handler for "gluster volume quota ..." and "... inode-quota".
 * Special-cases: disable (asks for confirmation — it wipes quota config)
 * and list/list-objects with no paths (delegated to
 * cli_cmd_quota_handle_list_all). All other ops create the aux mount via
 * cli_stage_quota_op and go through GLUSTER_CLI_QUOTA.
 * NOTE(review): elided listing — ret/type declarations, the switch header
 * and several break/goto lines are not visible here.
 */
1572 cli_cmd_quota_cbk (struct cli_state *state, struct cli_cmd_word *word,
1573 const char **words, int wordcount)
1579 rpc_clnt_procedure_t *proc = NULL;
1580 call_frame_t *frame = NULL;
1581 dict_t *options = NULL;
1582 gf_answer_t answer = GF_ANSWER_NO;
1583 cli_local_t *local = NULL;
1585 char *volname = NULL;
1586 const char *question = "Disabling quota will delete all the quota "
1587 "configuration. Do you want to continue?";
1589 //parse **words into options dictionary
1590 if (strcmp (words[1], "inode-quota") == 0) {
1591 ret = cli_cmd_inode_quota_parse (words, wordcount, &options);
1593 cli_usage_out (word->pattern);
1598 ret = cli_cmd_quota_parse (words, wordcount, &options);
1600 cli_usage_out (word->pattern);
1606 ret = dict_get_int32 (options, "type", &type);
1608 gf_log ("cli", GF_LOG_ERROR, "Failed to get opcode");
1612 //handle quota-disable and quota-list-all different from others
1614 case GF_QUOTA_OPTION_TYPE_DISABLE:
1615 answer = cli_cmd_get_confirmation (state, question);
1616 if (answer == GF_ANSWER_NO)
1619 case GF_QUOTA_OPTION_TYPE_LIST:
1620 case GF_QUOTA_OPTION_TYPE_LIST_OBJECTS:
1623 ret = cli_cmd_quota_handle_list_all (words, options);
1629 ret = dict_get_str (options, "volname", &volname);
1631 gf_log ("cli", GF_LOG_ERROR, "Failed to get volume name");
1635 //create auxiliary mount need for quota commands that operate on path
1636 ret = cli_stage_quota_op (volname, type);
1640 frame = create_frame (THIS, THIS->ctx->pool);
1646 CLI_LOCAL_INIT (local, words, frame, options);
1647 proc = &cli_rpc_prog->proctable[GLUSTER_CLI_QUOTA];
1650 ret = proc->fn (frame, THIS, options);
1654 cli_cmd_sent_status_get (&sent);
1655 if (sent == 0 && parse_err == 0)
1656 cli_out ("Quota command failed. Please check the cli "
1657 "logs for more details");
1660 CLI_STACK_DESTROY (frame);
/*
 * Handler for "gluster volume remove-brick ...".
 * Parses the command (which also reports whether a confirmation question
 * is needed), substitutes a stronger warning for the shared-storage
 * volume, confirms interactively, then dispatches
 * GLUSTER_CLI_REMOVE_BRICK.
 * NOTE(review): elided listing — wordcount parameter line, the
 * need_question out-argument of the parse call and error labels are not
 * visible here.
 */
1665 cli_cmd_volume_remove_brick_cbk (struct cli_state *state,
1666 struct cli_cmd_word *word, const char **words,
1670 rpc_clnt_procedure_t *proc = NULL;
1671 call_frame_t *frame = NULL;
1672 dict_t *options = NULL;
1673 gf_answer_t answer = GF_ANSWER_NO;
1675 int parse_error = 0;
1676 int need_question = 0;
1677 cli_local_t *local = NULL;
1678 char *volname = NULL;
1680 const char *question = "Removing brick(s) can result in data loss. "
1681 "Do you want to Continue?";
1683 frame = create_frame (THIS, THIS->ctx->pool);
1687 ret = cli_cmd_volume_remove_brick_parse (words, wordcount, &options,
1690 cli_usage_out (word->pattern);
1695 ret = dict_get_str (options, "volname", &volname);
1696 if (ret || !volname) {
1697 gf_log ("cli", GF_LOG_ERROR, "Failed to fetch volname");
/* shared-storage volume: always ask, with a feature-impact warning */
1702 if (!strcmp (volname, GLUSTER_SHARED_STORAGE)) {
1703 question = "Removing brick from the shared storage volume"
1704 "(gluster_shared_storage), will affect features "
1705 "like snapshot scheduler, geo-replication "
1706 "and NFS-Ganesha. Do you still want to "
1708 need_question = _gf_true;
1711 if (!(state->mode & GLUSTER_MODE_SCRIPT) && need_question) {
1712 /* we need to ask question only in case of 'commit or force' */
1713 answer = cli_cmd_get_confirmation (state, question);
1714 if (GF_ANSWER_NO == answer) {
1720 proc = &cli_rpc_prog->proctable[GLUSTER_CLI_REMOVE_BRICK];
1722 CLI_LOCAL_INIT (local, words, frame, options);
1725 ret = proc->fn (frame, THIS, options);
1730 cli_cmd_sent_status_get (&sent);
1731 if ((sent == 0) && (parse_error == 0))
1732 cli_out ("Volume remove-brick failed");
1735 CLI_STACK_DESTROY (frame);
/*
 * Handler for "gluster volume replace-brick ...".
 * Not supported on Solaris; otherwise parses into options and dispatches
 * GLUSTER_CLI_REPLACE_BRICK.
 * NOTE(review): elided listing — the words/wordcount parameter line,
 * #else/#endif of the Solaris guard and error labels are not visible.
 */
1742 cli_cmd_volume_replace_brick_cbk (struct cli_state *state,
1743 struct cli_cmd_word *word,
1748 rpc_clnt_procedure_t *proc = NULL;
1749 call_frame_t *frame = NULL;
1750 dict_t *options = NULL;
1752 int parse_error = 0;
1753 cli_local_t *local = NULL;
1755 #ifdef GF_SOLARIS_HOST_OS
1756 cli_out ("Command not supported on Solaris");
1759 proc = &cli_rpc_prog->proctable[GLUSTER_CLI_REPLACE_BRICK];
1761 frame = create_frame (THIS, THIS->ctx->pool);
1765 ret = cli_cmd_volume_replace_brick_parse (words, wordcount, &options);
1768 cli_usage_out (word->pattern);
1773 CLI_LOCAL_INIT (local, words, frame, options);
1776 ret = proc->fn (frame, THIS, options);
1781 cli_cmd_sent_status_get (&sent);
1782 if ((sent == 0) && (parse_error == 0))
1783 cli_out ("Volume replace-brick failed");
1786 CLI_STACK_DESTROY (frame);
/*
 * Handler for "volume set-transport": no-op that just broadcasts a
 * success response to unblock the CLI.
 * NOTE(review): elided listing — return statement not visible here.
 */
1793 cli_cmd_volume_set_transport_cbk (struct cli_state *state,
1794 struct cli_cmd_word *word,
1795 const char **words, int wordcount)
1797 cli_cmd_broadcast_response (0);
/*
 * Handler for "gluster volume top ...".
 * Parses into an options dict and dispatches GLUSTER_CLI_TOP_VOLUME.
 * NOTE(review): elided listing — 'ret' declaration and error labels are
 * not visible here.
 */
1802 cli_cmd_volume_top_cbk (struct cli_state *state, struct cli_cmd_word *word,
1803 const char **words, int wordcount)
1807 rpc_clnt_procedure_t *proc = NULL;
1808 call_frame_t *frame = NULL;
1809 dict_t *options = NULL;
1811 int parse_error = 0;
1812 cli_local_t *local = NULL;
1814 ret = cli_cmd_volume_top_parse (words, wordcount, &options);
1818 cli_usage_out (word->pattern);
1822 proc = &cli_rpc_prog->proctable[GLUSTER_CLI_TOP_VOLUME];
1824 frame = create_frame (THIS, THIS->ctx->pool);
1828 CLI_LOCAL_INIT (local, words, frame, options);
1831 ret = proc->fn (frame, THIS, options);
1836 cli_cmd_sent_status_get (&sent);
1837 if ((sent == 0) && (parse_error == 0))
1838 cli_out ("Volume top failed");
1841 CLI_STACK_DESTROY (frame);
/* Handler for "volume log <VOLNAME> rotate [BRICK]" (and the legacy
 * "volume log rotate <VOLNAME> [BRICK]" syntax): validates the word count,
 * checks that "rotate" appears in position 2 or 3, then invokes the
 * GLUSTER_CLI_LOG_ROTATE RPC procedure.
 * NOTE(review): elided listing — intermediate error checks not visible. */
1849 cli_cmd_log_rotate_cbk (struct cli_state *state, struct cli_cmd_word *word,
1850 const char **words, int wordcount)
1853 rpc_clnt_procedure_t *proc = NULL;
1854 call_frame_t *frame = NULL;
1855 dict_t *options = NULL;
1857 int parse_error = 0;
1858 cli_local_t *local = NULL;
/* Exactly 4 or 5 words are acceptable: both syntaxes, optional BRICK. */
1860 if (!((wordcount == 4) || (wordcount == 5))) {
1861 cli_usage_out (word->pattern);
/* "rotate" may be word 2 (old syntax) or word 3 (new syntax). */
1866 if (!((strcmp ("rotate", words[2]) == 0) ||
1867 (strcmp ("rotate", words[3]) == 0))) {
1868 cli_usage_out (word->pattern);
1873 proc = &cli_rpc_prog->proctable[GLUSTER_CLI_LOG_ROTATE];
1875 frame = create_frame (THIS, THIS->ctx->pool);
1879 ret = cli_cmd_log_rotate_parse (words, wordcount, &options);
1883 CLI_LOCAL_INIT (local, words, frame, options);
1886 ret = proc->fn (frame, THIS, options);
/* Report only when the request never went out and parsing succeeded. */
1891 cli_cmd_sent_status_get (&sent);
1892 if ((sent == 0) && (parse_error == 0))
1893 cli_out ("Volume log rotate failed");
1895 CLI_STACK_DESTROY (frame);
1900 #if (SYNCDAEMON_COMPILE)
/* Probe whether geo-replication (gsyncd) is installed by running
 * "gsyncd --version" and checking its output for the string "gsyncd".
 * Returns 0 when present, -1 otherwise; used to disable the geo-rep
 * CLI command when the daemon is absent. */
1902 cli_check_gsync_present ()
1904 char buff[PATH_MAX] = {0, };
1905 runner_t runner = {0,};
/* gsyncd behaves differently when invoked by glusterd/cli; signal that
 * via this environment variable before spawning it. */
1909 ret = setenv ("_GLUSTERD_CALLED_", "1", 1);
1911 gf_log ("", GF_LOG_WARNING, "setenv syscall failed, hence could"
1912 "not assert if geo-replication is installed");
1917 runner_add_args (&runner, GSYNCD_PREFIX"/gsyncd", "--version", NULL);
/* Capture gsyncd's stdout through a pipe so we can inspect it. */
1918 runner_redir (&runner, STDOUT_FILENO, RUN_PIPE);
1919 ret = runner_start (&runner);
1921 gf_log ("", GF_LOG_INFO, "geo-replication not installed");
1925 ptr = fgets(buff, sizeof(buff), runner_chio (&runner, STDOUT_FILENO));
/* The version banner must mention "gsyncd"; anything else means some
 * other binary answered. */
1927 if (!strstr (buff, "gsyncd")) {
1936 ret = runner_end (&runner);
1939 gf_log ("", GF_LOG_ERROR, "geo-replication not installed");
1942 gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
1943 return ret ? -1 : 0;
/* Registration-time callback for the geo-replication command: disables
 * the command in the CLI when gsyncd is not installed on this host. */
1948 cli_cmd_check_gsync_exists_cbk (struct cli_cmd *this)
1953 ret = cli_check_gsync_present ();
1955 this->disable = _gf_true;
/* Handler for the "volume geo-replication ..." command family: parse the
 * sub-command into an options dict and invoke GLUSTER_CLI_GSYNC_SET.
 * NOTE(review): elided listing — declarations of ret/parse_err and the
 * intermediate error checks are not visible here. */
1961 cli_cmd_volume_gsync_set_cbk (struct cli_state *state, struct cli_cmd_word *word,
1962 const char **words, int wordcount)
1966 dict_t *options = NULL;
1967 rpc_clnt_procedure_t *proc = NULL;
1968 call_frame_t *frame = NULL;
1969 cli_local_t *local = NULL;
1971 proc = &cli_rpc_prog->proctable [GLUSTER_CLI_GSYNC_SET];
1973 frame = create_frame (THIS, THIS->ctx->pool);
1974 if (frame == NULL) {
1979 ret = cli_cmd_gsync_set_parse (words, wordcount, &options);
1981 cli_usage_out (word->pattern);
1986 CLI_LOCAL_INIT (local, words, frame, options);
1989 ret = proc->fn (frame, THIS, options);
/* GEOREP expands to the user-visible command name in the error text. */
1992 if (ret && parse_err == 0)
1993 cli_out (GEOREP" command failed");
1995 CLI_STACK_DESTROY (frame);
/* Handler for "volume status": parse the target (all | volume | brick) and
 * detail flags, then dispatch to the per-volume or all-volumes RPC
 * procedure depending on the parsed "cmd" bits.
 * NOTE(review): elided listing — error checks between statements are not
 * visible. */
2001 cli_cmd_volume_status_cbk (struct cli_state *state,
2002 struct cli_cmd_word *word,
2003 const char **words, int wordcount)
2006 rpc_clnt_procedure_t *proc = NULL;
2007 call_frame_t *frame = NULL;
2008 dict_t *dict = NULL;
2010 cli_local_t *local = NULL;
2012 ret = cli_cmd_volume_status_parse (words, wordcount, &dict);
2015 cli_usage_out (word->pattern);
/* "cmd" is a bitmask written by the parser; GF_CLI_STATUS_ALL selects
 * the cluster-wide variant. */
2019 ret = dict_get_uint32 (dict, "cmd", &cmd);
2023 if (!(cmd & GF_CLI_STATUS_ALL)) {
2024 /* for one volume or brick */
2025 proc = &cli_rpc_prog->proctable[GLUSTER_CLI_STATUS_VOLUME];
2027 /* volume status all or all detail */
2028 proc = &cli_rpc_prog->proctable[GLUSTER_CLI_STATUS_ALL];
2034 frame = create_frame (THIS, THIS->ctx->pool);
2038 CLI_LOCAL_INIT (local, words, frame, dict);
2040 ret = proc->fn (frame, THIS, dict);
2043 CLI_STACK_DESTROY (frame);
/* Populate the detailed-status fields of *status for brick index i from
 * the response dict. Keys follow the "brick<i>.<field>" convention.
 * Missing keys fall back to NULL/0 so the printer can show "N/A".
 * Device, mount options, fs name and inode size are Linux-only fields. */
2050 cli_get_detail_status (dict_t *dict, int i, cli_volume_status_t *status)
2054 char key[1024] = {0};
/* Free/total disk space are converted to human-readable strings
 * (allocated by gf_uint64_2human_readable; ownership presumably passes
 * to status — TODO confirm against the caller's cleanup). */
2057 memset (key, 0, sizeof (key));
2058 snprintf (key, sizeof (key), "brick%d.free", i);
2059 ret = dict_get_uint64 (dict, key, &free);
2061 status->free = gf_uint64_2human_readable (free);
2065 memset (key, 0, sizeof (key));
2066 snprintf (key, sizeof (key), "brick%d.total", i);
2067 ret = dict_get_uint64 (dict, key, &total);
2069 status->total = gf_uint64_2human_readable (total);
2073 #ifdef GF_LINUX_HOST_OS
2074 memset (key, 0, sizeof (key));
2075 snprintf (key, sizeof (key), "brick%d.device", i);
2076 ret = dict_get_str (dict, key, &(status->device));
2078 status->device = NULL;
2081 memset (key, 0, sizeof (key));
2082 snprintf (key, sizeof (key), "brick%d.block_size", i);
2083 ret = dict_get_uint64 (dict, key, &(status->block_size));
2086 status->block_size = 0;
2089 #ifdef GF_LINUX_HOST_OS
2090 memset (key, 0, sizeof (key));
2091 snprintf (key, sizeof (key), "brick%d.mnt_options", i);
2092 ret = dict_get_str (dict, key, &(status->mount_options));
2094 status->mount_options = NULL;
2096 memset (key, 0, sizeof (key));
2097 snprintf (key, sizeof (key), "brick%d.fs_name", i);
2098 ret = dict_get_str (dict, key, &(status->fs_name));
2101 status->fs_name = NULL;
2104 memset (key, 0, sizeof (key));
2105 snprintf (key, sizeof (key), "brick%d.inode_size", i);
2106 ret = dict_get_str (dict, key, &(status->inode_size));
2108 status->inode_size = NULL;
2109 #endif /* GF_LINUX_HOST_OS */
2111 memset (key, 0, sizeof (key));
2112 snprintf (key, sizeof (key), "brick%d.total_inodes", i);
2113 ret = dict_get_uint64 (dict, key,
2114 &(status->total_inodes));
2116 status->total_inodes = 0;
2118 memset (key, 0, sizeof (key));
2119 snprintf (key, sizeof (key), "brick%d.free_inodes", i);
2120 ret = dict_get_uint64 (dict, key, &(status->free_inodes));
2123 status->free_inodes = 0;
/* Print one brick's detailed status ("volume status ... detail") as
 * aligned "label : value" rows, substituting "N/A" wherever a field was
 * not supplied (NULL pointer / zero count / brick offline). */
2132 cli_print_detailed_status (cli_volume_status_t *status)
2134 cli_out ("%-20s : %-20s", "Brick", status->brick);
/* Ports are only meaningful while the brick process is online. */
2136 if (status->online) {
2137 cli_out ("%-20s : %-20d", "TCP Port", status->port);
2138 cli_out ("%-20s : %-20d", "RDMA Port", status->rdma_port);
2140 cli_out ("%-20s : %-20s", "TCP Port", "N/A");
2141 cli_out ("%-20s : %-20s", "RDMA Port", "N/A");
2144 cli_out ("%-20s : %-20c", "Online", (status->online) ? 'Y' : 'N');
2145 cli_out ("%-20s : %-20s", "Pid", status->pid_str);
/* Filesystem-level details are collected on Linux only (see
 * cli_get_detail_status). */
2147 #ifdef GF_LINUX_HOST_OS
2148 if (status->fs_name)
2149 cli_out ("%-20s : %-20s", "File System", status->fs_name);
2151 cli_out ("%-20s : %-20s", "File System", "N/A");
2154 cli_out ("%-20s : %-20s", "Device", status->device);
2156 cli_out ("%-20s : %-20s", "Device", "N/A");
2158 if (status->mount_options) {
2159 cli_out ("%-20s : %-20s", "Mount Options",
2160 status->mount_options);
2162 cli_out ("%-20s : %-20s", "Mount Options", "N/A");
2165 if (status->inode_size) {
2166 cli_out ("%-20s : %-20s", "Inode Size",
2167 status->inode_size);
2169 cli_out ("%-20s : %-20s", "Inode Size", "N/A");
2173 cli_out ("%-20s : %-20s", "Disk Space Free", status->free);
2175 cli_out ("%-20s : %-20s", "Disk Space Free", "N/A");
2178 cli_out ("%-20s : %-20s", "Total Disk Space", status->total);
2180 cli_out ("%-20s : %-20s", "Total Disk Space", "N/A");
/* A zero inode count is treated as "not reported", hence "N/A". */
2183 if (status->total_inodes) {
2184 cli_out ("%-20s : %-20"GF_PRI_INODE, "Inode Count",
2185 status->total_inodes);
2187 cli_out ("%-20s : %-20s", "Inode Count", "N/A");
2190 if (status->free_inodes) {
2191 cli_out ("%-20s : %-20"GF_PRI_INODE, "Free Inodes",
2192 status->free_inodes);
2194 cli_out ("%-20s : %-20s", "Free Inodes", "N/A");
/* Print one row of the tabular "volume status" output. Long brick paths
 * are wrapped across multiple lines of width CLI_VOL_STATUS_BRICK_LEN;
 * the port/pid columns are emitted on the final (short) chunk, padded to
 * the field width with spaces. Offline bricks show "N/A" for the ports. */
2199 cli_print_brick_status (cli_volume_status_t *status)
2201 int fieldlen = CLI_VOL_STATUS_BRICK_LEN;
2207 bricklen = strlen (p);
2208 while (bricklen > 0) {
/* Full-width chunk: print it alone and continue wrapping. */
2209 if (bricklen > fieldlen) {
2210 cli_out ("%.*s", fieldlen, p);
2212 bricklen -= fieldlen;
/* Last chunk: pad out the brick column, then append the
 * port / pid / online columns on the same line. */
2214 num_spaces = (fieldlen - bricklen) + 1;
2216 while (num_spaces-- != 0)
2218 if (status->port || status->rdma_port) {
2220 cli_out ("%-10d%-11d%-8c%-5s",
2223 status->online?'Y':'N',
2226 cli_out ("%-10s%-11s%-8c%-5s",
2229 status->online?'Y':'N',
2233 cli_out ("%-10s%-11s%-8c%-5s",
2234 "N/A", "N/A", status->online?'Y':'N',
/* True for heal operations that are served by the external glfsheal
 * binary (see cli_launch_glfs_heal) rather than by a glusterd RPC. */
2243 #define NEEDS_GLFS_HEAL(op) ((op == GF_SHD_OP_SBRAIN_HEAL_FROM_BIGGER_FILE) || \
2244 (op == GF_SHD_OP_SBRAIN_HEAL_FROM_BRICK) || \
2245 (op == GF_SHD_OP_INDEX_SUMMARY) || \
2246 (op == GF_SHD_OP_SPLIT_BRAIN_FILES))
/* Run the external "glfsheal <volname> ..." helper for the heal
 * operations that cannot be served over the glusterd RPC, relaying its
 * stdout to the user and returning its exit status.
 * NOTE(review): elided listing — error checks on the dict_get calls and
 * the output-relay loop body are not visible here. */
2249 cli_launch_glfs_heal (int heal_op, dict_t *options)
2251 char buff[PATH_MAX] = {0};
2252 runner_t runner = {0};
2253 char *filename = NULL;
2254 char *hostname = NULL;
2256 char *volname = NULL;
2261 ret = dict_get_str (options, "volname", &volname);
2262 runner_add_args (&runner, SBIN_DIR"/glfsheal", volname, NULL);
/* Pipe glfsheal's stdout so the CLI can echo it line by line. */
2263 runner_redir (&runner, STDOUT_FILENO, RUN_PIPE);
/* Translate the heal op into glfsheal's command-line sub-commands. */
2266 case GF_SHD_OP_INDEX_SUMMARY:
2268 case GF_SHD_OP_SBRAIN_HEAL_FROM_BIGGER_FILE:
2269 ret = dict_get_str (options, "file", &filename);
2270 runner_add_args (&runner, "bigger-file", filename, NULL);
2272 case GF_SHD_OP_SBRAIN_HEAL_FROM_BRICK:
2273 ret = dict_get_str (options, "heal-source-hostname",
2275 ret = dict_get_str (options, "heal-source-brickpath",
2277 runner_add_args (&runner, "source-brick", NULL);
2278 runner_argprintf (&runner, "%s:%s", hostname, path);
/* The file argument is optional for source-brick healing. */
2279 if (dict_get_str (options, "file", &filename) == 0)
2280 runner_argprintf (&runner, filename);
2282 case GF_SHD_OP_SPLIT_BRAIN_FILES:
2283 runner_add_args (&runner, "split-brain-info", NULL);
2288 ret = runner_start (&runner);
2291 while ((out = fgets (buff, sizeof(buff),
2292 runner_chio (&runner, STDOUT_FILENO)))) {
/* Propagate glfsheal's exit code as this function's result. */
2295 ret = runner_end (&runner);
2296 ret = WEXITSTATUS (ret);
/* Handler for "volume heal": parse the heal sub-command, then either run
 * the external glfsheal helper (for split-brain / info operations, see
 * NEEDS_GLFS_HEAL) or send GLUSTER_CLI_HEAL_VOLUME over RPC.
 * NOTE(review): elided listing — intermediate error checks not visible. */
2302 cli_cmd_volume_heal_cbk (struct cli_state *state, struct cli_cmd_word *word,
2303 const char **words, int wordcount)
2306 rpc_clnt_procedure_t *proc = NULL;
2307 call_frame_t *frame = NULL;
2309 int parse_error = 0;
2310 dict_t *options = NULL;
2311 xlator_t *this = NULL;
2312 cli_local_t *local = NULL;
2316 frame = create_frame (this, this->ctx->pool);
/* At least "volume heal <VOLNAME>". */
2320 if (wordcount < 3) {
2321 cli_usage_out (word->pattern);
2326 ret = cli_cmd_volume_heal_options_parse (words, wordcount, &options);
2328 cli_usage_out (word->pattern);
2332 ret = dict_get_int32 (options, "heal-op", &heal_op);
/* Split-brain resolution and heal-info go through glfsheal, not RPC. */
2335 if (NEEDS_GLFS_HEAL (heal_op)) {
2336 ret = cli_launch_glfs_heal (heal_op, options);
2341 proc = &cli_rpc_prog->proctable[GLUSTER_CLI_HEAL_VOLUME];
2343 CLI_LOCAL_INIT (local, words, frame, options);
2346 ret = proc->fn (frame, THIS, options);
/* Report only when the request never went out and parsing succeeded. */
2352 cli_cmd_sent_status_get (&sent);
2353 if ((sent == 0) && (parse_error == 0))
2354 cli_out ("Volume heal failed.");
2357 CLI_STACK_DESTROY (frame);
/* Handler for "volume statedump": parse the optional dump targets, store
 * the volume name in the options dict, and invoke
 * GLUSTER_CLI_STATEDUMP_VOLUME.
 * NOTE(review): elided listing — error checks between statements are not
 * visible. */
2363 cli_cmd_volume_statedump_cbk (struct cli_state *state, struct cli_cmd_word *word,
2364 const char **words, int wordcount)
2367 rpc_clnt_procedure_t *proc = NULL;
2368 call_frame_t *frame = NULL;
2369 dict_t *options = NULL;
2371 int parse_error = 0;
2372 cli_local_t *local = NULL;
2374 frame = create_frame (THIS, THIS->ctx->pool);
2378 if (wordcount < 3) {
2379 cli_usage_out (word->pattern);
2384 if (wordcount >= 3) {
2385 ret = cli_cmd_volume_statedump_options_parse (words, wordcount,
2389 gf_log ("cli", GF_LOG_ERROR, "Error parsing "
2390 "statedump options");
2391 cli_out ("Error parsing options");
2392 cli_usage_out (word->pattern);
/* words[2] is the volume name for this command's syntax. */
2396 ret = dict_set_str (options, "volname", (char *)words[2]);
2400 proc = &cli_rpc_prog->proctable[GLUSTER_CLI_STATEDUMP_VOLUME];
2402 CLI_LOCAL_INIT (local, words, frame, options);
2405 ret = proc->fn (frame, THIS, options);
/* Report only when the request never went out and parsing succeeded. */
2410 cli_cmd_sent_status_get (&sent);
2411 if ((sent == 0) && (parse_error == 0))
2412 cli_out ("Volume statedump failed");
2415 CLI_STACK_DESTROY (frame);
/* Handler for "volume list": no arguments to parse — just invoke the
 * GLUSTER_CLI_LIST_VOLUME procedure with a NULL options dict. */
2421 cli_cmd_volume_list_cbk (struct cli_state *state, struct cli_cmd_word *word,
2422 const char **words, int wordcount)
2425 call_frame_t *frame = NULL;
2426 rpc_clnt_procedure_t *proc = NULL;
2429 frame = create_frame (THIS, THIS->ctx->pool);
2433 proc = &cli_rpc_prog->proctable[GLUSTER_CLI_LIST_VOLUME];
2435 ret = proc->fn (frame, THIS, NULL);
/* Only report failure when the request was never actually sent. */
2440 cli_cmd_sent_status_get (&sent);
2442 cli_out ("Volume list failed");
2445 CLI_STACK_DESTROY (frame);
/* Handler for "volume clear-locks": validate the word count, parse the
 * lock kind/type options, record volume name and path, then invoke
 * GLUSTER_CLI_CLRLOCKS_VOLUME.
 * NOTE(review): elided listing — error checks between the dict_set calls
 * are not visible. */
2451 cli_cmd_volume_clearlocks_cbk (struct cli_state *state,
2452 struct cli_cmd_word *word,
2453 const char **words, int wordcount)
2456 rpc_clnt_procedure_t *proc = NULL;
2457 call_frame_t *frame = NULL;
2458 dict_t *options = NULL;
2460 int parse_error = 0;
2461 cli_local_t *local = NULL;
2463 frame = create_frame (THIS, THIS->ctx->pool);
/* Syntax requires 7 words plus one optional range/basename argument. */
2467 if (wordcount < 7 || wordcount > 8) {
2468 cli_usage_out (word->pattern);
2473 ret = cli_cmd_volume_clrlks_opts_parse (words, wordcount, &options);
2476 gf_log ("cli", GF_LOG_ERROR, "Error parsing "
2477 "clear-locks options");
2478 cli_out ("Error parsing options");
2479 cli_usage_out (word->pattern);
/* words[2] = volume, words[3] = path whose locks are to be cleared. */
2482 ret = dict_set_str (options, "volname", (char *)words[2]);
2486 ret = dict_set_str (options, "path", (char *)words[3]);
2490 proc = &cli_rpc_prog->proctable[GLUSTER_CLI_CLRLOCKS_VOLUME];
2492 CLI_LOCAL_INIT (local, words, frame, options);
2495 ret = proc->fn (frame, THIS, options);
/* Report only when the request never went out and parsing succeeded. */
2500 cli_cmd_sent_status_get (&sent);
2501 if ((sent == 0) && (parse_error == 0))
2502 cli_out ("Volume clear-locks failed");
2505 CLI_STACK_DESTROY (frame);
/* Handler for "volume barrier <VOLNAME> {enable|disable}": build the
 * options dict by hand (no parse helper needed for two fixed arguments)
 * and invoke GLUSTER_CLI_BARRIER_VOLUME. The dict is unref'd here
 * because it was created locally with dict_new.
 * NOTE(review): elided listing — error checks between statements are not
 * visible. */
2511 cli_cmd_volume_barrier_cbk (struct cli_state *state, struct cli_cmd_word *word,
2512 const char **words, int wordcount)
2515 rpc_clnt_procedure_t *proc = NULL;
2516 call_frame_t *frame = NULL;
2517 dict_t *options = NULL;
2519 int parse_error = 0;
2520 cli_local_t *local = NULL;
2522 frame = create_frame (THIS, THIS->ctx->pool);
2526 if (wordcount != 4) {
2527 cli_usage_out (word->pattern);
2532 options = dict_new();
/* words[2] = volume name, words[3] = "enable" or "disable". */
2537 ret = dict_set_str(options, "volname", (char *)words[2]);
2541 ret = dict_set_str (options, "barrier", (char *)words[3]);
2545 proc = &cli_rpc_prog->proctable[GLUSTER_CLI_BARRIER_VOLUME];
2547 CLI_LOCAL_INIT (local, words, frame, options);
2550 ret = proc->fn (frame, THIS, options);
2554 cli_cmd_sent_status_get (&sent);
2555 if ((sent == 0) && (parse_error == 0))
2556 cli_err ("Volume barrier failed");
2558 CLI_STACK_DESTROY (frame);
2560 dict_unref (options);
/* Handler for "volume get <VOLNAME> <key|all>": build the options dict
 * with the volume name and requested key, then invoke
 * GLUSTER_CLI_GET_VOL_OPT. Locally-created dict is unref'd on exit.
 * NOTE(review): elided listing — error checks between statements are not
 * visible. */
2566 cli_cmd_volume_getopt_cbk (struct cli_state *state, struct cli_cmd_word *word,
2567 const char **words, int wordcount)
2570 rpc_clnt_procedure_t *proc = NULL;
2571 call_frame_t *frame = NULL;
2572 dict_t *options = NULL;
2575 cli_local_t *local = NULL;
2577 if (wordcount != 4) {
2578 cli_usage_out (word->pattern);
2583 frame = create_frame (THIS, THIS->ctx->pool);
2587 options = dict_new ();
/* words[2] = volume name, words[3] = option key (or "all"). */
2591 ret = dict_set_str (options, "volname", (char *)words[2]);
2595 ret = dict_set_str (options, "key", (char *)words[3]);
2599 proc = &cli_rpc_prog->proctable[GLUSTER_CLI_GET_VOL_OPT];
2601 CLI_LOCAL_INIT (local, words, frame, options);
2604 ret = proc->fn (frame, THIS, options);
/* Report only when the request never went out and parsing succeeded. */
2608 cli_cmd_sent_status_get (&sent);
2609 if ((sent == 0) && (parse_err == 0))
2610 cli_err ("Volume get option failed");
2612 CLI_STACK_DESTROY (frame);
2614 dict_unref (options);
/* Table of all "volume ..." CLI commands: each entry is a pattern string
 * (used for matching and usage output), the handler callback, a help
 * description, and optionally a registration-time callback that can
 * disable the command (used for geo-replication). Terminated by a NULL
 * sentinel entry; consumed by cli_cmd_volume_register and
 * cli_cmd_volume_help_cbk below. */
2618 struct cli_cmd volume_cmds[] = {
2619         { "volume info [all|<VOLNAME>]",
2620 cli_cmd_volume_info_cbk,
2621 "list information of all volumes"},
2623 { "volume create <NEW-VOLNAME> [stripe <COUNT>] "
2624 "[replica <COUNT> [arbiter <COUNT>]] "
2625 "[disperse [<COUNT>]] [disperse-data <COUNT>] [redundancy <COUNT>] "
2626 "[transport <tcp|rdma|tcp,rdma>] <NEW-BRICK>"
2627 #ifdef HAVE_BD_XLATOR
2632 cli_cmd_volume_create_cbk,
2633 "create a new volume of specified type with mentioned bricks"},
2635 { "volume delete <VOLNAME>",
2636 cli_cmd_volume_delete_cbk,
2637 "delete volume specified by <VOLNAME>"},
2639 { "volume start <VOLNAME> [force]",
2640 cli_cmd_volume_start_cbk,
2641 "start volume specified by <VOLNAME>"},
2643 { "volume stop <VOLNAME> [force]",
2644 cli_cmd_volume_stop_cbk,
2645 "stop volume specified by <VOLNAME>"},
2647 /*{ "volume rename <VOLNAME> <NEW-VOLNAME>",
2648 cli_cmd_volume_rename_cbk,
2649 "rename volume <VOLNAME> to <NEW-VOLNAME>"},*/
2651 { "volume tier <VOLNAME> status\n"
2652 "volume tier <VOLNAME> attach [<replica COUNT>] <NEW-BRICK>...\n"
2653 "volume tier <VOLNAME> detach <start|stop|status|commit|[force]>\n",
2654 cli_cmd_volume_tier_cbk,
2655 "Tier translator specific operations."},
2657 { "volume attach-tier <VOLNAME> [<replica COUNT>] <NEW-BRICK>...",
2658 cli_cmd_volume_tier_cbk,
2659 "NOTE: this is old syntax, will be depreciated in next release. "
2660 "Please use gluster volume tier <vol> attach "
2661 "[<replica COUNT>] <NEW-BRICK>..."},
2663 { "volume detach-tier <VOLNAME> "
2664 " <start|stop|status|commit|force>",
2665 cli_cmd_volume_tier_cbk,
2666 "NOTE: this is old syntax, will be depreciated in next release. "
2667 "Please use gluster volume tier <vol> detach "
2668 "{start|stop|commit} [force]"},
2670 { "volume add-brick <VOLNAME> [<stripe|replica> <COUNT>] <NEW-BRICK> ... [force]",
2671 cli_cmd_volume_add_brick_cbk,
2672 "add brick to volume <VOLNAME>"},
2674 { "volume remove-brick <VOLNAME> [replica <COUNT>] <BRICK> ..."
2675 " <start|stop|status|commit|force>",
2676 cli_cmd_volume_remove_brick_cbk,
2677 "remove brick from volume <VOLNAME>"},
2679 { "volume rebalance <VOLNAME> {{fix-layout start} | {start [force]|stop|status}}",
2680 cli_cmd_volume_defrag_cbk,
2681 "rebalance operations"},
2683 { "volume replace-brick <VOLNAME> <SOURCE-BRICK> <NEW-BRICK> "
2685 cli_cmd_volume_replace_brick_cbk,
2686 "replace-brick operations"},
2688 /*{ "volume set-transport <VOLNAME> <TRANSPORT-TYPE> [<TRANSPORT-TYPE>] ...",
2689 cli_cmd_volume_set_transport_cbk,
2690 "set transport type for volume <VOLNAME>"},*/
2692 { "volume set <VOLNAME> <KEY> <VALUE>",
2693 cli_cmd_volume_set_cbk,
2694 "set options for volume <VOLNAME>"},
2697 cli_cmd_volume_help_cbk,
2698 "display help for the volume command"},
2700 { "volume log <VOLNAME> rotate [BRICK]",
2701 cli_cmd_log_rotate_cbk,
2702 "rotate the log file for corresponding volume/brick"},
2704 { "volume log rotate <VOLNAME> [BRICK]",
2705 cli_cmd_log_rotate_cbk,
2706 "rotate the log file for corresponding volume/brick"
2707 " NOTE: This is an old syntax, will be deprecated from next release."},
2709 { "volume sync <HOSTNAME> [all|<VOLNAME>]",
2710 cli_cmd_sync_volume_cbk,
2711 "sync the volume information from a peer"},
2713 { "volume reset <VOLNAME> [option] [force]",
2714 cli_cmd_volume_reset_cbk,
2715 "reset all the reconfigured options"},
2717 #if (SYNCDAEMON_COMPILE)
2718 {"volume "GEOREP" [<VOLNAME>] [<SLAVE-URL>] {create [[no-verify]|[push-pem]] [force]"
2719 "|start [force]|stop [force]|pause [force]|resume [force]|config|status [detail]|delete} [options...]",
2720 cli_cmd_volume_gsync_set_cbk,
2721 "Geo-sync operations",
2722 cli_cmd_check_gsync_exists_cbk},
2725 { "volume profile <VOLNAME> {start|info [peek|incremental [peek]|cumulative|clear]|stop} [nfs]",
2726 cli_cmd_volume_profile_cbk,
2727 "volume profile operations"},
2729 { "volume quota <VOLNAME> {enable|disable|list [<path> ...]| "
2730 "list-objects [<path> ...] | remove <path>| remove-objects <path> | "
2731 "default-soft-limit <percent>} |\n"
2732 "volume quota <VOLNAME> {limit-usage <path> <size> [<percent>]} |\n"
2733 "volume quota <VOLNAME> {limit-objects <path> <number> [<percent>]} |\n"
2734 "volume quota <VOLNAME> {alert-time|soft-timeout|hard-timeout} {<time>}",
2736 "quota translator specific operations"},
2738 { "volume inode-quota <VOLNAME> enable",
2740 "quota translator specific operations"},
2742 { "volume top <VOLNAME> {open|read|write|opendir|readdir|clear} [nfs|brick <brick>] [list-cnt <value>] |\n"
2743 "volume top <VOLNAME> {read-perf|write-perf} [bs <size> count <count>] [brick <brick>] [list-cnt <value>]",
2744 cli_cmd_volume_top_cbk,
2745 "volume top operations"},
2747 { "volume status [all | <VOLNAME> [nfs|shd|<BRICK>|quotad]]"
2748 " [detail|clients|mem|inode|fd|callpool|tasks]",
2749 cli_cmd_volume_status_cbk,
2750 "display status of all or specified volume(s)/brick"},
2752 { "volume heal <VOLNAME> [enable | disable | full |"
2753 "statistics [heal-count [replica <HOSTNAME:BRICKNAME>]] |"
2754 "info [healed | heal-failed | split-brain] |"
2755 "split-brain {bigger-file <FILE> |"
2756 "source-brick <HOSTNAME:BRICKNAME> [<FILE>]}]",
2757 cli_cmd_volume_heal_cbk,
2758 "self-heal commands on volume specified by <VOLNAME>"},
2760 {"volume statedump <VOLNAME> [nfs|quotad] [all|mem|iobuf|callpool|priv|fd|"
2761 "inode|history]...",
2762 cli_cmd_volume_statedump_cbk,
2763 "perform statedump on bricks"},
2766 cli_cmd_volume_list_cbk,
2767 "list all volumes in cluster"},
2769 {"volume clear-locks <VOLNAME> <path> kind {blocked|granted|all}"
2770 "{inode [range]|entry [basename]|posix [range]}",
2771 cli_cmd_volume_clearlocks_cbk,
2772 "Clear locks held on path"
2774 {"volume barrier <VOLNAME> {enable|disable}",
2775 cli_cmd_volume_barrier_cbk,
2776 "Barrier/unbarrier file operations on a volume"
2778 {"volume get <VOLNAME> <key|all>",
2779 cli_cmd_volume_getopt_cbk,
2780 "Get the value of the all options or given option for volume <VOLNAME>"
2782 {"volume bitrot <VOLNAME> {enable|disable} |\n"
2783 "volume bitrot <volname> scrub-throttle {lazy|normal|aggressive} |\n"
2784 "volume bitrot <volname> scrub-frequency {hourly|daily|weekly|biweekly"
2786 "volume bitrot <volname> scrub {pause|resume}",
2788 "Bitrot translator specific operation. For more information about "
2789 "bitrot command type 'man gluster'"
/* NULL-pattern sentinel terminates iteration over this table. */
2791 { NULL, NULL, NULL }
/* Handler for "volume help": print "<pattern> - <description>" for every
 * enabled entry in the volume_cmds table (commands whose registration
 * callback set disable, e.g. geo-rep without gsyncd, are skipped). */
2795 cli_cmd_volume_help_cbk (struct cli_state *state, struct cli_cmd_word *in_word,
2796 const char **words, int wordcount)
2798 struct cli_cmd *cmd = NULL;
2800 for (cmd = volume_cmds; cmd->pattern; cmd++)
2801 if (_gf_false == cmd->disable)
2802 cli_out ("%s - %s", cmd->pattern, cmd->desc);
2808 cli_cmd_volume_register (struct cli_state *state)
2811 struct cli_cmd *cmd = NULL;
2813 for (cmd = volume_cmds; cmd->pattern; cmd++) {
2815 ret = cli_cmd_register (&state->tree, cmd);