summaryrefslogtreecommitdiffstats
path: root/xlators/mgmt/glusterd/src/glusterd-op-sm.c
diff options
context:
space:
mode:
Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-op-sm.c')
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-op-sm.c | 1314 lines changed
1 files changed, 594 insertions, 720 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index af6194de4be..c537fc33a85 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -21,39 +21,33 @@
#include "protocol-common.h"
#include "glusterd.h"
#include <glusterfs/call-stub.h>
-#include <glusterfs/defaults.h>
#include <glusterfs/list.h>
#include <glusterfs/dict.h>
#include <glusterfs/compat.h>
#include <glusterfs/compat-errno.h>
#include <glusterfs/statedump.h>
-#include "glusterd-sm.h"
#include "glusterd-op-sm.h"
#include "glusterd-utils.h"
#include "glusterd-store.h"
-#include "glusterd-hooks.h"
-#include "glusterd-volgen.h"
#include "glusterd-locks.h"
-#include "glusterd-messages.h"
-#include "glusterd-utils.h"
#include "glusterd-quota.h"
#include <glusterfs/syscall.h>
#include "cli1-xdr.h"
-#include <glusterfs/common-utils.h>
-#include <glusterfs/run.h>
#include "glusterd-snapshot-utils.h"
#include "glusterd-svc-mgmt.h"
#include "glusterd-svc-helper.h"
+#include "glusterd-shd-svc-helper.h"
#include "glusterd-shd-svc.h"
-#include "glusterd-nfs-svc.h"
#include "glusterd-quotad-svc.h"
#include "glusterd-server-quorum.h"
-#include "glusterd-volgen.h"
#include <sys/types.h>
#include <signal.h>
#include <sys/wait.h>
#include "glusterd-gfproxyd-svc-helper.h"
+#define len_strcmp(key, len, str) \
+ ((len == SLEN(str)) && (strcmp(key, str) == 0))
+
extern char local_node_hostname[PATH_MAX];
static int
glusterd_set_shared_storage(dict_t *dict, char *key, char *value,
@@ -67,7 +61,7 @@ glusterd_set_shared_storage(dict_t *dict, char *key, char *value,
* It's important that every value have a default, or have a special handler
* in glusterd_get_global_options_for_all_vols, or else we might crash there.
*/
-glusterd_all_vol_opts valid_all_vol_opts[] = {
+const glusterd_all_vol_opts valid_all_vol_opts[] = {
{GLUSTERD_QUORUM_RATIO_KEY, "51"},
{GLUSTERD_SHARED_STORAGE_KEY, "disable"},
/* This one actually gets filled in dynamically. */
@@ -86,6 +80,7 @@ glusterd_all_vol_opts valid_all_vol_opts[] = {
* TBD: Discuss the default value for this. Maybe this should be a
* dynamic value depending on the memory specifications per node */
{GLUSTERD_BRICKMUX_LIMIT_KEY, GLUSTERD_BRICKMUX_LIMIT_DFLT_VALUE},
+ {GLUSTERD_VOL_CNT_PER_THRD, GLUSTERD_VOL_CNT_PER_THRD_DEFAULT_VALUE},
{GLUSTERD_LOCALTIME_LOGGING_KEY, "disable"},
{GLUSTERD_DAEMON_LOG_LEVEL_KEY, "INFO"},
{NULL},
@@ -97,14 +92,6 @@ glusterd_op_info_t opinfo = {
{0},
};
-int
-glusterd_bricks_select_rebalance_volume(dict_t *dict, char **op_errstr,
- struct cds_list_head *selected);
-
-int
-glusterd_bricks_select_tier_volume(dict_t *dict, char **op_errstr,
- struct cds_list_head *selected);
-
int32_t
glusterd_txn_opinfo_dict_init()
{
@@ -119,6 +106,7 @@ glusterd_txn_opinfo_dict_init()
priv->glusterd_txn_opinfo = dict_new();
if (!priv->glusterd_txn_opinfo) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
ret = -1;
goto out;
}
@@ -191,8 +179,10 @@ glusterd_generate_txn_id(dict_t *dict, uuid_t **txn_id)
GF_ASSERT(dict);
*txn_id = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);
- if (!*txn_id)
+ if (!*txn_id) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
goto out;
+ }
if (priv->op_version < GD_OP_VERSION_3_6_0)
gf_uuid_copy(**txn_id, priv->global_txn_id);
@@ -403,7 +393,7 @@ glusterd_op_sm_event_name_get(int event)
return glusterd_op_sm_event_names[event];
}
-void
+static void
glusterd_destroy_lock_ctx(glusterd_op_lock_ctx_t *ctx)
{
if (!ctx)
@@ -422,56 +412,49 @@ glusterd_set_volume_status(glusterd_volinfo_t *volinfo,
static int
glusterd_op_sm_inject_all_acc(uuid_t *txn_id)
{
- int32_t ret = -1;
+ int ret = -1;
ret = glusterd_op_sm_inject_event(GD_OP_EVENT_ALL_ACC, txn_id, NULL);
gf_msg_debug("glusterd", 0, "Returning %d", ret);
return ret;
}
static int
-glusterd_check_bitrot_cmd(char *key, char *value, char *errstr, size_t size)
+glusterd_check_bitrot_cmd(char *key, const int keylen, char *errstr,
+ const size_t size)
{
int ret = -1;
- if ((!strncmp(key, "bitrot", SLEN("bitrot"))) ||
- (!strncmp(key, "features.bitrot", SLEN("features.bitrot")))) {
+ if (len_strcmp(key, keylen, "bitrot") ||
+ len_strcmp(key, keylen, "features.bitrot")) {
snprintf(errstr, size,
- " 'gluster volume set <VOLNAME> %s' "
- "is invalid command. Use 'gluster volume bitrot "
- "<VOLNAME> {enable|disable}' instead.",
+ " 'gluster volume set <VOLNAME> %s' is invalid command."
+ " Use 'gluster volume bitrot <VOLNAME> {enable|disable}'"
+ " instead.",
key);
- ret = -1;
goto out;
- } else if ((!strncmp(key, "scrub-freq", SLEN("scrub-freq"))) ||
- (!strncmp(key, "features.scrub-freq",
- SLEN("features.scrub-freq")))) {
+ } else if (len_strcmp(key, keylen, "scrub-freq") ||
+ len_strcmp(key, keylen, "features.scrub-freq")) {
snprintf(errstr, size,
- " 'gluster volume "
- "set <VOLNAME> %s' is invalid command. Use 'gluster "
- "volume bitrot <VOLNAME> scrub-frequency"
+ " 'gluster volume set <VOLNAME> %s' is invalid command."
+ " Use 'gluster volume bitrot <VOLNAME> scrub-frequency"
" {hourly|daily|weekly|biweekly|monthly}' instead.",
key);
- ret = -1;
goto out;
- } else if ((!strncmp(key, "scrub", SLEN("scrub"))) ||
- (!strncmp(key, "features.scrub", SLEN("features.scrub")))) {
+ } else if (len_strcmp(key, keylen, "scrub") ||
+ len_strcmp(key, keylen, "features.scrub")) {
snprintf(errstr, size,
- " 'gluster volume set <VOLNAME> %s' is "
- "invalid command. Use 'gluster volume bitrot "
- "<VOLNAME> scrub {pause|resume}' instead.",
+ " 'gluster volume set <VOLNAME> %s' is invalid command."
+ " Use 'gluster volume bitrot <VOLNAME> scrub {pause|resume}'"
+ " instead.",
key);
- ret = -1;
goto out;
- } else if ((!strncmp(key, "scrub-throttle", SLEN("scrub-throttle"))) ||
- (!strncmp(key, "features.scrub-throttle",
- SLEN("features.scrub-throttle")))) {
+ } else if (len_strcmp(key, keylen, "scrub-throttle") ||
+ len_strcmp(key, keylen, "features.scrub-throttle")) {
snprintf(errstr, size,
- " 'gluster volume set <VOLNAME> %s' is "
- "invalid command. Use 'gluster volume bitrot "
- "<VOLNAME> scrub-throttle {lazy|normal|aggressive}' "
- "instead.",
+ " 'gluster volume set <VOLNAME> %s' is invalid command."
+ " Use 'gluster volume bitrot <VOLNAME> scrub-throttle "
+ " {lazy|normal|aggressive}' instead.",
key);
- ret = -1;
goto out;
}
@@ -481,61 +464,52 @@ out:
}
static int
-glusterd_check_quota_cmd(char *key, char *value, char *errstr, size_t size)
+glusterd_check_quota_cmd(char *key, const int keylen, char *value, char *errstr,
+ size_t size)
{
int ret = -1;
gf_boolean_t b = _gf_false;
- if ((strcmp(key, "quota") == 0) || (strcmp(key, "features.quota") == 0)) {
+ if (len_strcmp(key, keylen, "quota") ||
+ len_strcmp(key, keylen, "features.quota")) {
ret = gf_string2boolean(value, &b);
if (ret)
goto out;
+ ret = -1;
if (b) {
snprintf(errstr, size,
- " 'gluster "
- "volume set <VOLNAME> %s %s' is "
- "deprecated. Use 'gluster volume "
- "quota <VOLNAME> enable' instead.",
+ " 'gluster volume set <VOLNAME> %s %s' is deprecated."
+ " Use 'gluster volume quota <VOLNAME> enable' instead.",
key, value);
- ret = -1;
- goto out;
} else {
snprintf(errstr, size,
- " 'gluster "
- "volume set <VOLNAME> %s %s' is "
- "deprecated. Use 'gluster volume "
- "quota <VOLNAME> disable' instead.",
+ " 'gluster volume set <VOLNAME> %s %s' is deprecated."
+ " Use 'gluster volume quota <VOLNAME> disable' instead.",
key, value);
- ret = -1;
- goto out;
}
- } else if ((strcmp(key, "inode-quota") == 0) ||
- (strcmp(key, "features.inode-quota") == 0)) {
+ goto out;
+ } else if (len_strcmp(key, keylen, "inode-quota") ||
+ len_strcmp(key, keylen, "features.inode-quota")) {
ret = gf_string2boolean(value, &b);
if (ret)
goto out;
+ ret = -1;
if (b) {
- snprintf(errstr, size,
- " 'gluster "
- "volume set <VOLNAME> %s %s' is "
- "deprecated. Use 'gluster volume "
- "inode-quota <VOLNAME> enable' instead.",
- key, value);
- ret = -1;
- goto out;
+ snprintf(
+ errstr, size,
+ " 'gluster volume set <VOLNAME> %s %s' is deprecated."
+ " Use 'gluster volume inode-quota <VOLNAME> enable' instead.",
+ key, value);
} else {
/* inode-quota disable not supported,
* use quota disable
*/
snprintf(errstr, size,
- " 'gluster "
- "volume set <VOLNAME> %s %s' is "
- "deprecated. Use 'gluster volume "
- "quota <VOLNAME> disable' instead.",
+ " 'gluster volume set <VOLNAME> %s %s' is deprecated."
+ " Use 'gluster volume quota <VOLNAME> disable' instead.",
key, value);
- ret = -1;
- goto out;
}
+ goto out;
}
ret = 0;
@@ -570,8 +544,11 @@ glusterd_brick_op_build_payload(glusterd_op_t op,
case GD_OP_STOP_VOLUME:
brick_req = GF_CALLOC(1, sizeof(*brick_req),
gf_gld_mt_mop_brick_req_t);
- if (!brick_req)
+ if (!brick_req) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
+ NULL);
goto out;
+ }
brick_req->op = GLUSTERD_BRICK_TERMINATE;
brick_req->name = brickinfo->path;
glusterd_set_brick_status(brickinfo, GF_BRICK_STOPPING);
@@ -580,8 +557,11 @@ glusterd_brick_op_build_payload(glusterd_op_t op,
brick_req = GF_CALLOC(1, sizeof(*brick_req),
gf_gld_mt_mop_brick_req_t);
- if (!brick_req)
+ if (!brick_req) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
+ NULL);
goto out;
+ }
brick_req->op = GLUSTERD_BRICK_XLATOR_INFO;
brick_req->name = brickinfo->path;
@@ -590,51 +570,70 @@ glusterd_brick_op_build_payload(glusterd_op_t op,
case GD_OP_HEAL_VOLUME: {
brick_req = GF_CALLOC(1, sizeof(*brick_req),
gf_gld_mt_mop_brick_req_t);
- if (!brick_req)
+ if (!brick_req) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
+ NULL);
goto out;
+ }
brick_req->op = GLUSTERD_BRICK_XLATOR_OP;
brick_req->name = "";
ret = dict_get_int32n(dict, "heal-op", SLEN("heal-op"),
(int32_t *)&heal_op);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=heal-op", NULL);
goto out;
+ }
ret = dict_set_int32n(dict, "xl-op", SLEN("xl-op"), heal_op);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=xl-op", NULL);
goto out;
+ }
} break;
case GD_OP_STATUS_VOLUME: {
brick_req = GF_CALLOC(1, sizeof(*brick_req),
gf_gld_mt_mop_brick_req_t);
- if (!brick_req)
+ if (!brick_req) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
+ NULL);
goto out;
+ }
brick_req->op = GLUSTERD_BRICK_STATUS;
brick_req->name = "";
ret = dict_set_strn(dict, "brick-name", SLEN("brick-name"),
brickinfo->path);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=brick-name", NULL);
goto out;
+ }
} break;
case GD_OP_REBALANCE:
- case GD_OP_DETACH_TIER_STATUS:
- case GD_OP_TIER_STATUS:
case GD_OP_DEFRAG_BRICK_VOLUME:
brick_req = GF_CALLOC(1, sizeof(*brick_req),
gf_gld_mt_mop_brick_req_t);
- if (!brick_req)
+ if (!brick_req) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
+ NULL);
goto out;
+ }
brick_req->op = GLUSTERD_BRICK_XLATOR_DEFRAG;
ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=volname", NULL);
goto out;
+ }
ret = glusterd_volinfo_find(volname, &volinfo);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_VOLINFO_GET_FAIL, "Volume=%s", volname, NULL);
goto out;
- if (volinfo->type == GF_CLUSTER_TYPE_TIER)
- snprintf(name, sizeof(name), "%s-tier-dht", volname);
- else
- snprintf(name, sizeof(name), "%s-dht", volname);
+ }
+ snprintf(name, sizeof(name), "%s-dht", volname);
brick_req->name = gf_strdup(name);
break;
@@ -642,8 +641,11 @@ glusterd_brick_op_build_payload(glusterd_op_t op,
case GD_OP_BARRIER:
brick_req = GF_CALLOC(1, sizeof(*brick_req),
gf_gld_mt_mop_brick_req_t);
- if (!brick_req)
+ if (!brick_req) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
+ NULL);
goto out;
+ }
brick_req->op = GLUSTERD_BRICK_BARRIER;
brick_req->name = brickinfo->path;
break;
@@ -653,10 +655,15 @@ glusterd_brick_op_build_payload(glusterd_op_t op,
break;
}
+ brick_req->dict.dict_len = 0;
+ brick_req->dict.dict_val = NULL;
ret = dict_allocate_and_serialize(dict, &brick_req->input.input_val,
&brick_req->input.input_len);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
goto out;
+ }
*req = brick_req;
ret = 0;
@@ -678,13 +685,19 @@ glusterd_node_op_build_payload(glusterd_op_t op, gd1_mgmt_brick_op_req **req,
GF_ASSERT(op < GD_OP_MAX);
GF_ASSERT(op > GD_OP_NONE);
GF_ASSERT(req);
+ xlator_t *this = NULL;
+ this = THIS;
+ GF_ASSERT(this);
switch (op) {
case GD_OP_PROFILE_VOLUME:
brick_req = GF_CALLOC(1, sizeof(*brick_req),
gf_gld_mt_mop_brick_req_t);
- if (!brick_req)
+ if (!brick_req) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
+ NULL);
goto out;
+ }
brick_req->op = GLUSTERD_NODE_PROFILE;
brick_req->name = "";
@@ -694,8 +707,11 @@ glusterd_node_op_build_payload(glusterd_op_t op, gd1_mgmt_brick_op_req **req,
case GD_OP_STATUS_VOLUME:
brick_req = GF_CALLOC(1, sizeof(*brick_req),
gf_gld_mt_mop_brick_req_t);
- if (!brick_req)
+ if (!brick_req) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
+ NULL);
goto out;
+ }
brick_req->op = GLUSTERD_NODE_STATUS;
brick_req->name = "";
@@ -706,14 +722,20 @@ glusterd_node_op_build_payload(glusterd_op_t op, gd1_mgmt_brick_op_req **req,
case GD_OP_SCRUB_ONDEMAND:
brick_req = GF_CALLOC(1, sizeof(*brick_req),
gf_gld_mt_mop_brick_req_t);
- if (!brick_req)
+ if (!brick_req) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
+ NULL);
goto out;
+ }
brick_req->op = GLUSTERD_NODE_BITROT;
ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=volname", NULL);
goto out;
+ }
brick_req->name = gf_strdup(volname);
break;
@@ -721,11 +743,16 @@ glusterd_node_op_build_payload(glusterd_op_t op, gd1_mgmt_brick_op_req **req,
goto out;
}
+ brick_req->dict.dict_len = 0;
+ brick_req->dict.dict_val = NULL;
ret = dict_allocate_and_serialize(dict, &brick_req->input.input_val,
&brick_req->input.input_len);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
goto out;
+ }
*req = brick_req;
ret = 0;
@@ -733,7 +760,7 @@ glusterd_node_op_build_payload(glusterd_op_t op, gd1_mgmt_brick_op_req **req,
out:
if (ret && brick_req)
GF_FREE(brick_req);
- gf_msg_debug(THIS->name, 0, "Returning %d", ret);
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
return ret;
}
@@ -749,12 +776,14 @@ glusterd_validate_quorum_options(xlator_t *this, char *fullkey, char *value,
goto out;
key = strchr(fullkey, '.');
if (key == NULL) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRCHR_FAIL, NULL);
ret = -1;
goto out;
}
key++;
opt = xlator_volume_option_get(this, key);
if (!opt) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOLINFO_GET_FAIL, NULL);
ret = -1;
goto out;
}
@@ -775,16 +804,16 @@ glusterd_validate_brick_mx_options(xlator_t *this, char *fullkey, char *value,
}
static int
-glusterd_validate_shared_storage(char *key, char *value, char *errstr)
+glusterd_validate_shared_storage(char *value, char *errstr)
{
int32_t ret = -1;
- int32_t exists = -1;
int32_t count = -1;
char *op = NULL;
char hook_script[PATH_MAX] = "";
xlator_t *this = NULL;
glusterd_conf_t *conf = NULL;
int32_t len = 0;
+ glusterd_volinfo_t *volinfo = NULL;
this = THIS;
GF_VALIDATE_OR_GOTO("glusterd", this, out);
@@ -792,16 +821,9 @@ glusterd_validate_shared_storage(char *key, char *value, char *errstr)
conf = this->private;
GF_VALIDATE_OR_GOTO(this->name, conf, out);
- GF_VALIDATE_OR_GOTO(this->name, key, out);
GF_VALIDATE_OR_GOTO(this->name, value, out);
GF_VALIDATE_OR_GOTO(this->name, errstr, out);
- ret = 0;
-
- if (strcmp(key, GLUSTERD_SHARED_STORAGE_KEY)) {
- goto out;
- }
-
if ((strcmp(value, "enable")) && (strcmp(value, "disable"))) {
snprintf(errstr, PATH_MAX,
"Invalid option(%s). Valid options "
@@ -852,8 +874,8 @@ glusterd_validate_shared_storage(char *key, char *value, char *errstr)
goto out;
}
- exists = glusterd_check_volume_exists(GLUSTER_SHARED_STORAGE);
- if (exists) {
+ ret = glusterd_volinfo_find(GLUSTER_SHARED_STORAGE, &volinfo);
+ if (!ret) {
snprintf(errstr, PATH_MAX,
"Shared storage volume(" GLUSTER_SHARED_STORAGE
") already exists.");
@@ -887,7 +909,7 @@ out:
}
static int
-glusterd_validate_localtime_logging(char *key, char *value, char *errstr)
+glusterd_validate_localtime_logging(char *value, char *errstr)
{
int32_t ret = -1;
xlator_t *this = NULL;
@@ -899,29 +921,11 @@ glusterd_validate_localtime_logging(char *key, char *value, char *errstr)
conf = this->private;
GF_VALIDATE_OR_GOTO(this->name, conf, out);
-
- GF_VALIDATE_OR_GOTO(this->name, key, out);
GF_VALIDATE_OR_GOTO(this->name, value, out);
- GF_VALIDATE_OR_GOTO(this->name, errstr, out);
-
- ret = 0;
-
- if (strcmp(key, GLUSTERD_LOCALTIME_LOGGING_KEY)) {
- goto out;
- }
-
- if ((strcmp(value, "enable")) && (strcmp(value, "disable"))) {
- snprintf(errstr, PATH_MAX,
- "Invalid option(%s). Valid options "
- "are 'enable' and 'disable'",
- value);
- gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY, "%s",
- errstr);
- ret = -1;
- }
already_enabled = gf_log_get_localtime();
+ ret = 0;
if (strcmp(value, "enable") == 0) {
gf_log_set_localtime(1);
if (!already_enabled)
@@ -932,6 +936,15 @@ glusterd_validate_localtime_logging(char *key, char *value, char *errstr)
if (already_enabled)
gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_LOCALTIME_LOGGING_DISABLE,
"localtime logging disable");
+ } else {
+ ret = -1;
+ GF_VALIDATE_OR_GOTO(this->name, errstr, out);
+ snprintf(errstr, PATH_MAX,
+ "Invalid option(%s). Valid options "
+ "are 'enable' and 'disable'",
+ value);
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY, "%s",
+ errstr);
}
out:
@@ -939,7 +952,7 @@ out:
}
static int
-glusterd_validate_daemon_log_level(char *key, char *value, char *errstr)
+glusterd_validate_daemon_log_level(char *value, char *errstr)
{
int32_t ret = -1;
xlator_t *this = NULL;
@@ -951,19 +964,15 @@ glusterd_validate_daemon_log_level(char *key, char *value, char *errstr)
conf = this->private;
GF_VALIDATE_OR_GOTO(this->name, conf, out);
- GF_VALIDATE_OR_GOTO(this->name, key, out);
GF_VALIDATE_OR_GOTO(this->name, value, out);
- GF_VALIDATE_OR_GOTO(this->name, errstr, out);
ret = 0;
- if (strcmp(key, GLUSTERD_DAEMON_LOG_LEVEL_KEY)) {
- goto out;
- }
-
if ((strcmp(value, "INFO")) && (strcmp(value, "WARNING")) &&
(strcmp(value, "DEBUG")) && (strcmp(value, "TRACE")) &&
(strcmp(value, "ERROR"))) {
+ ret = -1;
+ GF_VALIDATE_OR_GOTO(this->name, errstr, out);
snprintf(errstr, PATH_MAX,
"Invalid option(%s). Valid options "
"are 'INFO' or 'WARNING' or 'ERROR' or 'DEBUG' or "
@@ -971,7 +980,6 @@ glusterd_validate_daemon_log_level(char *key, char *value, char *errstr)
value);
gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY, "%s",
errstr);
- ret = -1;
}
out:
@@ -991,6 +999,7 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
char keystr[100] = {
0,
};
+ int keystr_len;
int keylen;
char *trash_path = NULL;
int trash_path_len = 0;
@@ -1003,6 +1012,7 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
glusterd_brickinfo_t *brickinfo = NULL;
dict_t *val_dict = NULL;
gf_boolean_t global_opt = _gf_false;
+ gf_boolean_t key_matched = _gf_false; /* if a key was processed or not*/
glusterd_volinfo_t *voliter = NULL;
glusterd_conf_t *priv = NULL;
xlator_t *this = NULL;
@@ -1015,6 +1025,7 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
gf_boolean_t check_op_version = _gf_true;
gf_boolean_t trash_enabled = _gf_false;
gf_boolean_t all_vol = _gf_false;
+ struct volopt_map_entry *vmep = NULL;
GF_ASSERT(dict);
this = THIS;
@@ -1022,10 +1033,6 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
priv = this->private;
GF_ASSERT(priv);
- val_dict = dict_new();
- if (!val_dict)
- goto out;
-
/* Check if we can support the required op-version
* This check is not done on the originator glusterd. The originator
* glusterd sets this value.
@@ -1040,8 +1047,8 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
if (check_op_version) {
ret = dict_get_uint32(dict, "new-op-version", &new_op_version);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Failed to get new_op_version");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Key=new-op-version", NULL);
goto out;
}
@@ -1049,9 +1056,8 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
(new_op_version < GD_OP_VERSION_MIN)) {
ret = -1;
snprintf(errstr, sizeof(errstr),
- "Required op_version (%d) is not "
- "supported. Max supported op version "
- "is %d",
+ "Required op_version (%d) is not supported."
+ " Max supported op version is %d",
new_op_version, priv->op_version);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_UNSUPPORTED_VERSION,
"%s", errstr);
@@ -1060,7 +1066,7 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
}
}
- ret = dict_get_int32n(dict, "count", SLEN("count"), &dict_count);
+ ret = dict_get_int32_sizen(dict, "count", &dict_count);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
"Count(dict),not set in Volume-Set");
@@ -1069,12 +1075,12 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
if (dict_count == 0) {
/*No options would be specified of volume set help */
- if (dict_getn(dict, "help", SLEN("help"))) {
+ if (dict_get_sizen(dict, "help")) {
ret = 0;
goto out;
}
- if (dict_getn(dict, "help-xml", SLEN("help-xml"))) {
+ if (dict_get_sizen(dict, "help-xml")) {
#if (HAVE_LIB_XML)
ret = 0;
goto out;
@@ -1083,8 +1089,7 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MODULE_NOT_INSTALLED,
"libxml not present in the system");
*op_errstr = gf_strdup(
- "Error: xml libraries not "
- "present to produce xml-output");
+ "Error: xml libraries not present to produce xml-output");
goto out;
#endif
}
@@ -1095,25 +1100,17 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
goto out;
}
- ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ ret = dict_get_str_sizen(dict, "volname", &volname);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Unable to get volume name");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Key=volname", NULL);
goto out;
}
if (strcasecmp(volname, "all") != 0) {
- exists = glusterd_check_volume_exists(volname);
- if (!exists) {
- snprintf(errstr, sizeof(errstr), FMTSTR_CHECK_VOL_EXISTS, volname);
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s",
- errstr);
- ret = -1;
- goto out;
- }
-
ret = glusterd_volinfo_find(volname, &volinfo);
if (ret) {
+ snprintf(errstr, sizeof(errstr), FMTSTR_CHECK_VOL_EXISTS, volname);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
FMTSTR_CHECK_VOL_EXISTS, volname);
goto out;
@@ -1130,15 +1127,23 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
all_vol = _gf_true;
}
+ val_dict = dict_new();
+ if (!val_dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
+ goto out;
+ }
+
for (count = 1; ret != 1; count++) {
- global_opt = _gf_false;
- keylen = sprintf(keystr, "key%d", count);
- ret = dict_get_strn(dict, keystr, keylen, &key);
- if (ret)
+ keystr_len = sprintf(keystr, "key%d", count);
+ ret = dict_get_strn(dict, keystr, keystr_len, &key);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=%s", keystr, NULL);
break;
+ }
- keylen = sprintf(keystr, "value%d", count);
- ret = dict_get_strn(dict, keystr, keylen, &value);
+ keystr_len = sprintf(keystr, "value%d", count);
+ ret = dict_get_strn(dict, keystr, keystr_len, &value);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
"invalid key,value pair in 'volume set'");
@@ -1146,13 +1151,15 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
goto out;
}
- if (strcmp(key, "config.memory-accounting") == 0) {
+ key_matched = _gf_false;
+ keylen = strlen(key);
+ if (len_strcmp(key, keylen, "config.memory-accounting")) {
+ key_matched = _gf_true;
gf_msg_debug(this->name, 0,
"enabling memory accounting for volume %s", volname);
ret = 0;
- }
-
- if (strcmp(key, "config.transport") == 0) {
+ } else if (len_strcmp(key, keylen, "config.transport")) {
+ key_matched = _gf_true;
gf_msg_debug(this->name, 0, "changing transport-type for volume %s",
volname);
ret = 0;
@@ -1162,23 +1169,31 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
(strcasecmp(value, "tcp,rdma") == 0) ||
(strcasecmp(value, "rdma,tcp") == 0))) {
ret = snprintf(errstr, sizeof(errstr),
- "transport-type %s does "
- "not exist",
- value);
+ "transport-type %s does not exist", value);
/* lets not bother about above return value,
its a failure anyways */
ret = -1;
goto out;
}
+ } else if (len_strcmp(key, keylen, "ganesha.enable")) {
+ key_matched = _gf_true;
+ if (strcmp(value, "off") == 0) {
+ ret = ganesha_manage_export(dict, "off", _gf_true, op_errstr);
+ if (ret)
+ goto out;
+ }
}
- ret = glusterd_check_bitrot_cmd(key, value, errstr, sizeof(errstr));
- if (ret)
- goto out;
-
- ret = glusterd_check_quota_cmd(key, value, errstr, sizeof(errstr));
- if (ret)
- goto out;
+ if (!key_matched) {
+ ret = glusterd_check_bitrot_cmd(key, keylen, errstr,
+ sizeof(errstr));
+ if (ret)
+ goto out;
+ ret = glusterd_check_quota_cmd(key, keylen, value, errstr,
+ sizeof(errstr));
+ if (ret)
+ goto out;
+ }
if (is_key_glusterd_hooks_friendly(key))
continue;
@@ -1205,42 +1220,36 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
goto out;
}
- if (key_fixed)
+ if (key_fixed) {
key = key_fixed;
+ keylen = strlen(key_fixed);
+ }
- if (strcmp(key, "cluster.granular-entry-heal") == 0) {
+ if (len_strcmp(key, keylen, "cluster.granular-entry-heal")) {
/* For granular entry-heal, if the set command was
* invoked through volume-set CLI, then allow the
* command only if the volume is still in 'Created'
* state
*/
- if ((dict_getn(dict, "is-special-key", SLEN("is-special-key")) ==
- NULL) &&
- volinfo && (volinfo->status != GLUSTERD_STATUS_NONE)) {
+ if (volinfo && volinfo->status != GLUSTERD_STATUS_NONE &&
+ (dict_get_sizen(dict, "is-special-key") == NULL)) {
snprintf(errstr, sizeof(errstr),
- " 'gluster "
- "volume set <VOLNAME> %s {enable, "
- "disable}' is not supported. Use "
- "'gluster volume heal <VOLNAME> "
- "granular-entry-heal {enable, "
- "disable}' instead.",
+ " 'gluster volume set <VOLNAME> %s {enable, disable}'"
+ " is not supported."
+ " Use 'gluster volume heal <VOLNAME> "
+ "granular-entry-heal {enable, disable}' instead.",
key);
ret = -1;
goto out;
}
- }
-
- /* Check if the key is cluster.op-version and set
- * local_new_op_version to the value given if possible.
- */
- if (strcmp(key, GLUSTERD_GLOBAL_OP_VERSION_KEY) == 0) {
+ } else if (len_strcmp(key, keylen, GLUSTERD_GLOBAL_OP_VERSION_KEY)) {
+ /* Check if the key is cluster.op-version and set
+ * local_new_op_version to the value given if possible.
+ */
if (!all_vol) {
ret = -1;
snprintf(errstr, sizeof(errstr),
- "Option \""
- "%s\" is not valid for a single "
- "volume",
- key);
+ "Option \"%s\" is not valid for a single volume", key);
goto out;
}
/* Check if cluster.op-version is the only option being
@@ -1249,9 +1258,7 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
if (count != 1) {
ret = -1;
snprintf(errstr, sizeof(errstr),
- "Option \""
- "%s\" cannot be set along with other "
- "options",
+ "Option \"%s\" cannot be set along with other options",
key);
goto out;
}
@@ -1261,10 +1268,8 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
ret = gf_string2uint(value, &local_key_op_version);
if (ret) {
snprintf(errstr, sizeof(errstr),
- "invalid "
- "number format \"%s\" in option "
- "\"%s\"",
- value, key);
+ "invalid number format \"%s\" in option \"%s\"", value,
+ key);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ENTRY, "%s",
errstr);
goto out;
@@ -1274,9 +1279,8 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
local_key_op_version < GD_OP_VERSION_MIN) {
ret = -1;
snprintf(errstr, sizeof(errstr),
- "Required op_version (%d) is not "
- "supported. Max supported op version "
- "is %d",
+ "Required op_version (%d) is not supported."
+ " Max supported op version is %d",
local_key_op_version, priv->op_version);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VERSION_UNSUPPORTED,
"%s", errstr);
@@ -1308,10 +1312,11 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
if (ret)
goto out;
- local_key_op_version = glusterd_get_op_version_for_key(key);
+ vmep = gd_get_vmep(key);
+ local_key_op_version = glusterd_get_op_version_from_vmep(vmep);
if (local_key_op_version > local_new_op_version)
local_new_op_version = local_key_op_version;
- if (gd_is_client_option(key) &&
+ if (gd_is_client_option(vmep) &&
(local_key_op_version > local_new_client_op_version))
local_new_client_op_version = local_key_op_version;
@@ -1327,8 +1332,7 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
ret = dict_get_uint32(dict, keystr, &key_op_version);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Failed to get key-op-version from"
- " dict");
+ "Failed to get key-op-version from dict");
goto out;
}
if (local_key_op_version != key_op_version) {
@@ -1337,60 +1341,63 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
"option: %s op-version mismatch", key);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_VERSION_MISMATCH,
"%s, required op-version = %" PRIu32
- ", "
- "available op-version = %" PRIu32,
+ ", available op-version = %" PRIu32,
errstr, key_op_version, local_key_op_version);
goto out;
}
}
- if (glusterd_check_globaloption(key))
- global_opt = _gf_true;
-
- ret = glusterd_validate_shared_storage(key, value, errstr);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0,
- GD_MSG_SHARED_STRG_VOL_OPT_VALIDATE_FAIL,
- "Failed to validate shared "
- "storage volume options");
- goto out;
- }
-
- ret = glusterd_validate_localtime_logging(key, value, errstr);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0,
- GD_MSG_LOCALTIME_LOGGING_VOL_OPT_VALIDATE_FAIL,
- "Failed to validate localtime "
- "logging volume options");
- goto out;
- }
-
- ret = glusterd_validate_daemon_log_level(key, value, errstr);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0,
- GD_MSG_DAEMON_LOG_LEVEL_VOL_OPT_VALIDATE_FAIL,
- "Failed to validate daemon-log-level volume "
- "options");
- goto out;
- }
+ global_opt = glusterd_check_globaloption(key);
- if (volinfo) {
- ret = glusterd_volinfo_get(volinfo, VKEY_FEATURES_TRASH, &val_dup);
- if (val_dup) {
- ret = gf_string2boolean(val_dup, &trash_enabled);
- if (ret)
- goto out;
+ if (len_strcmp(key, keylen, GLUSTERD_SHARED_STORAGE_KEY)) {
+ ret = glusterd_validate_shared_storage(value, errstr);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_SHARED_STRG_VOL_OPT_VALIDATE_FAIL,
+ "Failed to validate shared storage volume options");
+ goto out;
+ }
+ } else if (len_strcmp(key, keylen, GLUSTERD_LOCALTIME_LOGGING_KEY)) {
+ ret = glusterd_validate_localtime_logging(value, errstr);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_LOCALTIME_LOGGING_VOL_OPT_VALIDATE_FAIL,
+ "Failed to validate localtime logging volume options");
+ goto out;
+ }
+ } else if (len_strcmp(key, keylen, GLUSTERD_DAEMON_LOG_LEVEL_KEY)) {
+ ret = glusterd_validate_daemon_log_level(value, errstr);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_DAEMON_LOG_LEVEL_VOL_OPT_VALIDATE_FAIL,
+ "Failed to validate daemon-log-level volume options");
+ goto out;
+ }
+ } else if (len_strcmp(key, keylen, "features.trash-dir")) {
+ if (volinfo) {
+ ret = glusterd_volinfo_get(volinfo, VKEY_FEATURES_TRASH,
+ &val_dup);
+ if (!ret && val_dup) {
+ ret = gf_string2boolean(val_dup, &trash_enabled);
+ if (ret)
+ goto out;
+ }
+ }
+ if (!trash_enabled) {
+ snprintf(errstr, sizeof(errstr),
+ "Trash translator is not enabled. "
+ "Use volume set %s trash on",
+ volname);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_SET_FAIL,
+ "Unable to set the options in 'volume set': %s", errstr);
+ ret = -1;
+ goto out;
}
- }
-
- if (!strcmp(key, "features.trash-dir") && trash_enabled) {
if (strchr(value, '/')) {
snprintf(errstr, sizeof(errstr),
"Path is not allowed as option");
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_SET_FAIL,
- "Unable to set the options in 'volume "
- "set': %s",
- errstr);
+ "Unable to set the options in 'volume set': %s", errstr);
ret = -1;
goto out;
}
@@ -1411,16 +1418,13 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
snprintf(errstr, sizeof(errstr), "Path %s exists",
value);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_SET_FAIL,
- "Unable to set the "
- "options in "
- "'volume set': %s",
+ "Unable to set the options in 'volume set': %s",
errstr);
ret = -1;
goto out;
} else {
gf_msg_debug(this->name, 0,
- "Directory with given "
- "name does not exists,"
+ "Directory with given name does not exist,"
" continuing");
}
@@ -1431,9 +1435,7 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
snprintf(errstr, sizeof(errstr),
"One or more bricks are down");
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_SET_FAIL,
- "Unable to set the "
- "options in "
- "'volume set': %s",
+ "Unable to set the options in 'volume set': %s",
errstr);
ret = -1;
goto out;
@@ -1442,22 +1444,11 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
if (trash_path) {
GF_FREE(trash_path);
trash_path = NULL;
- trash_path_len = 0;
}
}
- } else if (!strcmp(key, "features.trash-dir") && !trash_enabled) {
- snprintf(errstr, sizeof(errstr),
- "Trash translator is not enabled. Use "
- "volume set %s trash on",
- volname);
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_SET_FAIL,
- "Unable to set the options in 'volume "
- "set': %s",
- errstr);
- ret = -1;
- goto out;
}
- ret = dict_set_str(val_dict, key, value);
+
+ ret = dict_set_strn(val_dict, key, keylen, value);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
@@ -1482,12 +1473,11 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
- "Could not create "
- "temp volfile, some option failed: %s",
+ "Could not create temp volfile, some option failed: %s",
*op_errstr);
goto out;
}
- dict_del(val_dict, key);
+ dict_deln(val_dict, key, keylen);
if (key_fixed) {
GF_FREE(key_fixed);
@@ -1501,7 +1491,6 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
volname, local_new_client_op_version, op_errstr);
if (ret)
goto out;
-
cont:
if (origin_glusterd) {
ret = dict_set_uint32(dict, "new-op-version", local_new_op_version);
@@ -1516,8 +1505,7 @@ cont:
* TODO: Remove this and the other places this is referred once
* 3.3.x compatibility is not required
*/
- ret = dict_set_int32n(dict, "check-op-version",
- SLEN("check-op-version"), 1);
+ ret = dict_set_int32_sizen(dict, "check-op-version", 1);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
"Failed to set check-op-version in dict");
@@ -1549,81 +1537,6 @@ out:
}
return ret;
}
-static int
-glusterd_water_limit_check(glusterd_volinfo_t *volinfo, gf_boolean_t is_hi,
- char **op_errstr)
-{
- int ret = -1;
- char *default_value = NULL;
- char *temp = NULL;
- uint64_t wm = 0;
- uint64_t default_wm = 0;
- struct volopt_map_entry *vmap = NULL;
- xlator_t *this = NULL;
- extern struct volopt_map_entry glusterd_volopt_map[];
- char msg[2048] = {0};
-
- this = THIS;
- GF_ASSERT(this);
-
- if (is_hi)
- ret = glusterd_volinfo_get(volinfo, "cluster.watermark-low", &temp);
- else
- ret = glusterd_volinfo_get(volinfo, "cluster.watermark-hi", &temp);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
- "failed to get watermark");
- goto out;
- }
-
- gf_string2bytesize_uint64(temp, &wm);
-
- if (is_hi)
- for (vmap = glusterd_volopt_map; vmap->key; vmap++) {
- if (strcmp(vmap->key, "cluster.watermark-hi") == 0)
- default_value = vmap->value;
- }
- else
- for (vmap = glusterd_volopt_map; vmap->key; vmap++) {
- if (strcmp(vmap->key, "cluster.watermark-low") == 0)
- default_value = vmap->value;
- }
-
- gf_string2bytesize_uint64(default_value, &default_wm);
-
- if (is_hi) {
- if (default_wm <= wm) {
- snprintf(msg, sizeof(msg),
- "Resetting hi-watermark "
- "to default will make it lower or equal to "
- "the low-watermark, which is an invalid "
- "configuration state. Please lower the "
- "low-watermark first to the desired value "
- "and then reset the hi-watermark.");
- ret = -1;
- goto out;
- }
- } else {
- if (default_wm >= wm) {
- snprintf(msg, sizeof(msg),
- "Resetting low-watermark "
- "to default will make it higher or equal to "
- "the hi-watermark, which is an invalid "
- "configuration state. Please raise the "
- "hi-watermark first to the desired value "
- "and then reset the low-watermark.");
- ret = -1;
- goto out;
- }
- }
-out:
- if (msg[0] != '\0') {
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TIER_WATERMARK_RESET_FAIL,
- "%s", msg);
- *op_errstr = gf_strdup(msg);
- }
- return ret;
-}
static int
glusterd_op_stage_reset_volume(dict_t *dict, char **op_errstr)
@@ -1653,12 +1566,6 @@ glusterd_op_stage_reset_volume(dict_t *dict, char **op_errstr)
}
if (strcasecmp(volname, "all") != 0) {
- exists = glusterd_check_volume_exists(volname);
- if (!exists) {
- snprintf(msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname);
- ret = -1;
- goto out;
- }
ret = glusterd_volinfo_find(volname, &volinfo);
if (ret) {
snprintf(msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname);
@@ -1677,18 +1584,26 @@ glusterd_op_stage_reset_volume(dict_t *dict, char **op_errstr)
goto out;
}
+ /* *
+ * If key ganesha.enable is set, then volume should be unexported from
+ * ganesha server. Also it is a volume-level option, perform only when
+ * volume name not equal to "all"(in other words if volinfo != NULL)
+ */
+ if (volinfo && (!strcmp(key, "all") || !strcmp(key, "ganesha.enable"))) {
+ if (glusterd_check_ganesha_export(volinfo)) {
+ ret = ganesha_manage_export(dict, "off", _gf_true, op_errstr);
+ if (ret)
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_NFS_GNS_RESET_FAIL,
+ "Could not reset ganesha.enable key");
+ }
+ }
+
if (strcmp(key, "all")) {
exists = glusterd_check_option_exists(key, &key_fixed);
if (exists == -1) {
ret = -1;
goto out;
- } else if (strcmp(key, "cluster.watermark-low") == 0) {
- ret = glusterd_water_limit_check(volinfo, _gf_false, op_errstr);
- } else if (strcmp(key, "cluster.watermark-hi") == 0) {
- ret = glusterd_water_limit_check(volinfo, _gf_true, op_errstr);
}
- if (ret)
- goto out;
if (!exists) {
ret = snprintf(msg, sizeof(msg), "Option %s does not exist", key);
@@ -1747,18 +1662,22 @@ glusterd_op_stage_sync_volume(dict_t *dict, char **op_errstr)
int ret = -1;
char *volname = NULL;
char *hostname = NULL;
- gf_boolean_t exists = _gf_false;
glusterd_peerinfo_t *peerinfo = NULL;
char msg[2048] = {
0,
};
glusterd_volinfo_t *volinfo = NULL;
+ xlator_t *this = NULL;
+ this = THIS;
+ GF_ASSERT(this);
ret = dict_get_strn(dict, "hostname", SLEN("hostname"), &hostname);
if (ret) {
snprintf(msg, sizeof(msg),
"hostname couldn't be "
"retrieved from msg");
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=hostname", NULL);
*op_errstr = gf_strdup(msg);
goto out;
}
@@ -1767,39 +1686,42 @@ glusterd_op_stage_sync_volume(dict_t *dict, char **op_errstr)
// volname is not present in case of sync all
ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
if (!ret) {
- exists = glusterd_check_volume_exists(volname);
- if (!exists) {
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
snprintf(msg, sizeof(msg),
"Volume %s "
"does not exist",
volname);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOL_NOT_FOUND,
+ "Volume=%s", volname, NULL);
*op_errstr = gf_strdup(msg);
- ret = -1;
goto out;
}
- ret = glusterd_volinfo_find(volname, &volinfo);
- if (ret)
- goto out;
-
- } else {
- ret = 0;
}
} else {
RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(NULL, hostname);
if (peerinfo == NULL) {
+ RCU_READ_UNLOCK;
ret = -1;
snprintf(msg, sizeof(msg), "%s, is not a friend", hostname);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_PEER_NOT_FOUND,
+ "Peer_name=%s", hostname, NULL);
*op_errstr = gf_strdup(msg);
+ goto out;
} else if (!peerinfo->connected) {
+ RCU_READ_UNLOCK;
+ ret = -1;
snprintf(msg, sizeof(msg),
"%s, is not connected at "
"the moment",
hostname);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_PEER_DISCONNECTED,
+ "Peer_name=%s", hostname, NULL);
*op_errstr = gf_strdup(msg);
- ret = -1;
+ goto out;
}
RCU_READ_UNLOCK;
@@ -1826,7 +1748,9 @@ glusterd_op_stage_status_volume(dict_t *dict, char **op_errstr)
glusterd_brickinfo_t *brickinfo = NULL;
glusterd_volinfo_t *volinfo = NULL;
dict_t *vol_opts = NULL;
+#ifdef BUILD_GNFS
gf_boolean_t nfs_disabled = _gf_false;
+#endif
gf_boolean_t shd_enabled = _gf_false;
GF_ASSERT(dict);
@@ -1836,8 +1760,11 @@ glusterd_op_stage_status_volume(dict_t *dict, char **op_errstr)
GF_ASSERT(priv);
ret = dict_get_uint32(dict, "cmd", &cmd);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=cmd", NULL);
goto out;
+ }
if (cmd & GF_CLI_STATUS_ALL)
goto out;
@@ -1848,17 +1775,8 @@ glusterd_op_stage_status_volume(dict_t *dict, char **op_errstr)
"The cluster is operating at "
"version 1. Getting the status of quotad is not "
"allowed in this state.");
- ret = -1;
- goto out;
- }
-
- if ((cmd & GF_CLI_STATUS_TIERD) &&
- (priv->op_version < GD_OP_VERSION_3_10_0)) {
- snprintf(msg, sizeof(msg),
- "The cluster is operating at "
- "version less than %d. Getting the "
- "status of tierd is not allowed in this state.",
- GD_OP_VERSION_3_10_0);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_QUOTA_GET_STAT_FAIL,
+ msg, NULL);
ret = -1;
goto out;
}
@@ -1870,6 +1788,8 @@ glusterd_op_stage_status_volume(dict_t *dict, char **op_errstr)
"version less than %d. Getting the "
"status of snapd is not allowed in this state.",
GD_OP_VERSION_3_6_0);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_SNAP_STATUS_FAIL, msg,
+ NULL);
ret = -1;
goto out;
}
@@ -1884,47 +1804,61 @@ glusterd_op_stage_status_volume(dict_t *dict, char **op_errstr)
ret = glusterd_volinfo_find(volname, &volinfo);
if (ret) {
snprintf(msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOLINFO_GET_FAIL,
+ "Volume=%s", volname, NULL);
ret = -1;
goto out;
}
ret = glusterd_validate_volume_id(dict, volinfo);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VALIDATE_FAILED, NULL);
goto out;
+ }
ret = glusterd_is_volume_started(volinfo);
if (!ret) {
snprintf(msg, sizeof(msg), "Volume %s is not started", volname);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOL_NOT_STARTED,
+ "Volume=%s", volname, NULL);
ret = -1;
goto out;
}
vol_opts = volinfo->dict;
- if ((cmd & GF_CLI_STATUS_NFS) != 0) {
- nfs_disabled = dict_get_str_boolean(vol_opts, NFS_DISABLE_MAP_KEY,
- _gf_false);
- if (nfs_disabled) {
- ret = -1;
- snprintf(msg, sizeof(msg), "NFS server is disabled for volume %s",
- volname);
- goto out;
- }
- } else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
+ if ((cmd & GF_CLI_STATUS_SHD) != 0) {
if (glusterd_is_shd_compatible_volume(volinfo)) {
shd_enabled = gd_is_self_heal_enabled(volinfo, vol_opts);
} else {
ret = -1;
snprintf(msg, sizeof(msg), "Volume %s is not Self-heal compatible",
volname);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOL_SHD_NOT_COMP,
+ "Volume=%s", volname, NULL);
goto out;
}
if (!shd_enabled) {
ret = -1;
snprintf(msg, sizeof(msg),
"Self-heal Daemon is disabled for volume %s", volname);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_SELF_HEALD_DISABLED,
+ "Volume=%s", volname, NULL);
goto out;
}
+#ifdef BUILD_GNFS
+ } else if ((cmd & GF_CLI_STATUS_NFS) != 0) {
+ nfs_disabled = dict_get_str_boolean(vol_opts, NFS_DISABLE_MAP_KEY,
+ _gf_false);
+ if (nfs_disabled) {
+ ret = -1;
+ snprintf(msg, sizeof(msg), "NFS server is disabled for volume %s",
+ volname);
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_NFS_GANESHA_DISABLED, "Volume=%s", volname, NULL);
+ goto out;
+ }
+#endif
} else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) {
if (!glusterd_is_volume_quota_enabled(volinfo)) {
ret = -1;
@@ -1932,6 +1866,8 @@ glusterd_op_stage_status_volume(dict_t *dict, char **op_errstr)
"Volume %s does not have "
"quota enabled",
volname);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_QUOTA_DISABLED,
+ "Volume=%s", volname, NULL);
goto out;
}
} else if ((cmd & GF_CLI_STATUS_BITD) != 0) {
@@ -1941,15 +1877,8 @@ glusterd_op_stage_status_volume(dict_t *dict, char **op_errstr)
"Volume %s does not have "
"bitrot enabled",
volname);
- goto out;
- }
- } else if ((cmd & GF_CLI_STATUS_TIERD) != 0) {
- if (!glusterd_is_tierd_enabled(volinfo)) {
- ret = -1;
- snprintf(msg, sizeof(msg),
- "Volume %s does not have "
- "tierd enabled.",
- volname);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_BITROT_NOT_ENABLED,
+ "Volume=%s", volname, NULL);
goto out;
}
} else if ((cmd & GF_CLI_STATUS_SCRUB) != 0) {
@@ -1960,6 +1889,10 @@ glusterd_op_stage_status_volume(dict_t *dict, char **op_errstr)
"bitrot enabled. Scrubber will be enabled "
"automatically if bitrot is enabled",
volname);
+ gf_smsg(
+ this->name, GF_LOG_ERROR, errno, GD_MSG_BITROT_NOT_ENABLED,
+ "Scrubber will be enabled automatically if bitrot is enabled",
+ "Volume=%s", volname, NULL);
goto out;
}
} else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
@@ -1969,12 +1902,17 @@ glusterd_op_stage_status_volume(dict_t *dict, char **op_errstr)
"Volume %s does not have "
"uss enabled",
volname);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_SNAPD_NOT_RUNNING,
+ "Volume=%s", volname, NULL);
goto out;
}
} else if ((cmd & GF_CLI_STATUS_BRICK) != 0) {
ret = dict_get_strn(dict, "brick", SLEN("brick"), &brick);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Key=brick", NULL);
goto out;
+ }
ret = glusterd_volume_brickinfo_get_by_brick(brick, volinfo, &brickinfo,
_gf_false);
@@ -1983,6 +1921,8 @@ glusterd_op_stage_status_volume(dict_t *dict, char **op_errstr)
"No brick %s in"
" volume %s",
brick, volname);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_BRICK_NOT_FOUND,
+ "Brick=%s, Volume=%s", brick, volname, NULL);
ret = -1;
goto out;
}
@@ -2007,7 +1947,6 @@ glusterd_op_stage_stats_volume(dict_t *dict, char **op_errstr)
{
int ret = -1;
char *volname = NULL;
- gf_boolean_t exists = _gf_false;
char msg[2048] = {
0,
};
@@ -2020,14 +1959,12 @@ glusterd_op_stage_stats_volume(dict_t *dict, char **op_errstr)
goto out;
}
- exists = glusterd_check_volume_exists(volname);
ret = glusterd_volinfo_find(volname, &volinfo);
- if ((!exists) || (ret < 0)) {
+ if (ret) {
snprintf(msg, sizeof(msg),
"Volume %s, "
"doesn't exist",
volname);
- ret = -1;
goto out;
}
@@ -2050,8 +1987,8 @@ glusterd_op_stage_stats_volume(dict_t *dict, char **op_errstr)
ret = -1;
goto out;
}
- }
- if ((GF_CLI_STATS_STOP == stats_op) || (GF_CLI_STATS_INFO == stats_op)) {
+ } else if ((GF_CLI_STATS_STOP == stats_op) ||
+ (GF_CLI_STATS_INFO == stats_op)) {
if (_gf_false == glusterd_is_profile_on(volinfo)) {
snprintf(msg, sizeof(msg),
"Profile on Volume %s is"
@@ -2191,17 +2128,16 @@ glusterd_options_reset(glusterd_volinfo_t *volinfo, char *key,
if (ret)
goto out;
}
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- svc = &(volinfo->tierd.svc);
- ret = svc->reconfigure(volinfo);
- if (ret)
- goto out;
- }
svc = &(volinfo->gfproxyd.svc);
ret = svc->reconfigure(volinfo);
if (ret)
goto out;
+ svc = &(volinfo->shd.svc);
+ ret = svc->reconfigure(volinfo);
+ if (ret)
+ goto out;
+
ret = glusterd_create_volfiles_and_notify_services(volinfo);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
@@ -2216,7 +2152,7 @@ glusterd_options_reset(glusterd_volinfo_t *volinfo, char *key,
goto out;
if (GLUSTERD_STATUS_STARTED == volinfo->status) {
- ret = glusterd_svcs_reconfigure();
+ ret = glusterd_svcs_reconfigure(volinfo);
if (ret)
goto out;
}
@@ -2273,8 +2209,10 @@ glusterd_op_reset_all_volume_options(xlator_t *this, dict_t *dict)
ret = -1;
dup_opt = dict_new();
- if (!dup_opt)
+ if (!dup_opt) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
goto out;
+ }
if (!all) {
dict_copy(conf->opts, dup_opt);
dict_del(dup_opt, key);
@@ -2285,8 +2223,11 @@ glusterd_op_reset_all_volume_options(xlator_t *this, dict_t *dict)
ret = dict_set_strn(dup_opt, GLUSTERD_GLOBAL_OPT_VERSION,
SLEN(GLUSTERD_GLOBAL_OPT_VERSION), next_version);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", GLUSTERD_GLOBAL_OPT_VERSION, NULL);
goto out;
+ }
ret = glusterd_store_options(this, dup_opt);
if (ret)
@@ -2297,9 +2238,11 @@ glusterd_op_reset_all_volume_options(xlator_t *this, dict_t *dict)
ret = dict_set_dynstrn(conf->opts, GLUSTERD_GLOBAL_OPT_VERSION,
SLEN(GLUSTERD_GLOBAL_OPT_VERSION), next_version);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", GLUSTERD_GLOBAL_OPT_VERSION, NULL);
goto out;
- else
+ } else
next_version = NULL;
if (!all) {
@@ -2393,6 +2336,16 @@ glusterd_op_reset_volume(dict_t *dict, char **op_rspstr)
}
}
+ if (!strcmp(key, "ganesha.enable") || !strcmp(key, "all")) {
+ if (glusterd_check_ganesha_export(volinfo) &&
+ is_origin_glusterd(dict)) {
+ ret = manage_export_config(volname, "off", op_rspstr);
+ if (ret)
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_NFS_GNS_RESET_FAIL,
+ "Could not reset ganesha.enable key");
+ }
+ }
+
out:
GF_FREE(key_fixed);
if (quorum_action)
@@ -2435,6 +2388,7 @@ glusterd_start_bricks(glusterd_volinfo_t *volinfo)
if (!brickinfo->start_triggered) {
pthread_mutex_lock(&brickinfo->restart_mutex);
{
+ /* coverity[SLEEP] */
ret = glusterd_brick_start(volinfo, brickinfo, _gf_false,
_gf_false);
}
@@ -2572,8 +2526,11 @@ glusterd_op_set_all_volume_options(xlator_t *this, dict_t *dict,
conf = this->private;
ret = dict_get_strn(dict, "key1", SLEN("key1"), &key);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=key1", NULL);
goto out;
+ }
ret = dict_get_strn(dict, "value1", SLEN("value1"), &value);
if (ret) {
@@ -2648,18 +2605,16 @@ glusterd_op_set_all_volume_options(xlator_t *this, dict_t *dict,
goto out;
}
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- svc = &(volinfo->tierd.svc);
- ret = svc->reconfigure(volinfo);
- if (ret)
- goto out;
- }
-
svc = &(volinfo->gfproxyd.svc);
ret = svc->reconfigure(volinfo);
if (ret)
goto out;
+ svc = &(volinfo->shd.svc);
+ ret = svc->reconfigure(volinfo);
+ if (ret)
+ goto out;
+
ret = glusterd_create_volfiles_and_notify_services(volinfo);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0,
@@ -2673,7 +2628,7 @@ glusterd_op_set_all_volume_options(xlator_t *this, dict_t *dict,
}
}
if (svcs_reconfigure) {
- ret = glusterd_svcs_reconfigure();
+ ret = glusterd_svcs_reconfigure(NULL);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_RESTART_FAIL,
"Unable to restart "
@@ -2694,12 +2649,17 @@ glusterd_op_set_all_volume_options(xlator_t *this, dict_t *dict,
}
ret = -1;
dup_opt = dict_new();
- if (!dup_opt)
+ if (!dup_opt) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
goto out;
+ }
dict_copy(conf->opts, dup_opt);
ret = dict_set_str(dup_opt, key, value);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
ret = glusterd_get_next_global_opt_version_str(conf->opts, &next_version);
if (ret)
@@ -2707,8 +2667,11 @@ glusterd_op_set_all_volume_options(xlator_t *this, dict_t *dict,
ret = dict_set_strn(dup_opt, GLUSTERD_GLOBAL_OPT_VERSION,
SLEN(GLUSTERD_GLOBAL_OPT_VERSION), next_version);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", GLUSTERD_GLOBAL_OPT_VERSION, NULL);
goto out;
+ }
ret = glusterd_store_options(this, dup_opt);
if (ret)
@@ -2719,9 +2682,11 @@ glusterd_op_set_all_volume_options(xlator_t *this, dict_t *dict,
ret = dict_set_dynstrn(conf->opts, GLUSTERD_GLOBAL_OPT_VERSION,
SLEN(GLUSTERD_GLOBAL_OPT_VERSION), next_version);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", GLUSTERD_GLOBAL_OPT_VERSION, NULL);
goto out;
- else
+ } else
next_version = NULL;
dup_value = gf_strdup(value);
@@ -2729,9 +2694,11 @@ glusterd_op_set_all_volume_options(xlator_t *this, dict_t *dict,
goto out;
ret = dict_set_dynstr(conf->opts, key, dup_value);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
- else
+ } else
dup_value = NULL; /* Protect the allocation from GF_FREE */
out:
@@ -2810,7 +2777,7 @@ glusterd_set_shared_storage(dict_t *dict, char *key, char *value,
goto out;
}
- ret = mkdir_p(GLUSTER_SHARED_STORAGE_BRICK_DIR, 0777, _gf_true);
+ ret = mkdir_p(GLUSTER_SHARED_STORAGE_BRICK_DIR, 0755, _gf_true);
if (-1 == ret) {
snprintf(errstr, PATH_MAX,
"Failed to create shared "
@@ -2944,6 +2911,11 @@ glusterd_op_set_volume(dict_t *dict, char **errstr)
if (strcmp(key, "config.memory-accounting") == 0) {
ret = gf_string2boolean(value, &volinfo->memory_accounting);
+ if (ret == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
+ "Invalid value in key-value pair.");
+ goto out;
+ }
}
if (strcmp(key, "config.transport") == 0) {
@@ -2964,6 +2936,10 @@ glusterd_op_set_volume(dict_t *dict, char **errstr)
}
}
+ ret = glusterd_check_ganesha_cmd(key, value, errstr, dict);
+ if (ret == -1)
+ goto out;
+
if (!is_key_glusterd_hooks_friendly(key)) {
ret = glusterd_check_option_exists(key, &key_fixed);
GF_ASSERT(ret);
@@ -3043,17 +3019,16 @@ glusterd_op_set_volume(dict_t *dict, char **errstr)
if (ret)
goto out;
}
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- svc = &(volinfo->tierd.svc);
- ret = svc->reconfigure(volinfo);
- if (ret)
- goto out;
- }
svc = &(volinfo->gfproxyd.svc);
ret = svc->reconfigure(volinfo);
if (ret)
goto out;
+ svc = &(volinfo->shd.svc);
+ ret = svc->reconfigure(volinfo);
+ if (ret)
+ goto out;
+
ret = glusterd_create_volfiles_and_notify_services(volinfo);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
@@ -3069,7 +3044,7 @@ glusterd_op_set_volume(dict_t *dict, char **errstr)
goto out;
if (GLUSTERD_STATUS_STARTED == volinfo->status) {
- ret = glusterd_svcs_reconfigure();
+ ret = glusterd_svcs_reconfigure(volinfo);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_RESTART_FAIL,
"Unable to restart services");
@@ -3090,18 +3065,16 @@ glusterd_op_set_volume(dict_t *dict, char **errstr)
goto out;
}
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- svc = &(volinfo->tierd.svc);
- ret = svc->reconfigure(volinfo);
- if (ret)
- goto out;
- }
-
svc = &(volinfo->gfproxyd.svc);
ret = svc->reconfigure(volinfo);
if (ret)
goto out;
+ svc = &(volinfo->shd.svc);
+ ret = svc->reconfigure(volinfo);
+ if (ret)
+ goto out;
+
ret = glusterd_create_volfiles_and_notify_services(volinfo);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
@@ -3117,7 +3090,7 @@ glusterd_op_set_volume(dict_t *dict, char **errstr)
goto out;
if (GLUSTERD_STATUS_STARTED == volinfo->status) {
- ret = glusterd_svcs_reconfigure();
+ ret = glusterd_svcs_reconfigure(volinfo);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_RESTART_FAIL,
"Unable to restart services");
@@ -3160,6 +3133,8 @@ glusterd_op_sync_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
snprintf(msg, sizeof(msg),
"hostname couldn't be "
"retrieved from msg");
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=hostname", NULL);
*op_errstr = gf_strdup(msg);
goto out;
}
@@ -3184,6 +3159,7 @@ glusterd_op_sync_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
if (!rsp_dict) {
// this should happen only on source
+ gf_smsg(this->name, GF_LOG_INFO, errno, GD_MSG_INVALID_ARGUMENT, NULL);
ret = 0;
goto out;
}
@@ -3324,7 +3300,7 @@ glusterd_op_stats_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
goto out;
if (GLUSTERD_STATUS_STARTED == volinfo->status) {
- ret = glusterd_svcs_reconfigure();
+ ret = glusterd_svcs_reconfigure(volinfo);
if (ret)
goto out;
}
@@ -3344,10 +3320,11 @@ _add_remove_bricks_to_dict(dict_t *dict, glusterd_volinfo_t *volinfo,
int ret = -1;
int count = 0;
int i = 0;
- char brick_key[1024] = {
+ char brick_key[16] = {
0,
};
- char dict_key[1024] = {
+ char dict_key[64] = {
+ /* dict_key is small as prefix is up to 32 chars */
0,
};
int keylen;
@@ -3412,7 +3389,7 @@ static int
_add_task_to_dict(dict_t *dict, glusterd_volinfo_t *volinfo, int op, int index)
{
int ret = -1;
- char key[64] = {
+ char key[32] = {
0,
};
int keylen;
@@ -3427,7 +3404,6 @@ _add_task_to_dict(dict_t *dict, glusterd_volinfo_t *volinfo, int op, int index)
GF_ASSERT(this);
switch (op) {
- case GD_OP_REMOVE_TIER_BRICK:
case GD_OP_REMOVE_BRICK:
snprintf(key, sizeof(key), "task%d", index);
ret = _add_remove_bricks_to_dict(dict, volinfo, key);
@@ -3437,7 +3413,6 @@ _add_task_to_dict(dict_t *dict, glusterd_volinfo_t *volinfo, int op, int index)
"Failed to add remove bricks to dict");
goto out;
}
- case GD_OP_TIER_MIGRATE:
case GD_OP_REBALANCE:
uuid_str = gf_strdup(uuid_utoa(volinfo->rebal.rebalance_id));
status = volinfo->rebal.defrag_status;
@@ -3492,25 +3467,12 @@ glusterd_aggregate_task_status(dict_t *rsp_dict, glusterd_volinfo_t *volinfo)
int ret = -1;
int tasks = 0;
xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
this = THIS;
GF_ASSERT(this);
- conf = this->private;
if (!gf_uuid_is_null(volinfo->rebal.rebalance_id)) {
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- if (conf->op_version > GD_OP_VERSION_3_10_0)
- goto done;
- if (volinfo->rebal.op == GD_OP_REMOVE_BRICK)
- ret = _add_task_to_dict(rsp_dict, volinfo,
- GD_OP_REMOVE_TIER_BRICK, tasks);
- else if (volinfo->rebal.op == GD_OP_REBALANCE)
- ret = _add_task_to_dict(rsp_dict, volinfo, GD_OP_TIER_MIGRATE,
- tasks);
- } else
- ret = _add_task_to_dict(rsp_dict, volinfo, volinfo->rebal.op,
- tasks);
+ ret = _add_task_to_dict(rsp_dict, volinfo, volinfo->rebal.op, tasks);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
@@ -3519,15 +3481,12 @@ glusterd_aggregate_task_status(dict_t *rsp_dict, glusterd_volinfo_t *volinfo)
}
tasks++;
}
-done:
ret = dict_set_int32n(rsp_dict, "tasks", SLEN("tasks"), tasks);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
"Error setting tasks count in dict");
goto out;
}
- ret = 0;
-
out:
return ret;
}
@@ -3539,7 +3498,6 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
int node_count = 0;
int brick_index = -1;
int other_count = 0;
- int hot_brick_count = -1;
int other_index = 0;
uint32_t cmd = 0;
char *volname = NULL;
@@ -3549,9 +3507,12 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
glusterd_brickinfo_t *brickinfo = NULL;
glusterd_conf_t *priv = NULL;
dict_t *vol_opts = NULL;
+#ifdef BUILD_GNFS
gf_boolean_t nfs_disabled = _gf_false;
+#endif
gf_boolean_t shd_enabled = _gf_false;
gf_boolean_t origin_glusterd = _gf_false;
+ int snapd_enabled, bitrot_enabled, volume_quota_enabled;
this = THIS;
GF_ASSERT(this);
@@ -3599,29 +3560,22 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
}
vol_opts = volinfo->dict;
- if ((cmd & GF_CLI_STATUS_NFS) != 0) {
- ret = glusterd_add_node_to_dict(priv->nfs_svc.name, rsp_dict, 0,
- vol_opts);
- if (ret)
- goto out;
- other_count++;
- node_count++;
-
- } else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
- ret = glusterd_add_node_to_dict(priv->shd_svc.name, rsp_dict, 0,
+ if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) {
+ ret = glusterd_add_node_to_dict(priv->quotad_svc.name, rsp_dict, 0,
vol_opts);
if (ret)
goto out;
other_count++;
node_count++;
-
- } else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) {
- ret = glusterd_add_node_to_dict(priv->quotad_svc.name, rsp_dict, 0,
+#ifdef BUILD_GNFS
+ } else if ((cmd & GF_CLI_STATUS_NFS) != 0) {
+ ret = glusterd_add_node_to_dict(priv->nfs_svc.name, rsp_dict, 0,
vol_opts);
if (ret)
goto out;
other_count++;
node_count++;
+#endif
} else if ((cmd & GF_CLI_STATUS_BITD) != 0) {
ret = glusterd_add_node_to_dict(priv->bitd_svc.name, rsp_dict, 0,
vol_opts);
@@ -3636,14 +3590,14 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
goto out;
other_count++;
node_count++;
- } else if ((cmd & GF_CLI_STATUS_TIERD) != 0) {
- ret = glusterd_add_tierd_to_dict(volinfo, rsp_dict, other_index);
+ } else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
+ ret = glusterd_add_snapd_to_dict(volinfo, rsp_dict, other_index);
if (ret)
goto out;
other_count++;
node_count++;
- } else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
- ret = glusterd_add_snapd_to_dict(volinfo, rsp_dict, other_index);
+ } else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
+ ret = glusterd_add_shd_to_dict(volinfo, rsp_dict, other_index);
if (ret)
goto out;
other_count++;
@@ -3672,6 +3626,15 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
goto out;
} else {
+ snapd_enabled = glusterd_is_snapd_enabled(volinfo);
+ shd_enabled = gd_is_self_heal_enabled(volinfo, vol_opts);
+#ifdef BUILD_GNFS
+ nfs_disabled = dict_get_str_boolean(vol_opts, NFS_DISABLE_MAP_KEY,
+ _gf_false);
+#endif
+ volume_quota_enabled = glusterd_is_volume_quota_enabled(volinfo);
+ bitrot_enabled = glusterd_is_bitrot_enabled(volinfo);
+
cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
{
brick_index++;
@@ -3690,7 +3653,7 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
if ((cmd & GF_CLI_STATUS_MASK) == GF_CLI_STATUS_NONE) {
other_index = brick_index + 1;
- if (glusterd_is_snapd_enabled(volinfo)) {
+ if (snapd_enabled) {
ret = glusterd_add_snapd_to_dict(volinfo, rsp_dict,
other_index);
if (ret)
@@ -3700,18 +3663,18 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
node_count++;
}
- if (glusterd_is_tierd_enabled(volinfo)) {
- ret = glusterd_add_tierd_to_dict(volinfo, rsp_dict,
- other_index);
- if (ret)
- goto out;
- other_count++;
- other_index++;
- node_count++;
+ if (glusterd_is_shd_compatible_volume(volinfo)) {
+ if (shd_enabled) {
+ ret = glusterd_add_shd_to_dict(volinfo, rsp_dict,
+ other_index);
+ if (ret)
+ goto out;
+ other_count++;
+ other_index++;
+ node_count++;
+ }
}
-
- nfs_disabled = dict_get_str_boolean(vol_opts, NFS_DISABLE_MAP_KEY,
- _gf_false);
+#ifdef BUILD_GNFS
if (!nfs_disabled) {
ret = glusterd_add_node_to_dict(priv->nfs_svc.name, rsp_dict,
other_index, vol_opts);
@@ -3721,20 +3684,8 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
other_count++;
node_count++;
}
-
- if (glusterd_is_shd_compatible_volume(volinfo))
- shd_enabled = gd_is_self_heal_enabled(volinfo, vol_opts);
- if (shd_enabled) {
- ret = glusterd_add_node_to_dict(priv->shd_svc.name, rsp_dict,
- other_index, vol_opts);
- if (ret)
- goto out;
- other_count++;
- node_count++;
- other_index++;
- }
-
- if (glusterd_is_volume_quota_enabled(volinfo)) {
+#endif
+ if (volume_quota_enabled) {
ret = glusterd_add_node_to_dict(priv->quotad_svc.name, rsp_dict,
other_index, vol_opts);
if (ret)
@@ -3744,7 +3695,7 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
other_index++;
}
- if (glusterd_is_bitrot_enabled(volinfo)) {
+ if (bitrot_enabled) {
ret = glusterd_add_node_to_dict(priv->bitd_svc.name, rsp_dict,
other_index, vol_opts);
if (ret)
@@ -3752,11 +3703,8 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
other_count++;
node_count++;
other_index++;
- }
-
- /* For handling scrub status. Scrub daemon will be
- * running automatically when bitrot is enable*/
- if (glusterd_is_bitrot_enabled(volinfo)) {
+ /* For handling scrub status. Scrub daemon will be
+ * running automatically when bitrot is enable */
ret = glusterd_add_node_to_dict(priv->scrub_svc.name, rsp_dict,
other_index, vol_opts);
if (ret)
@@ -3767,35 +3715,31 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
}
}
- if (volinfo->type == GF_CLUSTER_TYPE_TIER)
- hot_brick_count = volinfo->tier_info.hot_brick_count;
- ret = dict_set_int32n(rsp_dict, "hot_brick_count", SLEN("hot_brick_count"),
- hot_brick_count);
- if (ret)
- goto out;
-
ret = dict_set_int32n(rsp_dict, "type", SLEN("type"), volinfo->type);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=type", NULL);
goto out;
+ }
ret = dict_set_int32n(rsp_dict, "brick-index-max", SLEN("brick-index-max"),
brick_index);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
- "Error setting brick-index-max to dict");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Key=brick-index-max", NULL);
goto out;
}
ret = dict_set_int32n(rsp_dict, "other-count", SLEN("other-count"),
other_count);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
- "Error setting other-count to dict");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Key=other-count", NULL);
goto out;
}
ret = dict_set_int32n(rsp_dict, "count", SLEN("count"), node_count);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
- "Error setting node count to dict");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Key=count", NULL);
goto out;
}
@@ -4272,8 +4216,10 @@ glusterd_dict_set_volid(dict_t *dict, char *volname, char **op_errstr)
this = THIS;
GF_ASSERT(this);
- if (!dict || !volname)
+ if (!dict || !volname) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
goto out;
+ }
ret = glusterd_volinfo_find(volname, &volinfo);
if (ret) {
@@ -4423,9 +4369,7 @@ glusterd_op_build_payload(dict_t **req, char **op_errstr, dict_t *op_ctx)
req_dict = dict_ref(dict);
} break;
- case GD_OP_REMOVE_BRICK:
- case GD_OP_DETACH_TIER_STATUS:
- case GD_OP_REMOVE_TIER_BRICK: {
+ case GD_OP_REMOVE_BRICK: {
dict_t *dict = ctx;
ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
if (ret) {
@@ -4477,8 +4421,6 @@ glusterd_op_build_payload(dict_t **req, char **op_errstr, dict_t *op_ctx)
case GD_OP_DEFRAG_BRICK_VOLUME:
case GD_OP_BARRIER:
case GD_OP_BITROT:
- case GD_OP_TIER_START_STOP:
- case GD_OP_TIER_STATUS:
case GD_OP_SCRUB_STATUS:
case GD_OP_SCRUB_ONDEMAND:
case GD_OP_RESET_BRICK: {
@@ -4494,7 +4436,8 @@ glusterd_op_build_payload(dict_t **req, char **op_errstr, dict_t *op_ctx)
case GD_OP_SYNC_VOLUME:
case GD_OP_COPY_FILE:
- case GD_OP_SYS_EXEC: {
+ case GD_OP_SYS_EXEC:
+ case GD_OP_GANESHA: {
dict_copy(dict, req_dict);
} break;
@@ -4674,7 +4617,7 @@ glusterd_op_volume_dict_uuid_to_hostname(dict_t *dict, const char *key_fmt,
{
int ret = -1;
int i = 0;
- char key[1024];
+ char key[128];
int keylen;
char *uuid_str = NULL;
uuid_t uuid = {
@@ -5042,9 +4985,6 @@ glusterd_op_modify_op_ctx(glusterd_op_t op, void *ctx)
* same
*/
case GD_OP_DEFRAG_BRICK_VOLUME:
- case GD_OP_TIER_STATUS:
- case GD_OP_REMOVE_TIER_BRICK:
- case GD_OP_DETACH_TIER_STATUS:
case GD_OP_SCRUB_STATUS:
case GD_OP_SCRUB_ONDEMAND:
ret = dict_get_int32n(op_ctx, "count", SLEN("count"), &count);
@@ -5701,6 +5641,7 @@ glusterd_op_ac_stage_op(glusterd_op_sm_event_t *event, void *ctx)
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
"Failed to set transaction id.");
GF_FREE(txn_id);
+ txn_id = NULL;
goto out;
}
@@ -5719,7 +5660,8 @@ out:
* txn_opinfo can't be cleared as that'll lead to a race of referring op_ctx
* after it's being freed.
*/
- if (txn_op_info.skip_locking && priv->op_version >= GD_OP_VERSION_6_0)
+ if (txn_op_info.skip_locking && priv->op_version >= GD_OP_VERSION_6_0 &&
+ txn_id)
ret = glusterd_clear_txn_opinfo(txn_id);
if (rsp_dict)
@@ -5738,8 +5680,6 @@ glusterd_need_brick_op(glusterd_op_t op)
switch (op) {
case GD_OP_PROFILE_VOLUME:
case GD_OP_STATUS_VOLUME:
- case GD_OP_TIER_STATUS:
- case GD_OP_DETACH_TIER_STATUS:
case GD_OP_DEFRAG_BRICK_VOLUME:
case GD_OP_HEAL_VOLUME:
case GD_OP_SCRUB_STATUS:
@@ -5952,6 +5892,10 @@ glusterd_op_stage_validate(glusterd_op_t op, dict_t *dict, char **op_errstr,
ret = glusterd_op_stage_set_volume(dict, op_errstr);
break;
+ case GD_OP_GANESHA:
+ ret = glusterd_op_stage_set_ganesha(dict, op_errstr);
+ break;
+
case GD_OP_RESET_VOLUME:
ret = glusterd_op_stage_reset_volume(dict, op_errstr);
break;
@@ -6033,13 +5977,8 @@ glusterd_op_stage_validate(glusterd_op_t op, dict_t *dict, char **op_errstr,
static void
glusterd_wait_for_blockers(glusterd_conf_t *priv)
{
- uint64_t blockers = GF_ATOMIC_GET(priv->blockers);
-
- while (blockers) {
- synclock_unlock(&priv->big_lock);
- sleep(1);
- blockers = GF_ATOMIC_GET(priv->blockers);
- synclock_lock(&priv->big_lock);
+ while (GF_ATOMIC_GET(priv->blockers)) {
+ synccond_wait(&priv->cond_blockers, &priv->big_lock);
}
}
@@ -6082,7 +6021,9 @@ glusterd_op_commit_perform(glusterd_op_t op, dict_t *dict, char **op_errstr,
case GD_OP_SET_VOLUME:
ret = glusterd_op_set_volume(dict, op_errstr);
break;
-
+ case GD_OP_GANESHA:
+ ret = glusterd_op_set_ganesha(dict, op_errstr);
+ break;
case GD_OP_RESET_VOLUME:
ret = glusterd_op_reset_volume(dict, op_errstr);
break;
@@ -6267,9 +6208,6 @@ glusterd_bricks_select_remove_brick(dict_t *dict, char **op_errstr,
goto out;
}
- if (command == GF_DEFRAG_CMD_DETACH_START)
- return glusterd_bricks_select_tier_volume(dict, op_errstr, selected);
-
ret = dict_get_int32n(dict, "force", SLEN("force"), &force);
if (ret) {
gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED,
@@ -6373,6 +6311,7 @@ glusterd_bricks_select_profile_volume(dict_t *dict, char **op_errstr,
goto out;
break;
case GF_CLI_STATS_INFO:
+#ifdef BUILD_GNFS
ret = dict_get_str_boolean(dict, "nfs", _gf_false);
if (ret) {
if (!priv->nfs_svc.online) {
@@ -6397,6 +6336,7 @@ glusterd_bricks_select_profile_volume(dict_t *dict, char **op_errstr,
ret = 0;
goto out;
}
+#endif
cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
{
if (glusterd_is_brick_started(brickinfo)) {
@@ -6428,6 +6368,7 @@ glusterd_bricks_select_profile_volume(dict_t *dict, char **op_errstr,
break;
case GF_CLI_STATS_TOP:
+#ifdef BUILD_GNFS
ret = dict_get_str_boolean(dict, "nfs", _gf_false);
if (ret) {
if (!priv->nfs_svc.online) {
@@ -6452,6 +6393,7 @@ glusterd_bricks_select_profile_volume(dict_t *dict, char **op_errstr,
ret = 0;
goto out;
}
+#endif
ret = dict_get_strn(dict, "brick", SLEN("brick"), &brick);
if (!ret) {
ret = glusterd_volume_brickinfo_get_by_brick(
@@ -6671,6 +6613,10 @@ _select_hxlators_for_full_self_heal(xlator_t *this, glusterd_volinfo_t *volinfo,
glusterd_brickinfo_t *brickinfo = NULL;
int hxl_children = 0;
uuid_t candidate = {0};
+ int brick_index = 0;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ int delta = 0;
+ uuid_t candidate_max = {0};
if ((*index) == 0)
(*index)++;
@@ -6682,13 +6628,40 @@ _select_hxlators_for_full_self_heal(xlator_t *this, glusterd_volinfo_t *volinfo,
cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
{
+ if (gf_uuid_compare(brickinfo->uuid, candidate_max) > 0) {
+ if (!gf_uuid_compare(MY_UUID, brickinfo->uuid)) {
+ gf_uuid_copy(candidate_max, brickinfo->uuid);
+ } else {
+ peerinfo = glusterd_peerinfo_find(brickinfo->uuid, NULL);
+ if (peerinfo && peerinfo->connected) {
+ gf_uuid_copy(candidate_max, brickinfo->uuid);
+ }
+ }
+ }
+ }
+
+ cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
+ {
if (gf_uuid_is_null(brickinfo->uuid))
(void)glusterd_resolve_brick(brickinfo);
- if (gf_uuid_compare(brickinfo->uuid, candidate) > 0)
- gf_uuid_copy(candidate, brickinfo->uuid);
+ delta %= hxl_children;
+ if ((*index + delta) == (brick_index + hxl_children)) {
+ if (!gf_uuid_compare(MY_UUID, brickinfo->uuid)) {
+ gf_uuid_copy(candidate, brickinfo->uuid);
+ } else {
+ peerinfo = glusterd_peerinfo_find(brickinfo->uuid, NULL);
+ if (peerinfo && peerinfo->connected) {
+ gf_uuid_copy(candidate, brickinfo->uuid);
+ } else if (peerinfo &&
+ (!gf_uuid_compare(candidate_max, MY_UUID))) {
+ _add_hxlator_to_dict(dict, volinfo,
+ ((*index) - 1) / hxl_children,
+ (*hxlator_count));
+ (*hxlator_count)++;
+ }
+ }
- if ((*index) % hxl_children == 0) {
if (!gf_uuid_compare(MY_UUID, candidate)) {
_add_hxlator_to_dict(dict, volinfo,
((*index) - 1) / hxl_children,
@@ -6696,6 +6669,8 @@ _select_hxlators_for_full_self_heal(xlator_t *this, glusterd_volinfo_t *volinfo,
(*hxlator_count)++;
}
gf_uuid_clear(candidate);
+ brick_index += hxl_children;
+ delta++;
}
(*index)++;
@@ -6765,12 +6740,12 @@ fill_shd_status_for_local_bricks(dict_t *dict, glusterd_volinfo_t *volinfo,
dict_t *req_dict)
{
glusterd_brickinfo_t *brickinfo = NULL;
- char *msg = "self-heal-daemon is not running on";
- char key[1024] = {
+ static char *msg = "self-heal-daemon is not running on";
+ char key[32] = {
0,
};
int keylen;
- char value[1024] = {
+ char value[128] = {
0,
};
int ret = 0;
@@ -6839,16 +6814,18 @@ glusterd_shd_select_brick_xlator(dict_t *dict, gf_xl_afr_op_t heal_op,
int ret = -1;
glusterd_conf_t *priv = NULL;
xlator_t *this = NULL;
+ glusterd_svc_t *svc = NULL;
this = THIS;
GF_ASSERT(this);
priv = this->private;
GF_ASSERT(priv);
+ svc = &(volinfo->shd.svc);
switch (heal_op) {
case GF_SHD_OP_INDEX_SUMMARY:
case GF_SHD_OP_STATISTICS_HEAL_COUNT:
- if (!priv->shd_svc.online) {
+ if (!svc->online) {
if (!rsp_dict) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OPCTX_NULL,
"Received "
@@ -6869,7 +6846,7 @@ glusterd_shd_select_brick_xlator(dict_t *dict, gf_xl_afr_op_t heal_op,
break;
case GF_SHD_OP_STATISTICS_HEAL_COUNT_PER_REPLICA:
- if (!priv->shd_svc.online) {
+ if (!svc->online) {
if (!rsp_dict) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OPCTX_NULL,
"Received "
@@ -6920,7 +6897,6 @@ glusterd_bricks_select_heal_volume(dict_t *dict, char **op_errstr,
char *volname = NULL;
glusterd_conf_t *priv = NULL;
glusterd_volinfo_t *volinfo = NULL;
- glusterd_volinfo_t *dup_volinfo = NULL;
xlator_t *this = NULL;
char msg[2048] = {
0,
@@ -6958,31 +6934,10 @@ glusterd_bricks_select_heal_volume(dict_t *dict, char **op_errstr,
"heal op invalid");
goto out;
}
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- ret = glusterd_create_sub_tier_volinfo(volinfo, &dup_volinfo, _gf_false,
- volname);
- if (ret < 0)
- goto out;
-
- ret = glusterd_shd_select_brick_xlator(
- dict, heal_op, dup_volinfo, &index, &hxlator_count, rsp_dict);
- glusterd_volinfo_delete(dup_volinfo);
- if (ret < 0)
- goto out;
- ret = glusterd_create_sub_tier_volinfo(volinfo, &dup_volinfo, _gf_true,
- volname);
- if (ret < 0)
- goto out;
- ret = glusterd_shd_select_brick_xlator(
- dict, heal_op, dup_volinfo, &index, &hxlator_count, rsp_dict);
- glusterd_volinfo_delete(dup_volinfo);
- if (ret < 0)
- goto out;
- } else {
- ret = glusterd_shd_select_brick_xlator(dict, heal_op, volinfo, &index,
- &hxlator_count, rsp_dict);
- if (ret < 0)
- goto out;
+ ret = glusterd_shd_select_brick_xlator(dict, heal_op, volinfo, &index,
+ &hxlator_count, rsp_dict);
+ if (ret < 0) {
+ goto out;
}
if (!hxlator_count)
@@ -7004,7 +6959,7 @@ glusterd_bricks_select_heal_volume(dict_t *dict, char **op_errstr,
ret = -1;
goto out;
} else {
- pending_node->node = &(priv->shd_svc);
+ pending_node->node = &(volinfo->shd.svc);
pending_node->type = GD_NODE_SHD;
cds_list_add_tail(&pending_node->list, selected);
pending_node = NULL;
@@ -7015,69 +6970,7 @@ out:
return ret;
}
-int
-glusterd_bricks_select_tier_volume(dict_t *dict, char **op_errstr,
- struct cds_list_head *selected)
-{
- int ret = -1;
- char *volname = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- xlator_t *this = NULL;
- char msg[2048] = {
- 0,
- };
- glusterd_pending_node_t *pending_node = NULL;
- glusterd_brickinfo_t *brick = NULL;
- gf_boolean_t retval = _gf_false;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO(THIS->name, this, out);
-
- ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
- if (ret) {
- gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "volume name get failed");
- goto out;
- }
-
- ret = glusterd_volinfo_find(volname, &volinfo);
- if (ret) {
- snprintf(msg, sizeof(msg), "Volume %s does not exist", volname);
-
- *op_errstr = gf_strdup(msg);
- gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s", msg);
- goto out;
- }
- /*check if this node needs tierd*/
- cds_list_for_each_entry(brick, &volinfo->bricks, brick_list)
- {
- if (gf_uuid_compare(MY_UUID, brick->uuid) == 0) {
- retval = _gf_true;
- break;
- }
- }
-
- if (!retval)
- goto out;
-
- pending_node = GF_CALLOC(1, sizeof(*pending_node),
- gf_gld_mt_pending_node_t);
- if (!pending_node) {
- ret = -1;
- goto out;
- } else {
- pending_node->node = volinfo;
- pending_node->type = GD_NODE_TIERD;
- cds_list_add_tail(&pending_node->list, selected);
- pending_node = NULL;
- }
- ret = 0;
-
-out:
- return ret;
-}
-
-int
+static int
glusterd_bricks_select_rebalance_volume(dict_t *dict, char **op_errstr,
struct cds_list_head *selected)
{
@@ -7138,6 +7031,7 @@ glusterd_bricks_select_status_volume(dict_t *dict, char **op_errstr,
glusterd_pending_node_t *pending_node = NULL;
xlator_t *this = NULL;
glusterd_conf_t *priv = NULL;
+ glusterd_svc_t *svc = NULL;
GF_ASSERT(dict);
@@ -7166,7 +7060,6 @@ glusterd_bricks_select_status_volume(dict_t *dict, char **op_errstr,
case GF_CLI_STATUS_SHD:
case GF_CLI_STATUS_QUOTAD:
case GF_CLI_STATUS_SNAPD:
- case GF_CLI_STATUS_TIERD:
case GF_CLI_STATUS_BITD:
case GF_CLI_STATUS_SCRUB:
case GF_CLI_STATUS_CLIENT_LIST:
@@ -7213,6 +7106,7 @@ glusterd_bricks_select_status_volume(dict_t *dict, char **op_errstr,
cds_list_add_tail(&pending_node->list, selected);
ret = 0;
+#ifdef BUILD_GNFS
} else if ((cmd & GF_CLI_STATUS_NFS) != 0) {
if (!priv->nfs_svc.online) {
ret = -1;
@@ -7232,8 +7126,10 @@ glusterd_bricks_select_status_volume(dict_t *dict, char **op_errstr,
cds_list_add_tail(&pending_node->list, selected);
ret = 0;
+#endif
} else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
- if (!priv->shd_svc.online) {
+ svc = &(volinfo->shd.svc);
+ if (!svc->online) {
ret = -1;
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SELF_HEALD_DISABLED,
"Self-heal daemon is not running");
@@ -7245,7 +7141,7 @@ glusterd_bricks_select_status_volume(dict_t *dict, char **op_errstr,
ret = -1;
goto out;
}
- pending_node->node = &(priv->shd_svc);
+ pending_node->node = svc;
pending_node->type = GD_NODE_SHD;
pending_node->index = 0;
cds_list_add_tail(&pending_node->list, selected);
@@ -7311,30 +7207,6 @@ glusterd_bricks_select_status_volume(dict_t *dict, char **op_errstr,
cds_list_add_tail(&pending_node->list, selected);
ret = 0;
- } else if ((cmd & GF_CLI_STATUS_TIERD) != 0) {
- if (!volinfo->tierd.svc.online) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TIERD_NOT_RUNNING,
- "tierd is not "
- "running");
- ret = -1;
- goto out;
- }
- pending_node = GF_CALLOC(1, sizeof(*pending_node),
- gf_gld_mt_pending_node_t);
- if (!pending_node) {
- gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
- "failed to allocate "
- "memory for pending node");
- ret = -1;
- goto out;
- }
-
- pending_node->node = (void *)(&volinfo->tierd);
- pending_node->type = GD_NODE_TIERD;
- pending_node->index = 0;
- cds_list_add_tail(&pending_node->list, selected);
-
- ret = 0;
} else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
if (!volinfo->snapd.svc.online) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SNAPD_NOT_RUNNING,
@@ -7510,6 +7382,7 @@ glusterd_op_ac_send_brick_op(glusterd_op_sm_event_t *event, void *ctx)
glusterd_op_t op = GD_OP_NONE;
glusterd_req_ctx_t *req_ctx = NULL;
char *op_errstr = NULL;
+ gf_boolean_t free_req_ctx = _gf_false;
this = THIS;
priv = this->private;
@@ -7518,6 +7391,9 @@ glusterd_op_ac_send_brick_op(glusterd_op_sm_event_t *event, void *ctx)
req_ctx = ctx;
} else {
req_ctx = GF_CALLOC(1, sizeof(*req_ctx), gf_gld_mt_op_allack_ctx_t);
+ if (!req_ctx)
+ goto out;
+ free_req_ctx = _gf_true;
op = glusterd_op_get_op();
req_ctx->op = op;
gf_uuid_copy(req_ctx->uuid, MY_UUID);
@@ -7529,7 +7405,6 @@ glusterd_op_ac_send_brick_op(glusterd_op_sm_event_t *event, void *ctx)
if (op_errstr == NULL)
gf_asprintf(&op_errstr, OPERRSTR_BUILD_PAYLOAD);
opinfo.op_errstr = op_errstr;
- GF_FREE(req_ctx);
goto out;
}
}
@@ -7548,6 +7423,8 @@ glusterd_op_ac_send_brick_op(glusterd_op_sm_event_t *event, void *ctx)
}
out:
+ if (ret && free_req_ctx)
+ GF_FREE(req_ctx);
gf_msg_debug(this->name, 0, "Returning with %d", ret);
return ret;
@@ -7649,11 +7526,6 @@ glusterd_op_bricks_select(glusterd_op_t op, dict_t *dict, char **op_errstr,
ret = glusterd_bricks_select_status_volume(dict, op_errstr,
selected);
break;
- case GD_OP_TIER_STATUS:
- ret = glusterd_bricks_select_tier_volume(dict, op_errstr, selected);
- break;
-
- case GD_OP_DETACH_TIER_STATUS:
case GD_OP_DEFRAG_BRICK_VOLUME:
ret = glusterd_bricks_select_rebalance_volume(dict, op_errstr,
selected);
@@ -8097,9 +7969,12 @@ glusterd_op_sm()
glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
xlator_t *this = NULL;
glusterd_op_info_t txn_op_info;
+ glusterd_conf_t *priv = NULL;
this = THIS;
GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
ret = synclock_trylock(&gd_op_sm_lock);
if (ret) {
@@ -8177,7 +8052,8 @@ glusterd_op_sm()
"Unable to clear "
"transaction's opinfo");
} else {
- if (!(event_type == GD_OP_EVENT_STAGE_OP &&
+ if ((priv->op_version < GD_OP_VERSION_6_0) ||
+ !(event_type == GD_OP_EVENT_STAGE_OP &&
opinfo.state.state == GD_OP_STATE_STAGED &&
opinfo.skip_locking)) {
ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
@@ -8256,13 +8132,11 @@ glusterd_op_free_ctx(glusterd_op_t op, void *ctx)
case GD_OP_PROFILE_VOLUME:
case GD_OP_STATUS_VOLUME:
case GD_OP_REBALANCE:
- case GD_OP_TIER_START_STOP:
case GD_OP_HEAL_VOLUME:
case GD_OP_STATEDUMP_VOLUME:
case GD_OP_CLEARLOCKS_VOLUME:
case GD_OP_DEFRAG_BRICK_VOLUME:
case GD_OP_MAX_OPVERSION:
- case GD_OP_TIER_STATUS:
dict_unref(ctx);
break;
default: