Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-syncop.c')
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-syncop.c | 136
1 file changed, 82 insertions(+), 54 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
index 9bab2cfd54c..b73d37ad08e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
@@ -143,6 +143,8 @@ gd_brick_op_req_free(gd1_mgmt_brick_op_req *req)
     if (!req)
         return;
 
+    if (req->dict.dict_val)
+        GF_FREE(req->dict.dict_val);
     GF_FREE(req->input.input_val);
     GF_FREE(req);
 }
@@ -228,7 +230,6 @@ glusterd_syncop_aggr_rsp_dict(glusterd_op_t op, dict_t *aggr, dict_t *rsp)
         case GD_OP_CREATE_VOLUME:
         case GD_OP_ADD_BRICK:
         case GD_OP_START_VOLUME:
-        case GD_OP_ADD_TIER_BRICK:
             ret = glusterd_aggr_brick_mount_dirs(aggr, rsp);
             if (ret) {
                 gf_msg(this->name, GF_LOG_ERROR, 0,
@@ -318,11 +319,6 @@ glusterd_syncop_aggr_rsp_dict(glusterd_op_t op, dict_t *aggr, dict_t *rsp)
             ret = glusterd_volume_rebalance_use_rsp_dict(aggr, rsp);
             break;
 
-        case GD_OP_TIER_STATUS:
-        case GD_OP_DETACH_TIER_STATUS:
-        case GD_OP_REMOVE_TIER_BRICK:
-            ret = glusterd_volume_tier_use_rsp_dict(aggr, rsp);
-            /* FALLTHROUGH */
         default:
             break;
     }
@@ -410,8 +406,11 @@ gd_syncop_mgmt_v3_lock(glusterd_op_t op, dict_t *op_ctx,
 
     ret = dict_allocate_and_serialize(op_ctx, &req.dict.dict_val,
                                       &req.dict.dict_len);
-    if (ret)
+    if (ret) {
+        gf_smsg("glusterd", GF_LOG_ERROR, errno,
+                GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
         goto out;
+    }
 
     gf_uuid_copy(req.uuid, my_uuid);
     gf_uuid_copy(req.txn_id, txn_id);
@@ -511,8 +510,11 @@ gd_syncop_mgmt_v3_unlock(dict_t *op_ctx, glusterd_peerinfo_t *peerinfo,
 
     ret = dict_allocate_and_serialize(op_ctx, &req.dict.dict_val,
                                       &req.dict.dict_len);
-    if (ret)
+    if (ret) {
+        gf_smsg("glusterd", GF_LOG_ERROR, errno,
+                GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
         goto out;
+    }
 
     gf_uuid_copy(req.uuid, my_uuid);
     gf_uuid_copy(req.txn_id, txn_id);
@@ -575,14 +577,15 @@ _gd_syncop_mgmt_lock_cbk(struct rpc_req *req, struct iovec *iov, int count,
         /* Set peer as locked, so we unlock only the locked peers */
         if (rsp.op_ret == 0)
             peerinfo->locked = _gf_true;
+        RCU_READ_UNLOCK;
     } else {
+        RCU_READ_UNLOCK;
        rsp.op_ret = -1;
         gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_PEER_NOT_FOUND,
                "Could not find peer with "
                "ID %s", uuid_utoa(*peerid));
     }
-    RCU_READ_UNLOCK;
 
     op_ret = rsp.op_ret;
     op_errno = rsp.op_errno;
@@ -674,14 +677,15 @@ _gd_syncop_mgmt_unlock_cbk(struct rpc_req *req, struct iovec *iov, int count,
     peerinfo = glusterd_peerinfo_find(*peerid, NULL);
     if (peerinfo) {
         peerinfo->locked = _gf_false;
+        RCU_READ_UNLOCK;
     } else {
+        RCU_READ_UNLOCK;
         rsp.op_ret = -1;
         gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_PEER_NOT_FOUND,
                "Could not find peer with "
                "ID %s", uuid_utoa(*peerid));
     }
-    RCU_READ_UNLOCK;
 
     op_ret = rsp.op_ret;
     op_errno = rsp.op_errno;
@@ -844,16 +848,21 @@ gd_syncop_mgmt_stage_op(glusterd_peerinfo_t *peerinfo, struct syncargs *args,
     uuid_t *peerid = NULL;
 
     req = GF_CALLOC(1, sizeof(*req), gf_gld_mt_mop_stage_req_t);
-    if (!req)
+    if (!req) {
+        gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
         goto out;
+    }
 
     gf_uuid_copy(req->uuid, my_uuid);
     req->op = op;
 
     ret = dict_allocate_and_serialize(dict_out, &req->buf.buf_val,
                                       &req->buf.buf_len);
-    if (ret)
+    if (ret) {
+        gf_smsg("glusterd", GF_LOG_ERROR, errno,
+                GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
         goto out;
+    }
 
     GD_ALLOC_COPY_UUID(peerid, peerinfo->uuid, ret);
     if (ret)
@@ -905,6 +914,8 @@ _gd_syncop_brick_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
     if (rsp.output.output_len) {
         args->dict = dict_new();
         if (!args->dict) {
+            gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL,
+                    NULL);
             ret = -1;
             args->op_errno = ENOMEM;
             goto out;
@@ -912,8 +923,11 @@ _gd_syncop_brick_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
 
         ret = dict_unserialize(rsp.output.output_val, rsp.output.output_len,
                                &args->dict);
-        if (ret < 0)
+        if (ret < 0) {
+            gf_smsg(this->name, GF_LOG_ERROR, errno,
+                    GD_MSG_DICT_UNSERIALIZE_FAIL, NULL);
             goto out;
+        }
     }
 
     args->op_ret = rsp.op_ret;
@@ -1154,16 +1168,21 @@ gd_syncop_mgmt_commit_op(glusterd_peerinfo_t *peerinfo, struct syncargs *args,
     uuid_t *peerid = NULL;
 
     req = GF_CALLOC(1, sizeof(*req), gf_gld_mt_mop_commit_req_t);
-    if (!req)
+    if (!req) {
+        gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
         goto out;
+    }
 
     gf_uuid_copy(req->uuid, my_uuid);
     req->op = op;
 
     ret = dict_allocate_and_serialize(dict_out, &req->buf.buf_val,
                                       &req->buf.buf_len);
-    if (ret)
+    if (ret) {
+        gf_smsg("glusterd", GF_LOG_ERROR, errno,
+                GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
         goto out;
+    }
 
     GD_ALLOC_COPY_UUID(peerid, peerinfo->uuid, ret);
     if (ret)
@@ -1191,7 +1210,12 @@ gd_lock_op_phase(glusterd_conf_t *conf, glusterd_op_t op, dict_t *op_ctx,
     struct syncargs args = {0};
 
     this = THIS;
-    synctask_barrier_init((&args));
+    GF_VALIDATE_OR_GOTO("glusterd", this, out);
+
+    ret = synctask_barrier_init((&args));
+    if (ret)
+        goto out;
+
     peer_cnt = 0;
 
     RCU_READ_LOCK;
@@ -1275,8 +1299,10 @@ gd_stage_op_phase(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
     GF_ASSERT(conf);
 
     rsp_dict = dict_new();
-    if (!rsp_dict)
+    if (!rsp_dict) {
+        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
         goto out;
+    }
 
     if ((op == GD_OP_CREATE_VOLUME) || (op == GD_OP_ADD_BRICK) ||
         (op == GD_OP_START_VOLUME))
@@ -1321,7 +1347,10 @@ stage_done:
     }
 
     gd_syncargs_init(&args, aggr_dict);
-    synctask_barrier_init((&args));
+    ret = synctask_barrier_init((&args));
+    if (ret)
+        goto out;
+
     peer_cnt = 0;
 
     RCU_READ_LOCK;
@@ -1402,6 +1431,7 @@ gd_commit_op_phase(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
 
     rsp_dict = dict_new();
     if (!rsp_dict) {
+        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
         ret = -1;
         goto out;
     }
@@ -1449,14 +1479,20 @@ commit_done:
     }
 
     gd_syncargs_init(&args, op_ctx);
-    synctask_barrier_init((&args));
+    ret = synctask_barrier_init((&args));
+    if (ret)
+        goto out;
+
     peer_cnt = 0;
     origin_glusterd = is_origin_glusterd(req_dict);
 
     if (op == GD_OP_STATUS_VOLUME) {
         ret = dict_get_uint32(req_dict, "cmd", &cmd);
-        if (ret)
+        if (ret) {
+            gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+                    "Key=cmd", NULL);
             goto out;
+        }
 
         if (origin_glusterd) {
             if ((cmd & GF_CLI_STATUS_ALL)) {
@@ -1541,7 +1577,10 @@ gd_unlock_op_phase(glusterd_conf_t *conf, glusterd_op_t op, int *op_ret,
         goto out;
     }
 
-    synctask_barrier_init((&args));
+    ret = synctask_barrier_init((&args));
+    if (ret)
+        goto out;
+
     peer_cnt = 0;
 
     if (cluster_lock) {
@@ -1679,10 +1718,12 @@ gd_brick_op_phase(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
     rpc_clnt_t *rpc = NULL;
     dict_t *rsp_dict = NULL;
     int32_t cmd = GF_OP_CMD_NONE;
+    glusterd_volinfo_t *volinfo = NULL;
 
     this = THIS;
     rsp_dict = dict_new();
     if (!rsp_dict) {
+        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
         ret = -1;
         goto out;
     }
@@ -1710,37 +1751,30 @@ gd_brick_op_phase(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
     cds_list_for_each_entry_safe(pending_node, tmp, &selected, list)
     {
         rpc = glusterd_pending_node_get_rpc(pending_node);
+        /* In the case of rebalance if the rpc object is null, we try to
+         * create the rpc object. if the rebalance daemon is down, it returns
+         * -1. otherwise, rpc object will be created and referenced.
+         */
         if (!rpc) {
-            if (pending_node->type == GD_NODE_REBALANCE) {
-                ret = 0;
-                glusterd_defrag_volume_node_rsp(req_dict, NULL, op_ctx);
+            if (pending_node->type == GD_NODE_REBALANCE && pending_node->node) {
+                volinfo = pending_node->node;
+                ret = glusterd_rebalance_rpc_create(volinfo);
+                if (ret) {
+                    ret = 0;
+                    glusterd_defrag_volume_node_rsp(req_dict, NULL, op_ctx);
+                    goto out;
+                } else {
+                    rpc = glusterd_defrag_rpc_get(volinfo->rebal.defrag);
+                }
+            } else {
+                ret = -1;
+                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RPC_FAILURE,
+                       "Brick Op failed "
+                       "due to rpc failure.");
                 goto out;
             }
-
-            ret = -1;
-            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RPC_FAILURE,
-                   "Brick Op failed "
-                   "due to rpc failure.");
-            goto out;
-        }
-        /* Redirect operation to be detach tier via rebalance flow. */
-        ret = dict_get_int32(req_dict, "command", &cmd);
-        if (!ret) {
-            if (cmd == GF_OP_CMD_DETACH_START) {
-                /* this change is left to support backward
-                 * compatibility. */
-                op = GD_OP_REBALANCE;
-                ret = dict_set_int32(req_dict, "rebalance-command",
-                                     GF_DEFRAG_CMD_START_DETACH_TIER);
-            } else if (cmd == GF_DEFRAG_CMD_DETACH_START) {
-                op = GD_OP_REMOVE_TIER_BRICK;
-                ret = dict_set_int32(req_dict, "rebalance-command",
-                                     GF_DEFRAG_CMD_DETACH_START);
-            }
-            if (ret)
-                goto out;
-        }
+        }
 
         ret = gd_syncop_mgmt_brick_op(rpc, pending_node, op, req_dict, op_ctx,
                                       op_errstr);
         if (op == GD_OP_STATUS_VOLUME) {
@@ -1752,12 +1786,6 @@ gd_brick_op_phase(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
                 if (dict_get(op_ctx, "client-count"))
                     break;
             }
-        } else if (cmd == GF_OP_CMD_DETACH_START) {
-            op = GD_OP_REMOVE_BRICK;
-            dict_del(req_dict, "rebalance-command");
-        } else if (cmd == GF_DEFRAG_CMD_DETACH_START) {
-            op = GD_OP_REMOVE_TIER_BRICK;
-            dict_del(req_dict, "rebalance-command");
         }
         if (ret)
             goto out;
@@ -1770,7 +1798,7 @@ gd_brick_op_phase(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
     pending_node = NULL;
     ret = 0;
 out:
-    if (pending_node)
+    if (pending_node && pending_node->node)
         glusterd_pending_node_put_rpc(pending_node);
 
     if (rsp_dict)
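Reviewer note: the most substantive behavioural change above is in gd_brick_op_phase(). The sketch below is a condensed view of that new flow, not additional code from the patch; identifiers are taken from the diff itself, while the surrounding loop, variable declarations, and the out: label are assumed from the function context and omitted here.

        /* Condensed sketch (assumed context): a rebalance node whose rpc is
         * missing is no longer reported as down right away; glusterd first
         * tries to (re)create the connection. */
        rpc = glusterd_pending_node_get_rpc(pending_node);
        if (!rpc) {
            if (pending_node->type == GD_NODE_REBALANCE && pending_node->node) {
                volinfo = pending_node->node;                 /* rebalance volume */
                ret = glusterd_rebalance_rpc_create(volinfo); /* try to connect */
                if (ret) {
                    /* daemon really is down: synthesize a "node down"
                     * response for this node and stop the brick-op phase */
                    ret = 0;
                    glusterd_defrag_volume_node_rsp(req_dict, NULL, op_ctx);
                    goto out;
                }
                /* connection created: take a reference on the defrag rpc */
                rpc = glusterd_defrag_rpc_get(volinfo->rebal.defrag);
            } else {
                ret = -1;
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RPC_FAILURE,
                       "Brick Op failed due to rpc failure.");
                goto out;
            }
        }

In other words, glusterd_defrag_volume_node_rsp() is now only the fallback when glusterd_rebalance_rpc_create() fails, instead of the unconditional answer whenever the rpc object is absent.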
