With this change a new disk size can be specified when recreating disks.
Signed-off-by: Michael Hanselmann <hansmi@google.com>
Reviewed-by: Iustin Pop <iustin@google.com>
- Deprecated ``admin_up`` field. Instead, ``admin_state`` is introduced,
with 3 possible values -- ``up``, ``down`` and ``offline``.
+- Replaced ``--disks`` option of ``gnt-instance replace-disks`` with a
+ more flexible ``--disk`` option. Now disk size and mode can be changed
+ upon recreation.
Version 2.5.0 rc5
from ganeti import netutils
from ganeti import ssh
from ganeti import objects
+from ganeti import ht
_EXPAND_CLUSTER = "cluster"
"""
instance_name = args[0]
+
+ disks = []
+
if opts.disks:
- try:
- opts.disks = [int(v) for v in opts.disks.split(",")]
- except (ValueError, TypeError), err:
- ToStderr("Invalid disks value: %s" % str(err))
- return 1
- else:
- opts.disks = []
+ for didx, ddict in opts.disks:
+ didx = int(didx)
+
+ if not ht.TDict(ddict):
+ msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
+ raise errors.OpPrereqError(msg)
+
+ if constants.IDISK_SIZE in ddict:
+ try:
+ ddict[constants.IDISK_SIZE] = \
+ utils.ParseUnit(ddict[constants.IDISK_SIZE])
+ except ValueError, err:
+ raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
+ (didx, err))
+
+ disks.append((didx, ddict))
+
+ # TODO: Verify modifyable parameters (already done in
+ # LUInstanceRecreateDisks, but it'd be nice to have in the client)
if opts.node:
pnode, snode = SplitNodeOption(opts.node)
nodes = []
op = opcodes.OpInstanceRecreateDisks(instance_name=instance_name,
- disks=opts.disks,
- nodes=nodes)
+ disks=disks, nodes=nodes)
SubmitOrSend(op, opts)
+
return 0
"[-f] <instance>", "Deactivate an instance's disks"),
"recreate-disks": (
RecreateDisks, ARGS_ONE_INSTANCE,
- [SUBMIT_OPT, DISKIDX_OPT, NODE_PLACEMENT_OPT, DRY_RUN_OPT, PRIORITY_OPT],
+ [SUBMIT_OPT, DISK_OPT, NODE_PLACEMENT_OPT, DRY_RUN_OPT, PRIORITY_OPT],
"<instance>", "Recreate an instance's disks"),
"grow-disk": (
GrowDisk,
HTYPE = constants.HTYPE_INSTANCE
REQ_BGL = False
+ _MODIFYABLE = frozenset([
+ constants.IDISK_SIZE,
+ constants.IDISK_MODE,
+ ])
+
+ # New or changed disk parameters may have different semantics
+ assert constants.IDISK_PARAMS == (_MODIFYABLE | frozenset([
+ constants.IDISK_ADOPT,
+
+ # TODO: Implement support changing VG while recreating
+ constants.IDISK_VG,
+ constants.IDISK_METAVG,
+ ]))
+
def CheckArguments(self):
- # normalise the disk list
- self.op.disks = sorted(frozenset(self.op.disks))
+ if self.op.disks and ht.TPositiveInt(self.op.disks[0]):
+ # Normalize and convert deprecated list of disk indices
+ self.op.disks = [(idx, {}) for idx in sorted(frozenset(self.op.disks))]
+
+ duplicates = utils.FindDuplicates(map(compat.fst, self.op.disks))
+ if duplicates:
+ raise errors.OpPrereqError("Some disks have been specified more than"
+ " once: %s" % utils.CommaJoin(duplicates),
+ errors.ECODE_INVAL)
+
+ for (idx, params) in self.op.disks:
+ utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)
+ unsupported = frozenset(params.keys()) - self._MODIFYABLE
+ if unsupported:
+ raise errors.OpPrereqError("Parameters for disk %s try to change"
+ " unmodifyable parameter(s): %s" %
+ (idx, utils.CommaJoin(unsupported)),
+ errors.ECODE_INVAL)
def ExpandNames(self):
self._ExpandAndLockInstance()
if instance.disk_template == constants.DT_DISKLESS:
raise errors.OpPrereqError("Instance '%s' has no disks" %
self.op.instance_name, errors.ECODE_INVAL)
+
# if we replace nodes *and* the old primary is offline, we don't
# check
assert instance.primary_node in self.owned_locks(locking.LEVEL_NODE)
_CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
msg="cannot recreate disks")
- if not self.op.disks:
- self.op.disks = range(len(instance.disks))
+ if self.op.disks:
+ self.disks = dict(self.op.disks)
else:
- for idx in self.op.disks:
- if idx >= len(instance.disks):
- raise errors.OpPrereqError("Invalid disk index '%s'" % idx,
- errors.ECODE_INVAL)
- if self.op.disks != range(len(instance.disks)) and self.op.nodes:
+ self.disks = dict((idx, {}) for idx in range(len(instance.disks)))
+
+ maxidx = max(self.disks.keys())
+ if maxidx >= len(instance.disks):
+ raise errors.OpPrereqError("Invalid disk index '%s'" % maxidx,
+ errors.ECODE_INVAL)
+
+ if (self.op.nodes and
+ sorted(self.disks.keys()) != range(len(instance.disks))):
raise errors.OpPrereqError("Can't recreate disks partially and"
" change the nodes at the same time",
errors.ECODE_INVAL)
+
self.instance = instance
def Exec(self, feedback_fn):
self.owned_locks(locking.LEVEL_NODE_RES))
to_skip = []
- mods = [] # keeps track of needed logical_id changes
+ mods = [] # keeps track of needed changes
for idx, disk in enumerate(instance.disks):
- if idx not in self.op.disks: # disk idx has not been passed in
+ try:
+ changes = self.disks[idx]
+ except KeyError:
+ # Disk should not be recreated
to_skip.append(idx)
continue
+
# update secondaries for disks, if needed
- if self.op.nodes:
- if disk.dev_type == constants.LD_DRBD8:
- # need to update the nodes and minors
- assert len(self.op.nodes) == 2
- assert len(disk.logical_id) == 6 # otherwise disk internals
- # have changed
- (_, _, old_port, _, _, old_secret) = disk.logical_id
- new_minors = self.cfg.AllocateDRBDMinor(self.op.nodes, instance.name)
- new_id = (self.op.nodes[0], self.op.nodes[1], old_port,
- new_minors[0], new_minors[1], old_secret)
- assert len(disk.logical_id) == len(new_id)
- mods.append((idx, new_id))
+ if self.op.nodes and disk.dev_type == constants.LD_DRBD8:
+ # need to update the nodes and minors
+ assert len(self.op.nodes) == 2
+ assert len(disk.logical_id) == 6 # otherwise disk internals
+ # have changed
+ (_, _, old_port, _, _, old_secret) = disk.logical_id
+ new_minors = self.cfg.AllocateDRBDMinor(self.op.nodes, instance.name)
+ new_id = (self.op.nodes[0], self.op.nodes[1], old_port,
+ new_minors[0], new_minors[1], old_secret)
+ assert len(disk.logical_id) == len(new_id)
+ else:
+ new_id = None
+
+ mods.append((idx, new_id, changes))
# now that we have passed all asserts above, we can apply the mods
# in a single run (to avoid partial changes)
- for idx, new_id in mods:
- instance.disks[idx].logical_id = new_id
+ for idx, new_id, changes in mods:
+ disk = instance.disks[idx]
+ if new_id is not None:
+ assert disk.dev_type == constants.LD_DRBD8
+ disk.logical_id = new_id
+ if changes:
+ disk.Update(size=changes.get(constants.IDISK_SIZE, None),
+ mode=changes.get(constants.IDISK_MODE, None))
# change primary node, if needed
if self.op.nodes:
raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
" disk type %s" % self.dev_type)
+ def Update(self, size=None, mode=None):
+ """Apply changes to size and mode.
+
+ """
+ if self.dev_type == constants.LD_DRBD8:
+ if self.children:
+ self.children[0].Update(size=size, mode=mode)
+ else:
+ assert not self.children
+
+ if size is not None:
+ self.size = size
+ if mode is not None:
+ self.mode = mode
+
def UnsetSize(self):
"""Sets recursively the size to zero for the disk and its children.
ht.TListOf(ht.TAnd(ht.TIsLength(len(_TSetParamsResultItemItems)),
ht.TItems(_TSetParamsResultItemItems)))
+# TODO: Generate check from constants.IDISK_PARAMS_TYPES (however, not all users
+# of this check support all parameters)
+_TDiskParams = ht.TDictOf(ht.TElemOf(constants.IDISK_PARAMS),
+ ht.TOr(ht.TNonEmptyString, ht.TInt))
+
_SUMMARY_PREFIX = {
"CLUSTER_": "C_",
"GROUP_": "G_",
_PNameCheck,
_PIgnoreIpolicy,
("beparams", ht.EmptyDict, ht.TDict, "Backend parameters for instance"),
- ("disks", ht.NoDefault,
- # TODO: Generate check from constants.IDISK_PARAMS_TYPES
- ht.TListOf(ht.TDictOf(ht.TElemOf(constants.IDISK_PARAMS),
- ht.TOr(ht.TNonEmptyString, ht.TInt))),
+ ("disks", ht.NoDefault, ht.TListOf(_TDiskParams),
"Disk descriptions, for example ``[{\"%s\": 100}, {\"%s\": 5}]``;"
" each disk definition must contain a ``%s`` value and"
" can contain an optional ``%s`` value denoting the disk access mode"
class OpInstanceRecreateDisks(OpCode):
"""Recreate an instance's disks."""
+ _TDiskChanges = \
+ ht.TAnd(ht.TIsLength(2),
+ ht.TItems([ht.Comment("Disk index")(ht.TPositiveInt),
+ ht.Comment("Parameters")(_TDiskParams)]))
+
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
- ("disks", ht.EmptyList, ht.TListOf(ht.TPositiveInt),
- "List of disk indexes"),
+ ("disks", ht.EmptyList,
+ ht.TOr(ht.TListOf(ht.TPositiveInt), ht.TListOf(_TDiskChanges)),
+ "List of disk indexes (deprecated) or a list of tuples containing a disk"
+ " index and a possibly empty dictionary with disk parameter changes"),
("nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
"New instance nodes, if relocation is desired"),
]
RECREATE-DISKS
^^^^^^^^^^^^^^
-**recreate-disks** [--submit] [--disks=``indices``] [-n node1:[node2]]
- {*instance*}
+| **recreate-disks** [--submit] [-n node1:[node2]]
+| [--disk=*N*[:[size=*VAL*][,mode=*ro\|rw*]]] {*instance*}
-Recreates the disks of the given instance, or only a subset of the
-disks (if the option ``disks`` is passed, which must be a
-comma-separated list of disk indices, starting from zero).
+Recreates all or a subset of disks of the given instance.
Note that this functionality should only be used for missing disks; if
any of the given disks already exists, the operation will fail. While
this is suboptimal, recreate-disks should hopefully not be needed in
normal operation and as such the impact of this is low.
+If only a subset of disks should be recreated, any number of ``--disk``
+options can be specified. Each option expects a disk index and an
+optional list of disk parameters to change. Only ``size`` and ``mode``
+can be changed while recreating disks. To recreate all disks while
+changing parameters on a subset only, a ``--disk`` option must be given
+for every disk of the instance.
+
Optionally the instance's disks can be recreated on different
nodes. This can be useful if, for example, the original nodes of the
instance have gone down (and are marked offline), so we can't recreate
on the same nodes. To do this, pass the new node(s) via ``-n`` option,
with a syntax similar to the **add** command. The number of nodes
passed must equal the number of nodes that the instance currently
-has. Note that changing nodes is only allowed for 'all disk'
-replacement (when ``--disks`` is not passed).
has. Note that changing nodes is only allowed when all disks are
replaced, i.e. when no ``--disk`` option is passed.
The ``--submit`` option is used to send the job to the master daemon
but not wait for its completion. The job ID will be shown so that it