raise errors.OpPrereqError("Invalid file driver name '%s'" %
self.op.file_driver, errors.ECODE_INVAL)
- if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
- raise errors.OpPrereqError("File storage directory path not absolute",
- errors.ECODE_INVAL)
+ if self.op.disk_template == constants.DT_FILE:
+ opcodes.RequireFileStorage()
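NOTE: opcodes.RequireFileStorage itself is not shown in this patch. Judging
from the constants.ENABLE_FILE_STORAGE check it replaces further down, a
minimal sketch of the gate (the exact message and error code here are
assumptions, not taken from the source) would be:

    def RequireFileStorage():
      """Fail if file storage was disabled at configure time (sketch)."""
      if not constants.ENABLE_FILE_STORAGE:
        raise errors.OpPrereqError("File storage disabled at configure"
                                   " time", errors.ECODE_INVAL)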
### Node/iallocator related checks
_CheckIAllocatorOrNode(self, "iallocator", "pnode")
if name in os_defs and os_defs[name] == self.op.osparams[name]:
del self.op.osparams[name]
+ def _CalculateFileStorageDir(self):
+ """Calculate final instance file storage dir.
+
+ """
+ # file storage dir calculation/check
+ self.instance_file_storage_dir = None
+ if self.op.disk_template == constants.DT_FILE:
+ # build the full file storage dir path
+ joinargs = []
+
+ cfg_storagedir = self.cfg.GetFileStorageDir()
+ if not cfg_storagedir:
+        raise errors.OpPrereqError("Cluster file storage dir not defined",
+                                   errors.ECODE_STATE)
+ joinargs.append(cfg_storagedir)
+
+ if self.op.file_storage_dir is not None:
+ joinargs.append(self.op.file_storage_dir)
+
+ joinargs.append(self.op.instance_name)
+
+ # pylint: disable-msg=W0142
+ self.instance_file_storage_dir = utils.PathJoin(*joinargs)
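NOTE: the helper joins, in order, the cluster-wide storage directory, the
optional per-instance subdirectory from the opcode, and the instance name.
A standalone illustration with made-up values, assuming utils.PathJoin
behaves like os.path.join plus sanity checks on the components:

    import os.path

    joinargs = ["/srv/ganeti/file-storage"]   # cfg.GetFileStorageDir()
    file_storage_dir = "mysubdir"             # self.op.file_storage_dir
    if file_storage_dir is not None:
      joinargs.append(file_storage_dir)
    joinargs.append("inst1.example.com")      # self.op.instance_name

    print(os.path.join(*joinargs))
    # -> /srv/ganeti/file-storage/mysubdir/inst1.example.com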
+
def CheckPrereq(self):
"""Check prerequisites.
"""
+ self._CalculateFileStorageDir()
+
if self.op.mode == constants.INSTANCE_IMPORT:
export_info = self._ReadExportInfo()
self._ReadExportParams(export_info)
else:
network_port = None
- if constants.ENABLE_FILE_STORAGE:
- # this is needed because os.path.join does not accept None arguments
- if self.op.file_storage_dir is None:
- string_file_storage_dir = ""
- else:
- string_file_storage_dir = self.op.file_storage_dir
-
- # build the full file storage dir path
- file_storage_dir = utils.PathJoin(self.cfg.GetFileStorageDir(),
- string_file_storage_dir, instance)
- else:
- file_storage_dir = ""
-
disks = _GenerateDiskTemplate(self,
self.op.disk_template,
instance, pnode_name,
self.secondaries,
self.disks,
- file_storage_dir,
+ self.instance_file_storage_dir,
self.op.file_driver,
0,
feedback_fn)
else:
fn = self._ExecDrbd8DiskOnly
- return fn(feedback_fn)
-
+ result = fn(feedback_fn)
finally:
# Deactivate the instance disks if we're replacing them on a
# down instance
if activate_disks:
_SafeShutdownInstanceDisks(self.lu, self.instance)
- if __debug__:
- # Verify owned locks
- owned_locks = self.lu.context.glm.list_owned(locking.LEVEL_NODE)
- assert ((self.early_release and not owned_locks) or
- (not self.early_release and
- set(owned_locks) == set(self.node_secondary_ip))), \
- ("Not owning the correct locks, early_release=%s, owned=%r" %
- (self.early_release, owned_locks))
+ if __debug__:
+ # Verify owned locks
+ owned_locks = self.lu.context.glm.list_owned(locking.LEVEL_NODE)
+ assert ((self.early_release and not owned_locks) or
+ (not self.early_release and
+ set(owned_locks) == set(self.node_secondary_ip))), \
+ ("Not owning the correct locks, early_release=%s, owned=%r" %
+ (self.early_release, owned_locks))
+
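+    # The lock assertion above now runs whether or not the disks had to
+    # be deactivated (it was previously nested under activate_disks);
+    # only afterwards is fn's result handed back.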
+ return result
def _CheckVolumeGroup(self, nodes):
self.lu.LogInfo("Checking volume groups")
(node_name, self.instance.name))
def _CreateNewStorage(self, node_name):
+ """Create new storage on the primary or secondary node.
+
+ This is only used for same-node replaces, not for changing the
+ secondary node, hence we don't want to modify the existing disk.
+
+ """
iv_names = {}
for idx, dev in enumerate(self.instance.disks):
logical_id=(vg_meta, names[1]))
new_lvs = [lv_data, lv_meta]
- old_lvs = dev.children
+ old_lvs = [child.Copy() for child in dev.children]
iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
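NOTE: the Copy() call above matters because the loops below rewrite
logical_id on the old LVs. A standalone sketch of the aliasing bug being
fixed, with a hypothetical stand-in for Ganeti's disk objects:

    class Disk:
      def __init__(self, logical_id):
        self.logical_id = logical_id
      def Copy(self):
        return Disk(self.logical_id)

    children = [Disk(("xenvg", "disk0_data"))]   # dev.children

    old_lvs = children                           # old code: aliased
    old_lvs[0].logical_id = ("xenvg", "disk0_data_replaced")
    assert children[0].logical_id[1] == "disk0_data_replaced"  # corrupted!

    children[0].logical_id = ("xenvg", "disk0_data")           # reset
    old_lvs = [c.Copy() for c in children]       # new code: independent
    old_lvs[0].logical_id = ("xenvg", "disk0_data_replaced")
    assert children[0].logical_id == ("xenvg", "disk0_data")   # intact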
# we pass force_create=True to force the LVM creation
rename_new_to_old)
result.Raise("Can't rename new LVs on node %s" % self.target_node)
+      # Intermediate step of the in-memory modifications: the new LVs take
+      # over the logical IDs of the LVs they replace
for old, new in zip(old_lvs, new_lvs):
new.logical_id = old.logical_id
self.cfg.SetDiskID(new, self.target_node)
+ # We need to modify old_lvs so that removal later removes the
+ # right LVs, not the newly added ones; note that old_lvs is a
+ # copy here
for disk in old_lvs:
disk.logical_id = ren_fn(disk, temp_suffix)
self.cfg.SetDiskID(disk, self.target_node)
"volumes"))
raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
- dev.children = new_lvs
-
- self.cfg.Update(self.instance, feedback_fn)
-
cstep = 5
if self.early_release:
self.lu.LogStep(cstep, steps_total, "Removing old storage")
locks[locking.LEVEL_NODE] = self.op.nodes + [self.op.remote_node]
def Exec(self, feedback_fn):
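+    # Compute the candidate instances up front: if none of the given
+    # nodes hosts secondary instances, there is nothing to do for either
+    # the remote-node or the iallocator variant.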
+ instances = []
+ for node in self.op.nodes:
+ instances.extend(_GetNodeSecondaryInstances(self.cfg, node))
+ if not instances:
+ return []
+
if self.op.remote_node is not None:
- instances = []
- for node in self.op.nodes:
- instances.extend(_GetNodeSecondaryInstances(self.cfg, node))
result = []
for i in instances:
if i.primary_node == self.op.remote_node: