+ try:
+ helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
+ instance)
+
+ helper.CreateSnapshots()
+ try:
+ if (self.op.shutdown and instance.admin_up and
+ not self.op.remove_instance):
+ assert not activate_disks
+ feedback_fn("Starting instance %s" % instance.name)
+ result = self.rpc.call_instance_start(src_node, instance, None, None)
+ msg = result.fail_msg
+ if msg:
+ feedback_fn("Failed to start instance: %s" % msg)
+ _ShutdownInstanceDisks(self, instance)
+ raise errors.OpExecError("Could not start instance: %s" % msg)
+
+ if self.op.mode == constants.EXPORT_MODE_LOCAL:
+ (fin_resu, dresults) = helper.LocalExport(self.dst_node)
+ elif self.op.mode == constants.EXPORT_MODE_REMOTE:
+ connect_timeout = constants.RIE_CONNECT_TIMEOUT
+ timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
+
+ (key_name, _, _) = self.x509_key_name
+
+ dest_ca_pem = \
+ OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
+ self.dest_x509_ca)
+
+ (fin_resu, dresults) = helper.RemoteExport(self.dest_disk_info,
+ key_name, dest_ca_pem,
+ timeouts)
+ finally:
+ helper.Cleanup()
+
+ # Check for backwards compatibility
+ assert len(dresults) == len(instance.disks)
+ assert compat.all(isinstance(i, bool) for i in dresults), \
+ "Not all results are boolean: %r" % dresults
+
+ finally:
+ if activate_disks:
+ feedback_fn("Deactivating disks for %s" % instance.name)
+ _ShutdownInstanceDisks(self, instance)
+
+ if not (compat.all(dresults) and fin_resu):
+ failures = []
+ if not fin_resu:
+ failures.append("export finalization")
+ if not compat.all(dresults):
+ fdsk = utils.CommaJoin(idx for (idx, dsk) in enumerate(dresults)
+ if not dsk)
+ failures.append("disk export: disk(s) %s" % fdsk)
+
+ raise errors.OpExecError("Export failed, errors in %s" %
+ utils.CommaJoin(failures))
+
+ # At this point, the export was successful, we can cleanup/finish
+
+ # Remove instance if requested
+ if self.op.remove_instance:
+ feedback_fn("Removing instance %s" % instance.name)
+ _RemoveInstance(self, feedback_fn, instance,
+ self.op.ignore_remove_failures)
+
+ if self.op.mode == constants.EXPORT_MODE_LOCAL:
+ self._CleanupExports(feedback_fn)
+
+ return fin_resu, dresults
+
+
class LUBackupRemove(NoHooksLU):
  """Remove exports related to the named instance.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    # We need all nodes to be locked in order for RemoveExport to work, but we
    # don't need to lock the instance itself, as nothing will happen to it (and
    # we can remove exports also for a removed instance)
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    # Resolve the given name to a known instance; if the lookup fails the
    # export may belong to an already-removed instance, in which case the
    # name is usable as-is only if it was an FQDN.
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    fqdn_warn = not expanded_name
    instance_name = expanded_name or self.op.instance_name

    node_list = self.acquired_locks[locking.LEVEL_NODE]
    export_info = self.rpc.call_export_list(node_list)
    found = False
    for (node, node_result) in export_info.items():
      msg = node_result.fail_msg
      if msg:
        # Best effort: a node that cannot be queried is logged and skipped
        self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
        continue
      if instance_name in node_result.payload:
        found = True
        remove_result = self.rpc.call_export_remove(node, instance_name)
        remove_msg = remove_result.fail_msg
        if remove_msg:
          logging.error("Could not remove export for instance %s"
                        " on node %s: %s", instance_name, node, remove_msg)

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")
+
+
class LUGroupAdd(LogicalUnit):
  """Logical unit for creating node groups.

  """
  HPATH = "group-add"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # Generate the new group's UUID here already, so the matching lock can be
    # created and acquired. Exec() later instructs cfg.AddNodeGroup to skip
    # checking whether the UUID is already present in the configuration.
    self.group_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
    self.needed_locks = {}
    self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the given group name is not an existing node group
    already.

    """
    # LookupNodeGroup raising OpPrereqError means the name is still free
    duplicate = True
    try:
      existing_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    except errors.OpPrereqError:
      duplicate = False

    if duplicate:
      raise errors.OpPrereqError("Desired group name '%s' already exists as a"
                                 " node group (UUID: %s)" %
                                 (self.op.group_name, existing_uuid),
                                 errors.ECODE_EXISTS)

    if self.op.ndparams:
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "GROUP_NAME": self.op.group_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    master = self.cfg.GetMasterNode()
    return ([master], [master])

  def Exec(self, feedback_fn):
    """Add the node group to the cluster.

    """
    new_group = objects.NodeGroup(name=self.op.group_name, members=[],
                                  uuid=self.group_uuid,
                                  alloc_policy=self.op.alloc_policy,
                                  ndparams=self.op.ndparams)

    self.cfg.AddNodeGroup(new_group, self.proc.GetECId(), check_uuid=False)
    del self.remove_locks[locking.LEVEL_NODEGROUP]
+
+
class LUGroupAssignNodes(NoHooksLU):
  """Logical unit for assigning nodes to groups.

  """
  REQ_BGL = False

  def ExpandNames(self):
    # These raise errors.OpPrereqError on their own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    self.op.nodes = _GetWantedNodes(self, self.op.nodes)

    # We want to lock all the affected nodes and groups. We have readily
    # available the list of nodes, and the *destination* group. To gather the
    # list of "source" groups, we need to fetch node information.
    self.node_data = self.cfg.GetAllNodesInfo()
    affected_groups = set(self.node_data[node].group for node in self.op.nodes)
    affected_groups.add(self.group_uuid)

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: list(affected_groups),
      locking.LEVEL_NODE: self.op.nodes,
      }

  def CheckPrereq(self):
    """Check prerequisites.

    Verifies the target group exists and warns (or aborts, without --force)
    about instances that would end up split across node groups.

    """
    self.group = self.cfg.GetNodeGroup(self.group_uuid)
    instance_data = self.cfg.GetAllInstancesInfo()

    if self.group is None:
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
                               (self.op.group_name, self.group_uuid))

    (new_splits, previous_splits) = \
      self.CheckAssignmentForSplitInstances([(node, self.group_uuid)
                                             for node in self.op.nodes],
                                            self.node_data, instance_data)

    if new_splits:
      fmt_new_splits = utils.CommaJoin(utils.NiceSort(new_splits))

      if not self.op.force:
        raise errors.OpExecError("The following instances get split by this"
                                 " change and --force was not given: %s" %
                                 fmt_new_splits)
      else:
        self.LogWarning("This operation will split the following instances: %s",
                        fmt_new_splits)

    if previous_splits:
      # FIX: message used to say "spit across groups" instead of "split"
      self.LogWarning("In addition, these already-split instances continue"
                      " to be split across groups: %s",
                      utils.CommaJoin(utils.NiceSort(previous_splits)))

  def Exec(self, feedback_fn):
    """Assign nodes to a new group.

    """
    # NOTE(review): this mutates the node objects obtained from
    # cfg.GetAllNodesInfo() in ExpandNames and relies on cfg.Update(group)
    # persisting all modified nodes as well -- confirm this is the intended
    # configuration-update mechanism.
    for node in self.op.nodes:
      self.node_data[node].group = self.group_uuid

    self.cfg.Update(self.group, feedback_fn) # Saves all modified nodes.

  @staticmethod
  def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
    """Check for split instances after a node assignment.

    This method considers a series of node assignments as an atomic operation,
    and returns information about split instances after applying the set of
    changes.

    In particular, it returns information about newly split instances, and
    instances that were already split, and remain so after the change.

    Only instances whose disk template is listed in constants.DTS_INT_MIRROR
    are considered.

    @type changes: list of (node_name, new_group_uuid) pairs.
    @param changes: list of node assignments to consider.
    @type node_data: dict
    @param node_data: a dict with data for all nodes
    @type instance_data: dict
    @param instance_data: a dict with all instances to consider
    @rtype: a two-tuple
    @return: a list of instances that were previously okay and result split as
      a consequence of this change, and a list of instances that were
      previously split and this change does not fix.

    """
    # Only assignments that actually move a node to a different group matter
    changed_nodes = dict((node, group) for node, group in changes
                         if node_data[node].group != group)

    all_split_instances = set()
    previously_split_instances = set()

    def InstanceNodes(instance):
      return [instance.primary_node] + list(instance.secondary_nodes)

    for inst in instance_data.values():
      if inst.disk_template not in constants.DTS_INT_MIRROR:
        continue

      instance_nodes = InstanceNodes(inst)

      # Split before the change: nodes already span more than one group
      if len(set(node_data[node].group for node in instance_nodes)) > 1:
        previously_split_instances.add(inst.name)

      # Split after the change: evaluate groups with the new assignments
      if len(set(changed_nodes.get(node, node_data[node].group)
                 for node in instance_nodes)) > 1:
        all_split_instances.add(inst.name)

    return (list(all_split_instances - previously_split_instances),
            list(previously_split_instances & all_split_instances))
+
+
class _GroupQuery(_QueryBase):
  FIELDS = query.GROUP_FIELDS

  def ExpandNames(self, lu):
    lu.needed_locks = {}

    self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
    name_to_uuid = dict((group.name, group.uuid)
                        for group in self._all_groups.values())

    if self.names:
      # Accept names to be either names or UUIDs.
      known_uuids = frozenset(self._all_groups.keys())
      wanted = []
      missing = []

      for name in self.names:
        if name in known_uuids:
          wanted.append(name)
        elif name in name_to_uuid:
          wanted.append(name_to_uuid[name])
        else:
          missing.append(name)

      if missing:
        raise errors.OpPrereqError("Some groups do not exist: %s" % missing,
                                   errors.ECODE_NOENT)

      self.wanted = wanted
    else:
      # No names requested: return all groups, sorted by name
      self.wanted = [name_to_uuid[name]
                     for name in utils.NiceSort(name_to_uuid.keys())]

  def DeclareLocks(self, lu, level):
    pass

  def _GetQueryData(self, lu):
    """Computes the list of node groups and their attributes.

    """
    do_nodes = query.GQ_NODE in self.requested_data
    do_instances = query.GQ_INST in self.requested_data

    group_to_nodes = None
    group_to_instances = None

    # For GQ_NODE, we need to map group->[nodes], and group->[instances] for
    # GQ_INST. The former is attainable with just GetAllNodesInfo(), but for
    # the latter GetAllInstancesInfo() is not enough, for we have to go
    # through instance->node. Hence, we will need to process nodes even if we
    # only need instance information.
    if do_nodes or do_instances:
      node_info = lu.cfg.GetAllNodesInfo()
      group_to_nodes = dict((uuid, []) for uuid in self.wanted)
      node_to_group = {}

      for node in node_info.values():
        if node.group in group_to_nodes:
          group_to_nodes[node.group].append(node.name)
          node_to_group[node.name] = node.group

      if do_instances:
        instance_info = lu.cfg.GetAllInstancesInfo()
        group_to_instances = dict((uuid, []) for uuid in self.wanted)

        for instance in instance_info.values():
          primary = instance.primary_node
          if primary in node_to_group:
            group_to_instances[node_to_group[primary]].append(instance.name)

        if not do_nodes:
          # Do not pass on node information if it was not requested.
          group_to_nodes = None

    return query.GroupQueryData([self._all_groups[uuid]
                                 for uuid in self.wanted],
                                group_to_nodes, group_to_instances)
+
+
class LUGroupQuery(NoHooksLU):
  """Logical unit for querying node groups.

  """
  REQ_BGL = False

  def CheckArguments(self):
    # Delegate the actual query work to _GroupQuery; presumably the final
    # False argument disables locking -- confirm against _QueryBase
    name_filter = qlang.MakeSimpleFilter("name", self.op.names)
    self.gq = _GroupQuery(name_filter, self.op.output_fields, False)

  def ExpandNames(self):
    self.gq.ExpandNames(self)

  def Exec(self, feedback_fn):
    return self.gq.OldStyleQuery(self)
+
+
+class LUGroupSetParams(LogicalUnit):
+ """Modifies the parameters of a node group.
+
+ """
+ HPATH = "group-modify"
+ HTYPE = constants.HTYPE_GROUP
+ REQ_BGL = False
+
+ def CheckArguments(self):
+ all_changes = [
+ self.op.ndparams,
+ self.op.alloc_policy,
+ ]
+
+ if all_changes.count(None) == len(all_changes):
+ raise errors.OpPrereqError("Please pass at least one modification",
+ errors.ECODE_INVAL)
+
+ def ExpandNames(self):
+ # This raises errors.OpPrereqError on its own:
+ self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)