Index nodes by their UUID
author    Thomas Thrainer <thomasth@google.com>
          Mon, 27 May 2013 14:17:41 +0000 (16:17 +0200)
committer Thomas Thrainer <thomasth@google.com>
          Thu, 13 Jun 2013 09:32:23 +0000 (11:32 +0200)
Nodes are no longer indexed by their name but by their UUID in the
cluster configuration. This change touches large parts of the code;
the following adjustments were necessary:
 * Change the index key to UUID in the configuration and the
   ConfigWriter, including all methods.
 * Change all cross-references to nodes to use UUIDs.
 * External interfaces (command line interface, IAllocator interface,
   hook scripts, etc.) are kept stable.
 * RPC calls can resolve UUIDs as target node arguments if the RPC
   runner is based on a ConfigWriter instance. The result dictionary is
   keyed in the form the nodes were addressed: by UUID if UUIDs were
   given, or by name if names were given.
 * Node UUIDs are resolved in ExpandNames and then stored in the
   OpCode. This allows checking for node renames if the OpCode is
   reloaded after a cluster restart. This check is currently only done
   for single-node parameters.
 * Variable names are renamed to follow a consistent pattern (see the
   sketch after this list):
   - Suffix 'node' or 'nodes': the variable holds Node objects
   - Suffix 'name' or 'names': the variable holds node names
   - Suffix 'uuid' or 'uuids': the variable holds node UUIDs
 * Tests are adapted.
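
As a minimal illustration (not part of the patch), the following sketch
shows how the naming convention and the UUID resolution are intended to
be used inside a logical unit. The LUExampleNodeCheck class and the use
of the 'version' RPC here are hypothetical; ExpandNodeUuidAndName,
NoHooksLU, GetNodeInfo and the ConfigWriter-based RPC runner are part of
the patched code:

  from ganeti.cmdlib.base import NoHooksLU
  from ganeti.cmdlib.common import ExpandNodeUuidAndName

  class LUExampleNodeCheck(NoHooksLU):  # hypothetical LU, illustration only
    def CheckPrereq(self):
      # Resolve the user-supplied node name into (UUID, name) once and
      # store both in the opcode, so a later reload of the opcode can
      # detect node renames.
      (self.op.target_node_uuid, self.op.target_node) = \
        ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
                              self.op.target_node)

      # Naming convention: *_node holds a Node object, *_name a node
      # name, *_uuid a node UUID.
      target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
      target_node_uuid = target_node.uuid
      target_node_name = target_node.name

      # RPC calls accept UUIDs when the runner is backed by a
      # ConfigWriter; the result dictionary is keyed by whatever was
      # passed in (here, the UUID).
      result = self.rpc.call_version([target_node_uuid])[target_node_uuid]
      result.Raise("Version check failed on node %s" % target_node_name)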

Signed-off-by: Thomas Thrainer <thomasth@google.com>
Reviewed-by: Klaus Aehlig <aehlig@google.com>

59 files changed:
Makefile.am
lib/backend.py
lib/bootstrap.py
lib/client/gnt_cluster.py
lib/client/gnt_instance.py
lib/cmdlib/backup.py
lib/cmdlib/base.py
lib/cmdlib/cluster.py
lib/cmdlib/common.py
lib/cmdlib/group.py
lib/cmdlib/instance.py
lib/cmdlib/instance_migration.py
lib/cmdlib/instance_operation.py
lib/cmdlib/instance_query.py
lib/cmdlib/instance_storage.py
lib/cmdlib/instance_utils.py
lib/cmdlib/misc.py
lib/cmdlib/network.py
lib/cmdlib/node.py
lib/cmdlib/operating_system.py
lib/cmdlib/tags.py
lib/cmdlib/test.py
lib/config.py
lib/hooksmaster.py
lib/hypervisor/hv_base.py
lib/hypervisor/hv_chroot.py
lib/hypervisor/hv_fake.py
lib/hypervisor/hv_kvm.py
lib/hypervisor/hv_lxc.py
lib/hypervisor/hv_xen.py
lib/masterd/iallocator.py
lib/masterd/instance.py
lib/objects.py
lib/opcodes.py
lib/query.py
lib/rpc.py
lib/rpc_defs.py
lib/server/masterd.py
lib/server/noded.py
src/Ganeti/Confd/Server.hs
src/Ganeti/Config.hs
src/Ganeti/HTools/Cluster.hs
src/Ganeti/HTools/Program/Harep.hs
src/Ganeti/OpCodes.hs
src/Ganeti/OpParams.hs
src/Ganeti/Query/Cluster.hs [new file with mode: 0644]
src/Ganeti/Query/Server.hs
test/hs/Test/Ganeti/OpCodes.hs
test/py/ganeti.cmdlib_unittest.py
test/py/ganeti.config_unittest.py
test/py/ganeti.hooks_unittest.py
test/py/ganeti.hypervisor.hv_chroot_unittest.py
test/py/ganeti.hypervisor.hv_fake_unittest.py
test/py/ganeti.hypervisor.hv_kvm_unittest.py
test/py/ganeti.hypervisor.hv_lxc_unittest.py
test/py/ganeti.hypervisor.hv_xen_unittest.py
test/py/ganeti.query_unittest.py
test/py/ganeti.rpc_unittest.py
test/py/mocks.py

diff --git a/Makefile.am b/Makefile.am
index c6c99c6..9269f9b 100644 (file)
@@ -596,6 +596,7 @@ HS_LIB_SRCS = \
        src/Ganeti/OpCodes.hs \
        src/Ganeti/OpParams.hs \
        src/Ganeti/Path.hs \
+       src/Ganeti/Query/Cluster.hs \
        src/Ganeti/Query/Common.hs \
        src/Ganeti/Query/Export.hs \
        src/Ganeti/Query/Filter.hs \
diff --git a/lib/backend.py b/lib/backend.py
index 44afc32..704031f 100644 (file)
@@ -3777,14 +3777,13 @@ def CleanupImportExport(name):
   shutil.rmtree(status_dir, ignore_errors=True)
 
 
-def _FindDisks(nodes_ip, disks):
+def _FindDisks(target_node_uuid, nodes_ip, disks):
   """Sets the physical ID on disks and returns the block devices.
 
   """
   # set the correct physical ID
-  my_name = netutils.Hostname.GetSysName()
   for cf in disks:
-    cf.SetPhysicalID(my_name, nodes_ip)
+    cf.SetPhysicalID(target_node_uuid, nodes_ip)
 
   bdevs = []
 
@@ -3796,11 +3795,11 @@ def _FindDisks(nodes_ip, disks):
   return bdevs
 
 
-def DrbdDisconnectNet(nodes_ip, disks):
+def DrbdDisconnectNet(target_node_uuid, nodes_ip, disks):
   """Disconnects the network on a list of drbd devices.
 
   """
-  bdevs = _FindDisks(nodes_ip, disks)
+  bdevs = _FindDisks(target_node_uuid, nodes_ip, disks)
 
   # disconnect disks
   for rd in bdevs:
@@ -3811,11 +3810,12 @@ def DrbdDisconnectNet(nodes_ip, disks):
             err, exc=True)
 
 
-def DrbdAttachNet(nodes_ip, disks, instance_name, multimaster):
+def DrbdAttachNet(target_node_uuid, nodes_ip, disks, instance_name,
+                  multimaster):
   """Attaches the network on a list of drbd devices.
 
   """
-  bdevs = _FindDisks(nodes_ip, disks)
+  bdevs = _FindDisks(target_node_uuid, nodes_ip, disks)
 
   if multimaster:
     for idx, rd in enumerate(bdevs):
@@ -3873,7 +3873,7 @@ def DrbdAttachNet(nodes_ip, disks, instance_name, multimaster):
         _Fail("Can't change to primary mode: %s", err)
 
 
-def DrbdWaitSync(nodes_ip, disks):
+def DrbdWaitSync(target_node_uuid, nodes_ip, disks):
   """Wait until DRBDs have synchronized.
 
   """
@@ -3883,7 +3883,7 @@ def DrbdWaitSync(nodes_ip, disks):
       raise utils.RetryAgain()
     return stats
 
-  bdevs = _FindDisks(nodes_ip, disks)
+  bdevs = _FindDisks(target_node_uuid, nodes_ip, disks)
 
   min_resync = 100
   alldone = True
diff --git a/lib/bootstrap.py b/lib/bootstrap.py
index 1df7645..2052f28 100644 (file)
@@ -609,7 +609,6 @@ def InitCluster(cluster_name, mac_prefix, # pylint: disable=R0913, R0914
     mac_prefix=mac_prefix,
     volume_group_name=vg_name,
     tcpudp_port_pool=set(),
-    master_node=hostname.name,
     master_ip=clustername.ip,
     master_netmask=master_netmask,
     master_netdev=master_netdev,
@@ -688,13 +687,14 @@ def InitConfig(version, cluster_config, master_node_config,
                                                 _INITCONF_ECID)
   master_node_config.uuid = uuid_generator.Generate([], utils.NewUUID,
                                                     _INITCONF_ECID)
+  cluster_config.master_node = master_node_config.uuid
   nodes = {
-    master_node_config.name: master_node_config,
+    master_node_config.uuid: master_node_config,
     }
   default_nodegroup = objects.NodeGroup(
     uuid=uuid_generator.Generate([], utils.NewUUID, _INITCONF_ECID),
     name=constants.INITIAL_NODE_GROUP_NAME,
-    members=[master_node_config.name],
+    members=[master_node_config.uuid],
     diskparams={},
     )
   nodegroups = {
@@ -714,7 +714,7 @@ def InitConfig(version, cluster_config, master_node_config,
                   mode=0600)
 
 
-def FinalizeClusterDestroy(master):
+def FinalizeClusterDestroy(master_uuid):
   """Execute the last steps of cluster destroy
 
   This function shuts down all the daemons, completing the destroy
@@ -725,22 +725,24 @@ def FinalizeClusterDestroy(master):
   modify_ssh_setup = cfg.GetClusterInfo().modify_ssh_setup
   runner = rpc.BootstrapRunner()
 
+  master_name = cfg.GetNodeName(master_uuid)
+
   master_params = cfg.GetMasterNetworkParameters()
-  master_params.name = master
+  master_params.uuid = master_uuid
   ems = cfg.GetUseExternalMipScript()
-  result = runner.call_node_deactivate_master_ip(master_params.name,
-                                                 master_params, ems)
+  result = runner.call_node_deactivate_master_ip(master_name, master_params,
+                                                 ems)
 
   msg = result.fail_msg
   if msg:
     logging.warning("Could not disable the master IP: %s", msg)
 
-  result = runner.call_node_stop_master(master)
+  result = runner.call_node_stop_master(master_name)
   msg = result.fail_msg
   if msg:
     logging.warning("Could not disable the master role: %s", msg)
 
-  result = runner.call_node_leave_cluster(master, modify_ssh_setup)
+  result = runner.call_node_leave_cluster(master_name, modify_ssh_setup)
   msg = result.fail_msg
   if msg:
     logging.warning("Could not shutdown the node daemon and cleanup"
@@ -788,7 +790,7 @@ def MasterFailover(no_voting=False):
   sstore = ssconf.SimpleStore()
 
   old_master, new_master = ssconf.GetMasterAndMyself(sstore)
-  node_list = sstore.GetNodeList()
+  node_names = sstore.GetNodeList()
   mc_list = sstore.GetMasterCandidates()
 
   if old_master == new_master:
@@ -807,7 +809,7 @@ def MasterFailover(no_voting=False):
                                errors.ECODE_STATE)
 
   if not no_voting:
-    vote_list = GatherMasterVotes(node_list)
+    vote_list = GatherMasterVotes(node_names)
 
     if vote_list:
       voted_master = vote_list[0][0]
@@ -832,8 +834,20 @@ def MasterFailover(no_voting=False):
     # configuration data
     cfg = config.ConfigWriter(accept_foreign=True)
 
+    old_master_node = cfg.GetNodeInfoByName(old_master)
+    if old_master_node is None:
+      raise errors.OpPrereqError("Could not find old master node '%s' in"
+                                 " cluster configuration." % old_master,
+                                 errors.ECODE_NOENT)
+
     cluster_info = cfg.GetClusterInfo()
-    cluster_info.master_node = new_master
+    new_master_node = cfg.GetNodeInfoByName(new_master)
+    if new_master_node is None:
+      raise errors.OpPrereqError("Could not find new master node '%s' in"
+                                 " cluster configuration." % new_master,
+                                 errors.ECODE_NOENT)
+
+    cluster_info.master_node = new_master_node.uuid
     # this will also regenerate the ssconf files, since we updated the
     # cluster info
     cfg.Update(cluster_info, logging.error)
@@ -851,9 +865,9 @@ def MasterFailover(no_voting=False):
 
   runner = rpc.BootstrapRunner()
   master_params = cfg.GetMasterNetworkParameters()
-  master_params.name = old_master
+  master_params.uuid = old_master_node.uuid
   ems = cfg.GetUseExternalMipScript()
-  result = runner.call_node_deactivate_master_ip(master_params.name,
+  result = runner.call_node_deactivate_master_ip(old_master,
                                                  master_params, ems)
 
   msg = result.fail_msg
@@ -917,7 +931,7 @@ def GetMaster():
   return old_master
 
 
-def GatherMasterVotes(node_list):
+def GatherMasterVotes(node_names):
   """Check the agreement on who is the master.
 
   This function will return a list of (node, number of votes), ordered
@@ -931,8 +945,8 @@ def GatherMasterVotes(node_list):
   since we use the same source for configuration information for both
   backend and boostrap, we'll always vote for ourselves.
 
-  @type node_list: list
-  @param node_list: the list of nodes to query for master info; the current
+  @type node_names: list
+  @param node_names: the list of nodes to query for master info; the current
       node will be removed if it is in the list
   @rtype: list
   @return: list of (node, votes)
@@ -940,30 +954,31 @@ def GatherMasterVotes(node_list):
   """
   myself = netutils.Hostname.GetSysName()
   try:
-    node_list.remove(myself)
+    node_names.remove(myself)
   except ValueError:
     pass
-  if not node_list:
+  if not node_names:
     # no nodes left (eventually after removing myself)
     return []
-  results = rpc.BootstrapRunner().call_master_info(node_list)
+  results = rpc.BootstrapRunner().call_master_info(node_names)
   if not isinstance(results, dict):
     # this should not happen (unless internal error in rpc)
     logging.critical("Can't complete rpc call, aborting master startup")
-    return [(None, len(node_list))]
+    return [(None, len(node_names))]
   votes = {}
-  for node in results:
-    nres = results[node]
+  for node_name in results:
+    nres = results[node_name]
     data = nres.payload
     msg = nres.fail_msg
     fail = False
     if msg:
-      logging.warning("Error contacting node %s: %s", node, msg)
+      logging.warning("Error contacting node %s: %s", node_name, msg)
       fail = True
     # for now we accept both length 3, 4 and 5 (data[3] is primary ip version
     # and data[4] is the master netmask)
     elif not isinstance(data, (tuple, list)) or len(data) < 3:
-      logging.warning("Invalid data received from node %s: %s", node, data)
+      logging.warning("Invalid data received from node %s: %s",
+                      node_name, data)
       fail = True
     if fail:
       if None not in votes:
diff --git a/lib/client/gnt_cluster.py b/lib/client/gnt_cluster.py
index 7a8a7bc..f0aa698 100644 (file)
@@ -275,10 +275,10 @@ def DestroyCluster(opts, args):
     return 1
 
   op = opcodes.OpClusterDestroy()
-  master = SubmitOpCode(op, opts=opts)
+  master_uuid = SubmitOpCode(op, opts=opts)
   # if we reached this, the opcode didn't fail; we can proceed to
   # shutdown all the daemons
-  bootstrap.FinalizeClusterDestroy(master)
+  bootstrap.FinalizeClusterDestroy(master_uuid)
   return 0
 
 
diff --git a/lib/client/gnt_instance.py b/lib/client/gnt_instance.py
index 6578064..e69f55e 100644 (file)
@@ -915,25 +915,29 @@ def _DoConsole(console, show_command, cluster_name, feedback_fn=ToStdout,
   return constants.EXIT_SUCCESS
 
 
-def _FormatLogicalID(dev_type, logical_id, roman):
+def _FormatDiskDetails(dev_type, dev, roman):
   """Formats the logical_id of a disk.
 
   """
   if dev_type == constants.LD_DRBD8:
-    node_a, node_b, port, minor_a, minor_b, key = logical_id
+    drbd_info = dev["drbd_info"]
     data = [
-      ("nodeA", "%s, minor=%s" % (node_a, compat.TryToRoman(minor_a,
-                                                            convert=roman))),
-      ("nodeB", "%s, minor=%s" % (node_b, compat.TryToRoman(minor_b,
-                                                            convert=roman))),
-      ("port", str(compat.TryToRoman(port, convert=roman))),
-      ("auth key", str(key)),
+      ("nodeA", "%s, minor=%s" %
+                (drbd_info["primary_node"],
+                 compat.TryToRoman(drbd_info["primary_minor"],
+                                   convert=roman))),
+      ("nodeB", "%s, minor=%s" %
+                (drbd_info["secondary_node"],
+                 compat.TryToRoman(drbd_info["secondary_minor"],
+                                   convert=roman))),
+      ("port", str(compat.TryToRoman(drbd_info["port"], convert=roman))),
+      ("auth key", str(drbd_info["secret"])),
       ]
   elif dev_type == constants.LD_LV:
-    vg_name, lv_name = logical_id
+    vg_name, lv_name = dev["logical_id"]
     data = ["%s/%s" % (vg_name, lv_name)]
   else:
-    data = [str(logical_id)]
+    data = [str(dev["logical_id"])]
 
   return data
 
@@ -1032,7 +1036,7 @@ def _FormatBlockDevInfo(idx, top_level, dev, roman):
     data.append(("access mode", dev["mode"]))
   if dev["logical_id"] is not None:
     try:
-      l_id = _FormatLogicalID(dev["dev_type"], dev["logical_id"], roman)
+      l_id = _FormatDiskDetails(dev["dev_type"], dev, roman)
     except ValueError:
       l_id = [str(dev["logical_id"])]
     if len(l_id) == 1:
diff --git a/lib/cmdlib/backup.py b/lib/cmdlib/backup.py
index f853a84..f54b489 100644 (file)
@@ -35,7 +35,7 @@ from ganeti import utils
 
 from ganeti.cmdlib.base import QueryBase, NoHooksLU, LogicalUnit
 from ganeti.cmdlib.common import GetWantedNodes, ShareAll, CheckNodeOnline, \
-  ExpandNodeName
+  ExpandNodeUuidAndName
 from ganeti.cmdlib.instance_storage import StartInstanceDisks, \
   ShutdownInstanceDisks
 from ganeti.cmdlib.instance_utils import GetClusterDomainSecret, \
@@ -53,7 +53,7 @@ class ExportQuery(QueryBase):
 
     # The following variables interact with _QueryBase._GetNames
     if self.names:
-      self.wanted = GetWantedNodes(lu, self.names)
+      (self.wanted, _) = GetWantedNodes(lu, self.names)
     else:
       self.wanted = locking.ALL_SET
 
@@ -82,15 +82,15 @@ class ExportQuery(QueryBase):
                            if level != locking.LEVEL_CLUSTER) or
                 self.do_locking or self.use_locking)
 
-    nodes = self._GetNames(lu, lu.cfg.GetNodeList(), locking.LEVEL_NODE)
+    node_uuids = self._GetNames(lu, lu.cfg.GetNodeList(), locking.LEVEL_NODE)
 
     result = []
 
-    for (node, nres) in lu.rpc.call_export_list(nodes).items():
+    for (node_uuid, nres) in lu.rpc.call_export_list(node_uuids).items():
       if nres.fail_msg:
-        result.append((node, None))
+        result.append((node_uuid, None))
       else:
-        result.extend((node, expname) for expname in nres.payload)
+        result.extend((node_uuid, expname) for expname in nres.payload)
 
     return result
 
@@ -154,10 +154,12 @@ class LUBackupPrepare(NoHooksLU):
     if self.op.mode == constants.EXPORT_MODE_REMOTE:
       salt = utils.GenerateSecret(8)
 
-      feedback_fn("Generating X509 certificate on %s" % instance.primary_node)
+      feedback_fn("Generating X509 certificate on %s" %
+                  self.cfg.GetNodeName(instance.primary_node))
       result = self.rpc.call_x509_cert_create(instance.primary_node,
                                               constants.RIE_CERT_VALIDITY)
-      result.Raise("Can't create X509 key and certificate on %s" % result.node)
+      result.Raise("Can't create X509 key and certificate on %s" %
+                   self.cfg.GetNodeName(result.node))
 
       (name, cert_pem) = result.payload
 
@@ -203,6 +205,9 @@ class LUBackupExport(LogicalUnit):
 
     # Lock all nodes for local exports
     if self.op.mode == constants.EXPORT_MODE_LOCAL:
+      (self.op.target_node_uuid, self.op.target_node) = \
+        ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
+                              self.op.target_node)
       # FIXME: lock only instance primary and destination node
       #
       # Sad but true, for now we have do lock all nodes, as we don't know where
@@ -248,7 +253,7 @@ class LUBackupExport(LogicalUnit):
     nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
 
     if self.op.mode == constants.EXPORT_MODE_LOCAL:
-      nl.append(self.op.target_node)
+      nl.append(self.op.target_node_uuid)
 
     return (nl, nl)
 
@@ -272,12 +277,11 @@ class LUBackupExport(LogicalUnit):
                                  " down before", errors.ECODE_STATE)
 
     if self.op.mode == constants.EXPORT_MODE_LOCAL:
-      self.op.target_node = ExpandNodeName(self.cfg, self.op.target_node)
-      self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
+      self.dst_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
       assert self.dst_node is not None
 
-      CheckNodeOnline(self, self.dst_node.name)
-      CheckNodeNotDrained(self, self.dst_node.name)
+      CheckNodeOnline(self, self.dst_node.uuid)
+      CheckNodeNotDrained(self, self.dst_node.uuid)
 
       self._cds = None
       self.dest_disk_info = None
@@ -355,24 +359,25 @@ class LUBackupExport(LogicalUnit):
     """
     assert self.op.mode != constants.EXPORT_MODE_REMOTE
 
-    nodelist = self.cfg.GetNodeList()
-    nodelist.remove(self.dst_node.name)
+    node_uuids = self.cfg.GetNodeList()
+    node_uuids.remove(self.dst_node.uuid)
 
     # on one-node clusters nodelist will be empty after the removal
     # if we proceed the backup would be removed because OpBackupQuery
     # substitutes an empty list with the full cluster node list.
     iname = self.instance.name
-    if nodelist:
+    if node_uuids:
       feedback_fn("Removing old exports for instance %s" % iname)
-      exportlist = self.rpc.call_export_list(nodelist)
-      for node in exportlist:
-        if exportlist[node].fail_msg:
+      exportlist = self.rpc.call_export_list(node_uuids)
+      for node_uuid in exportlist:
+        if exportlist[node_uuid].fail_msg:
           continue
-        if iname in exportlist[node].payload:
-          msg = self.rpc.call_export_remove(node, iname).fail_msg
+        if iname in exportlist[node_uuid].payload:
+          msg = self.rpc.call_export_remove(node_uuid, iname).fail_msg
           if msg:
             self.LogWarning("Could not remove older export for instance %s"
-                            " on node %s: %s", iname, node, msg)
+                            " on node %s: %s", iname,
+                            self.cfg.GetNodeName(node_uuid), msg)
 
   def Exec(self, feedback_fn):
     """Export an instance to an image in the cluster.
@@ -381,22 +386,23 @@ class LUBackupExport(LogicalUnit):
     assert self.op.mode in constants.EXPORT_MODES
 
     instance = self.instance
-    src_node = instance.primary_node
+    src_node_uuid = instance.primary_node
 
     if self.op.shutdown:
       # shutdown the instance, but not the disks
       feedback_fn("Shutting down instance %s" % instance.name)
-      result = self.rpc.call_instance_shutdown(src_node, instance,
+      result = self.rpc.call_instance_shutdown(src_node_uuid, instance,
                                                self.op.shutdown_timeout,
                                                self.op.reason)
       # TODO: Maybe ignore failures if ignore_remove_failures is set
       result.Raise("Could not shutdown instance %s on"
-                   " node %s" % (instance.name, src_node))
+                   " node %s" % (instance.name,
+                                 self.cfg.GetNodeName(src_node_uuid)))
 
     # set the disks ID correctly since call_instance_start needs the
     # correct drbd minor to create the symlinks
     for disk in instance.disks:
-      self.cfg.SetDiskID(disk, src_node)
+      self.cfg.SetDiskID(disk, src_node_uuid)
 
     activate_disks = not instance.disks_active
 
@@ -416,7 +422,7 @@ class LUBackupExport(LogicalUnit):
             not self.op.remove_instance):
           assert not activate_disks
           feedback_fn("Starting instance %s" % instance.name)
-          result = self.rpc.call_instance_start(src_node,
+          result = self.rpc.call_instance_start(src_node_uuid,
                                                 (instance, None, None), False,
                                                  self.op.reason)
           msg = result.fail_msg
@@ -515,18 +521,20 @@ class LUBackupRemove(NoHooksLU):
     locked_nodes = self.owned_locks(locking.LEVEL_NODE)
     exportlist = self.rpc.call_export_list(locked_nodes)
     found = False
-    for node in exportlist:
-      msg = exportlist[node].fail_msg
+    for node_uuid in exportlist:
+      msg = exportlist[node_uuid].fail_msg
       if msg:
-        self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
+        self.LogWarning("Failed to query node %s (continuing): %s",
+                        self.cfg.GetNodeName(node_uuid), msg)
         continue
-      if instance_name in exportlist[node].payload:
+      if instance_name in exportlist[node_uuid].payload:
         found = True
-        result = self.rpc.call_export_remove(node, instance_name)
+        result = self.rpc.call_export_remove(node_uuid, instance_name)
         msg = result.fail_msg
         if msg:
           logging.error("Could not remove export for instance %s"
-                        " on node %s: %s", instance_name, node, msg)
+                        " on node %s: %s", instance_name,
+                        self.cfg.GetNodeName(node_uuid), msg)
 
     if fqdn_warn and not found:
       feedback_fn("Export not found. If trying to remove an export belonging"
diff --git a/lib/cmdlib/base.py b/lib/cmdlib/base.py
index 18ab241..dcb0826 100644 (file)
@@ -181,7 +181,7 @@ class LogicalUnit(object):
       }
       # Acquire just two nodes
       self.needed_locks = {
-        locking.LEVEL_NODE: ['node1.example.com', 'node2.example.com'],
+        locking.LEVEL_NODE: ['node1-uuid', 'node2-uuid'],
       }
       # Acquire no locks
       self.needed_locks = {} # No, you can't leave it to the default value None
@@ -269,11 +269,14 @@ class LogicalUnit(object):
   def BuildHooksNodes(self):
     """Build list of nodes to run LU's hooks.
 
-    @rtype: tuple; (list, list)
-    @return: Tuple containing a list of node names on which the hook
-      should run before the execution and a list of node names on which the
-      hook should run after the execution. No nodes should be returned as an
-      empty list (and not None).
+    @rtype: tuple; (list, list) or (list, list, list)
+    @return: Tuple containing a list of node UUIDs on which the hook
+      should run before the execution and a list of node UUIDs on which the
+      hook should run after the execution. As it might be possible that the
+      node UUID is not known at the time this method is invoked, an optional
+      third list can be added which contains node names on which the hook
+      should run after the execution (in case of node add, for instance).
+      No nodes should be returned as an empty list (and not None).
     @note: If the C{HPATH} attribute of the LU class is C{None}, this function
       will not be called.
 
@@ -356,17 +359,17 @@ class LogicalUnit(object):
     # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
     # future we might want to have different behaviors depending on the value
     # of self.recalculate_locks[locking.LEVEL_NODE]
-    wanted_nodes = []
+    wanted_node_uuids = []
     locked_i = self.owned_locks(locking.LEVEL_INSTANCE)
     for _, instance in self.cfg.GetMultiInstanceInfo(locked_i):
-      wanted_nodes.append(instance.primary_node)
+      wanted_node_uuids.append(instance.primary_node)
       if not primary_only:
-        wanted_nodes.extend(instance.secondary_nodes)
+        wanted_node_uuids.extend(instance.secondary_nodes)
 
     if self.recalculate_locks[level] == constants.LOCKS_REPLACE:
-      self.needed_locks[level] = wanted_nodes
+      self.needed_locks[level] = wanted_node_uuids
     elif self.recalculate_locks[level] == constants.LOCKS_APPEND:
-      self.needed_locks[level].extend(wanted_nodes)
+      self.needed_locks[level].extend(wanted_node_uuids)
     else:
       raise errors.ProgrammerError("Unknown recalculation mode")
 
diff --git a/lib/cmdlib/cluster.py b/lib/cmdlib/cluster.py
index 9624142..51cc1cd 100644 (file)
@@ -71,7 +71,7 @@ class LUClusterActivateMasterIp(NoHooksLU):
     """
     master_params = self.cfg.GetMasterNetworkParameters()
     ems = self.cfg.GetUseExternalMipScript()
-    result = self.rpc.call_node_activate_master_ip(master_params.name,
+    result = self.rpc.call_node_activate_master_ip(master_params.uuid,
                                                    master_params, ems)
     result.Raise("Could not activate the master IP")
 
@@ -86,7 +86,7 @@ class LUClusterDeactivateMasterIp(NoHooksLU):
     """
     master_params = self.cfg.GetMasterNetworkParameters()
     ems = self.cfg.GetUseExternalMipScript()
-    result = self.rpc.call_node_deactivate_master_ip(master_params.name,
+    result = self.rpc.call_node_deactivate_master_ip(master_params.uuid,
                                                      master_params, ems)
     result.Raise("Could not deactivate the master IP")
 
@@ -163,13 +163,13 @@ class LUClusterDestroy(LogicalUnit):
     master_params = self.cfg.GetMasterNetworkParameters()
 
     # Run post hooks on master node before it's removed
-    RunPostHook(self, master_params.name)
+    RunPostHook(self, self.cfg.GetNodeName(master_params.uuid))
 
     ems = self.cfg.GetUseExternalMipScript()
-    result = self.rpc.call_node_deactivate_master_ip(master_params.name,
+    result = self.rpc.call_node_deactivate_master_ip(master_params.uuid,
                                                      master_params, ems)
     result.Warn("Error disabling the master IP address", self.LogWarning)
-    return master_params.name
+    return master_params.uuid
 
 
 class LUClusterPostInit(LogicalUnit):
@@ -232,8 +232,10 @@ class ClusterQuery(QueryBase):
 
     if query.CQ_CONFIG in self.requested_data:
       cluster = lu.cfg.GetClusterInfo()
+      nodes = lu.cfg.GetAllNodesInfo()
     else:
       cluster = NotImplemented
+      nodes = NotImplemented
 
     if query.CQ_QUEUE_DRAINED in self.requested_data:
       drain_flag = os.path.exists(pathutils.JOB_QUEUE_DRAIN_FILE)
@@ -241,17 +243,17 @@ class ClusterQuery(QueryBase):
       drain_flag = NotImplemented
 
     if query.CQ_WATCHER_PAUSE in self.requested_data:
-      master_name = lu.cfg.GetMasterNode()
+      master_node_uuid = lu.cfg.GetMasterNode()
 
-      result = lu.rpc.call_get_watcher_pause(master_name)
+      result = lu.rpc.call_get_watcher_pause(master_node_uuid)
       result.Raise("Can't retrieve watcher pause from master node '%s'" %
-                   master_name)
+                   lu.cfg.GetMasterNodeName())
 
       watcher_pause = result.payload
     else:
       watcher_pause = NotImplemented
 
-    return query.ClusterQueryData(cluster, drain_flag, watcher_pause)
+    return query.ClusterQueryData(cluster, nodes, drain_flag, watcher_pause)
 
 
 class LUClusterQuery(NoHooksLU):
@@ -290,7 +292,7 @@ class LUClusterQuery(NoHooksLU):
       "export_version": constants.EXPORT_VERSION,
       "architecture": runtime.GetArchInfo(),
       "name": cluster.cluster_name,
-      "master": cluster.master_node,
+      "master": self.cfg.GetMasterNodeName(),
       "default_hypervisor": cluster.primary_hypervisor,
       "enabled_hypervisors": cluster.enabled_hypervisors,
       "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
@@ -405,7 +407,7 @@ class LUClusterRename(LogicalUnit):
     # shutdown the master IP
     master_params = self.cfg.GetMasterNetworkParameters()
     ems = self.cfg.GetUseExternalMipScript()
-    result = self.rpc.call_node_deactivate_master_ip(master_params.name,
+    result = self.rpc.call_node_deactivate_master_ip(master_params.uuid,
                                                      master_params, ems)
     result.Raise("Could not disable the master role")
 
@@ -419,13 +421,13 @@ class LUClusterRename(LogicalUnit):
       ssh.WriteKnownHostsFile(self.cfg, pathutils.SSH_KNOWN_HOSTS_FILE)
       node_list = self.cfg.GetOnlineNodeList()
       try:
-        node_list.remove(master_params.name)
+        node_list.remove(master_params.uuid)
       except ValueError:
         pass
       UploadHelper(self, node_list, pathutils.SSH_KNOWN_HOSTS_FILE)
     finally:
       master_params.ip = new_ip
-      result = self.rpc.call_node_activate_master_ip(master_params.name,
+      result = self.rpc.call_node_activate_master_ip(master_params.uuid,
                                                      master_params, ems)
       result.Warn("Could not re-enable the master role on the master,"
                   " please restart manually", self.LogWarning)
@@ -523,24 +525,26 @@ class LUClusterRepairDiskSizes(NoHooksLU):
       "Not owning correct locks"
     assert not self.owned_locks(locking.LEVEL_NODE)
 
-    es_flags = rpc.GetExclusiveStorageForNodeNames(self.cfg,
-                                                   per_node_disks.keys())
+    es_flags = rpc.GetExclusiveStorageForNodes(self.cfg,
+                                               per_node_disks.keys())
 
     changed = []
-    for node, dskl in per_node_disks.items():
+    for node_uuid, dskl in per_node_disks.items():
       newl = [v[2].Copy() for v in dskl]
       for dsk in newl:
-        self.cfg.SetDiskID(dsk, node)
-      result = self.rpc.call_blockdev_getdimensions(node, newl)
+        self.cfg.SetDiskID(dsk, node_uuid)
+      node_name = self.cfg.GetNodeName(node_uuid)
+      result = self.rpc.call_blockdev_getdimensions(node_uuid, newl)
       if result.fail_msg:
         self.LogWarning("Failure in blockdev_getdimensions call to node"
-                        " %s, ignoring", node)
+                        " %s, ignoring", node_name)
         continue
       if len(result.payload) != len(dskl):
         logging.warning("Invalid result from node %s: len(dksl)=%d,"
-                        " result.payload=%s", node, len(dskl), result.payload)
+                        " result.payload=%s", node_name, len(dskl),
+                        result.payload)
         self.LogWarning("Invalid result from node %s, ignoring node results",
-                        node)
+                        node_name)
         continue
       for ((instance, idx, disk), dimensions) in zip(dskl, result.payload):
         if dimensions is None:
@@ -565,7 +569,7 @@ class LUClusterRepairDiskSizes(NoHooksLU):
           disk.size = size
           self.cfg.Update(instance, feedback_fn)
           changed.append((instance.name, idx, "size", size))
-        if es_flags[node]:
+        if es_flags[node_uuid]:
           if spindles is None:
             self.LogWarning("Disk %d of instance %s did not return valid"
                             " spindles information, ignoring", idx,
@@ -666,7 +670,7 @@ class LUClusterSetParams(LogicalUnit):
     mn = self.cfg.GetMasterNode()
     return ([mn], [mn])
 
-  def _CheckVgName(self, node_list, enabled_disk_templates,
+  def _CheckVgName(self, node_uuids, enabled_disk_templates,
                    new_enabled_disk_templates):
     """Check the consistency of the vg name on all nodes and in case it gets
        unset whether there are instances still using it.
@@ -682,26 +686,28 @@ class LUClusterSetParams(LogicalUnit):
            (self.cfg.GetVGName() is not None and
             utils.LvmGetsEnabled(enabled_disk_templates,
                                  new_enabled_disk_templates)):
-      self._CheckVgNameOnNodes(node_list)
+      self._CheckVgNameOnNodes(node_uuids)
 
-  def _CheckVgNameOnNodes(self, node_list):
+  def _CheckVgNameOnNodes(self, node_uuids):
     """Check the status of the volume group on each node.
 
     """
-    vglist = self.rpc.call_vg_list(node_list)
-    for node in node_list:
-      msg = vglist[node].fail_msg
+    vglist = self.rpc.call_vg_list(node_uuids)
+    for node_uuid in node_uuids:
+      msg = vglist[node_uuid].fail_msg
       if msg:
         # ignoring down node
         self.LogWarning("Error while gathering data on node %s"
-                        " (ignoring node): %s", node, msg)
+                        " (ignoring node): %s",
+                        self.cfg.GetNodeName(node_uuid), msg)
         continue
-      vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
+      vgstatus = utils.CheckVolumeGroupSize(vglist[node_uuid].payload,
                                             self.op.vg_name,
                                             constants.MIN_VG_SIZE)
       if vgstatus:
         raise errors.OpPrereqError("Error on node '%s': %s" %
-                                   (node, vgstatus), errors.ECODE_ENVIRON)
+                                   (self.cfg.GetNodeName(node_uuid), vgstatus),
+                                   errors.ECODE_ENVIRON)
 
   def _GetEnabledDiskTemplates(self, cluster):
     """Determines the enabled disk templates and the subset of disk templates
@@ -732,35 +738,37 @@ class LUClusterSetParams(LogicalUnit):
                                    " drbd-based instances exist",
                                    errors.ECODE_INVAL)
 
-    node_list = self.owned_locks(locking.LEVEL_NODE)
+    node_uuids = self.owned_locks(locking.LEVEL_NODE)
     self.cluster = cluster = self.cfg.GetClusterInfo()
 
-    vm_capable_nodes = [node.name
-                        for node in self.cfg.GetAllNodesInfo().values()
-                        if node.name in node_list and node.vm_capable]
+    vm_capable_node_uuids = [node.uuid
+                             for node in self.cfg.GetAllNodesInfo().values()
+                             if node.uuid in node_uuids and node.vm_capable]
 
     (enabled_disk_templates, new_enabled_disk_templates) = \
       self._GetEnabledDiskTemplates(cluster)
 
-    self._CheckVgName(vm_capable_nodes, enabled_disk_templates,
+    self._CheckVgName(vm_capable_node_uuids, enabled_disk_templates,
                       new_enabled_disk_templates)
 
     if self.op.drbd_helper:
       # checks given drbd helper on all nodes
-      helpers = self.rpc.call_drbd_helper(node_list)
-      for (node, ninfo) in self.cfg.GetMultiNodeInfo(node_list):
+      helpers = self.rpc.call_drbd_helper(node_uuids)
+      for (_, ninfo) in self.cfg.GetMultiNodeInfo(node_uuids):
         if ninfo.offline:
-          self.LogInfo("Not checking drbd helper on offline node %s", node)
+          self.LogInfo("Not checking drbd helper on offline node %s",
+                       ninfo.name)
           continue
-        msg = helpers[node].fail_msg
+        msg = helpers[ninfo.uuid].fail_msg
         if msg:
           raise errors.OpPrereqError("Error checking drbd helper on node"
-                                     " '%s': %s" % (node, msg),
+                                     " '%s': %s" % (ninfo.name, msg),
                                      errors.ECODE_ENVIRON)
-        node_helper = helpers[node].payload
+        node_helper = helpers[ninfo.uuid].payload
         if node_helper != self.op.drbd_helper:
           raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
-                                     (node, node_helper), errors.ECODE_ENVIRON)
+                                     (ninfo.name, node_helper),
+                                     errors.ECODE_ENVIRON)
 
     # validate params changes
     if self.op.beparams:
@@ -800,8 +808,8 @@ class LUClusterSetParams(LogicalUnit):
       violations = set()
       for group in self.cfg.GetAllNodeGroupsInfo().values():
         instances = frozenset([inst for inst in all_instances
-                               if compat.any(node in group.members
-                                             for node in inst.all_nodes)])
+                               if compat.any(nuuid in group.members
+                                             for nuuid in inst.all_nodes)])
         new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
         ipol = masterd.instance.CalculateGroupIPolicy(cluster, group)
         new = ComputeNewInstanceViolations(ipol,
@@ -920,7 +928,7 @@ class LUClusterSetParams(LogicalUnit):
           hv_class = hypervisor.GetHypervisorClass(hv_name)
           utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
           hv_class.CheckParameterSyntax(hv_params)
-          CheckHVParams(self, node_list, hv_name, hv_params)
+          CheckHVParams(self, node_uuids, hv_name, hv_params)
 
     self._CheckDiskTemplateConsistency()
 
@@ -935,7 +943,7 @@ class LUClusterSetParams(LogicalUnit):
           new_osp = objects.FillDict(cluster_defaults, hv_params)
           hv_class = hypervisor.GetHypervisorClass(hv_name)
           hv_class.CheckParameterSyntax(new_osp)
-          CheckHVParams(self, node_list, hv_name, new_osp)
+          CheckHVParams(self, node_uuids, hv_name, new_osp)
 
     if self.op.default_iallocator:
       alloc_script = utils.FindFile(self.op.default_iallocator,
@@ -1095,7 +1103,7 @@ class LUClusterSetParams(LogicalUnit):
       ems = self.cfg.GetUseExternalMipScript()
       feedback_fn("Shutting down master ip on the current netdev (%s)" %
                   self.cluster.master_netdev)
-      result = self.rpc.call_node_deactivate_master_ip(master_params.name,
+      result = self.rpc.call_node_deactivate_master_ip(master_params.uuid,
                                                        master_params, ems)
       result.Raise("Could not disable the master ip")
       feedback_fn("Changing master_netdev from %s to %s" %
@@ -1105,11 +1113,10 @@ class LUClusterSetParams(LogicalUnit):
     if self.op.master_netmask:
       master_params = self.cfg.GetMasterNetworkParameters()
       feedback_fn("Changing master IP netmask to %s" % self.op.master_netmask)
-      result = self.rpc.call_node_change_master_netmask(master_params.name,
-                                                        master_params.netmask,
-                                                        self.op.master_netmask,
-                                                        master_params.ip,
-                                                        master_params.netdev)
+      result = self.rpc.call_node_change_master_netmask(
+                 master_params.uuid, master_params.netmask,
+                 self.op.master_netmask, master_params.ip,
+                 master_params.netdev)
       result.Warn("Could not change the master IP netmask", feedback_fn)
       self.cluster.master_netmask = self.op.master_netmask
 
@@ -1120,7 +1127,7 @@ class LUClusterSetParams(LogicalUnit):
       feedback_fn("Starting the master ip on the new master netdev (%s)" %
                   self.op.master_netdev)
       ems = self.cfg.GetUseExternalMipScript()
-      result = self.rpc.call_node_activate_master_ip(master_params.name,
+      result = self.rpc.call_node_activate_master_ip(master_params.uuid,
                                                      master_params, ems)
       result.Warn("Could not re-enable the master ip on the master,"
                   " please restart manually", self.LogWarning)
@@ -1352,14 +1359,14 @@ class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
     # occur, it would never be caught by VerifyGroup, which only acts on
     # nodes/instances reachable from existing node groups.
 
-    dangling_nodes = set(node.name for node in self.all_node_info.values()
+    dangling_nodes = set(node for node in self.all_node_info.values()
                          if node.group not in self.all_group_info)
 
     dangling_instances = {}
     no_node_instances = []
 
     for inst in self.all_inst_info.values():
-      if inst.primary_node in dangling_nodes:
+      if inst.primary_node in [node.uuid for node in dangling_nodes]:
         dangling_instances.setdefault(inst.primary_node, []).append(inst.name)
       elif inst.primary_node not in self.all_node_info:
         no_node_instances.append(inst.name)
@@ -1367,7 +1374,7 @@ class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
     pretty_dangling = [
         "%s (%s)" %
         (node.name,
-         utils.CommaJoin(dangling_instances.get(node.name,
+         utils.CommaJoin(dangling_instances.get(node.uuid,
                                                 ["no instances"])))
         for node in dangling_nodes]
 
@@ -1397,8 +1404,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
   class NodeImage(object):
     """A class representing the logical and physical status of a node.
 
-    @type name: string
-    @ivar name: the node name to which this object refers
+    @type uuid: string
+    @ivar uuid: the node UUID to which this object refers
     @ivar volumes: a structure as returned from
         L{ganeti.backend.GetVolumeList} (runtime)
     @ivar instances: a list of running instances (runtime)
@@ -1430,8 +1437,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     @ivar pv_max: size in MiB of the biggest PVs
 
     """
-    def __init__(self, offline=False, name=None, vm_capable=True):
-      self.name = name
+    def __init__(self, offline=False, uuid=None, vm_capable=True):
+      self.uuid = uuid
       self.volumes = {}
       self.instances = []
       self.pinst = []
@@ -1494,20 +1501,21 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
     self.group_info = self.cfg.GetNodeGroup(self.group_uuid)
 
-    group_nodes = set(self.group_info.members)
+    group_node_uuids = set(self.group_info.members)
     group_instances = \
       self.cfg.GetNodeGroupInstances(self.group_uuid, primary_only=True)
 
-    unlocked_nodes = \
-        group_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
+    unlocked_node_uuids = \
+        group_node_uuids.difference(self.owned_locks(locking.LEVEL_NODE))
 
     unlocked_instances = \
         group_instances.difference(self.owned_locks(locking.LEVEL_INSTANCE))
 
-    if unlocked_nodes:
-      raise errors.OpPrereqError("Missing lock for nodes: %s" %
-                                 utils.CommaJoin(unlocked_nodes),
-                                 errors.ECODE_STATE)
+    if unlocked_node_uuids:
+      raise errors.OpPrereqError(
+        "Missing lock for nodes: %s" %
+        utils.CommaJoin(self.cfg.GetNodeNames(unlocked_node_uuids)),
+        errors.ECODE_STATE)
 
     if unlocked_instances:
       raise errors.OpPrereqError("Missing lock for instances: %s" %
@@ -1517,12 +1525,11 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     self.all_node_info = self.cfg.GetAllNodesInfo()
     self.all_inst_info = self.cfg.GetAllInstancesInfo()
 
-    self.my_node_names = utils.NiceSort(group_nodes)
-    self.my_inst_names = utils.NiceSort(group_instances)
-
-    self.my_node_info = dict((name, self.all_node_info[name])
-                             for name in self.my_node_names)
+    self.my_node_uuids = group_node_uuids
+    self.my_node_info = dict((node_uuid, self.all_node_info[node_uuid])
+                             for node_uuid in group_node_uuids)
 
+    self.my_inst_names = utils.NiceSort(group_instances)
     self.my_inst_info = dict((name, self.all_inst_info[name])
                              for name in self.my_inst_names)
 
@@ -1532,9 +1539,9 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
 
     for inst in self.my_inst_info.values():
       if inst.disk_template in constants.DTS_INT_MIRROR:
-        for nname in inst.all_nodes:
-          if self.all_node_info[nname].group != self.group_uuid:
-            extra_lv_nodes.add(nname)
+        for nuuid in inst.all_nodes:
+          if self.all_node_info[nuuid].group != self.group_uuid:
+            extra_lv_nodes.add(nuuid)
 
     unlocked_lv_nodes = \
         extra_lv_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
@@ -1560,12 +1567,12 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
          reasonable values in the respose)
 
     """
-    node = ninfo.name
+    node_name = ninfo.name
     _ErrorIf = self._ErrorIf # pylint: disable=C0103
 
     # main result, nresult should be a non-empty dict
     test = not nresult or not isinstance(nresult, dict)
-    _ErrorIf(test, constants.CV_ENODERPC, node,
+    _ErrorIf(test, constants.CV_ENODERPC, node_name,
                   "unable to verify node: no data returned")
     if test:
       return False
@@ -1576,13 +1583,13 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     test = not (remote_version and
                 isinstance(remote_version, (list, tuple)) and
                 len(remote_version) == 2)
-    _ErrorIf(test, constants.CV_ENODERPC, node,
+    _ErrorIf(test, constants.CV_ENODERPC, node_name,
              "connection to node returned invalid data")
     if test:
       return False
 
     test = local_version != remote_version[0]
-    _ErrorIf(test, constants.CV_ENODEVERSION, node,
+    _ErrorIf(test, constants.CV_ENODEVERSION, node_name,
              "incompatible protocol versions: master %s,"
              " node %s", local_version, remote_version[0])
     if test:
@@ -1592,7 +1599,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
 
     # full package version
     self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
-                  constants.CV_ENODEVERSION, node,
+                  constants.CV_ENODEVERSION, node_name,
                   "software version mismatch: master %s, node %s",
                   constants.RELEASE_VERSION, remote_version[1],
                   code=self.ETYPE_WARNING)
@@ -1601,19 +1608,19 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     if ninfo.vm_capable and isinstance(hyp_result, dict):
       for hv_name, hv_result in hyp_result.iteritems():
         test = hv_result is not None
-        _ErrorIf(test, constants.CV_ENODEHV, node,
+        _ErrorIf(test, constants.CV_ENODEHV, node_name,
                  "hypervisor %s verify failure: '%s'", hv_name, hv_result)
 
     hvp_result = nresult.get(constants.NV_HVPARAMS, None)
     if ninfo.vm_capable and isinstance(hvp_result, list):
       for item, hv_name, hv_result in hvp_result:
-        _ErrorIf(True, constants.CV_ENODEHV, node,
+        _ErrorIf(True, constants.CV_ENODEHV, node_name,
                  "hypervisor %s parameter verify failure (source %s): %s",
                  hv_name, item, hv_result)
 
     test = nresult.get(constants.NV_NODESETUP,
                        ["Missing NODESETUP results"])
-    _ErrorIf(test, constants.CV_ENODESETUP, node, "node setup error: %s",
+    _ErrorIf(test, constants.CV_ENODESETUP, node_name, "node setup error: %s",
              "; ".join(test))
 
     return True
@@ -1629,14 +1636,15 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     @param nvinfo_endtime: the end time of the RPC call
 
     """
-    node = ninfo.name
+    node_name = ninfo.name
     _ErrorIf = self._ErrorIf # pylint: disable=C0103
 
     ntime = nresult.get(constants.NV_TIME, None)
     try:
       ntime_merged = utils.MergeTime(ntime)
     except (ValueError, TypeError):
-      _ErrorIf(True, constants.CV_ENODETIME, node, "Node returned invalid time")
+      _ErrorIf(True, constants.CV_ENODETIME, node_name,
+               "Node returned invalid time")
       return
 
     if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
@@ -1646,7 +1654,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     else:
       ntime_diff = None
 
-    _ErrorIf(ntime_diff is not None, constants.CV_ENODETIME, node,
+    _ErrorIf(ntime_diff is not None, constants.CV_ENODETIME, node_name,
              "Node time diverges by at least %s from master node time",
              ntime_diff)
 
@@ -1664,22 +1672,23 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     if vg_name is None:
       return
 
-    node = ninfo.name
+    node_name = ninfo.name
     _ErrorIf = self._ErrorIf # pylint: disable=C0103
 
     # checks vg existence and size > 20G
     vglist = nresult.get(constants.NV_VGLIST, None)
     test = not vglist
-    _ErrorIf(test, constants.CV_ENODELVM, node, "unable to check volume groups")
+    _ErrorIf(test, constants.CV_ENODELVM, node_name,
+             "unable to check volume groups")
     if not test:
       vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
                                             constants.MIN_VG_SIZE)
-      _ErrorIf(vgstatus, constants.CV_ENODELVM, node, vgstatus)
+      _ErrorIf(vgstatus, constants.CV_ENODELVM, node_name, vgstatus)
 
     # Check PVs
     (errmsgs, pvminmax) = CheckNodePVs(nresult, self._exclusive_storage)
     for em in errmsgs:
-      self._Error(constants.CV_ENODELVM, node, em)
+      self._Error(constants.CV_ENODELVM, node_name, em)
     if pvminmax is not None:
       (nimg.pv_min, nimg.pv_max) = pvminmax
 
@@ -1692,15 +1701,15 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
 
     """
     node_versions = {}
-    for node, ndata in node_verify_infos.items():
+    for node_uuid, ndata in node_verify_infos.items():
       nresult = ndata.payload
       version = nresult.get(constants.NV_DRBDVERSION, "Missing DRBD version")
-      node_versions[node] = version
+      node_versions[node_uuid] = version
 
     if len(set(node_versions.values())) > 1:
-      for node, version in sorted(node_versions.items()):
+      for node_uuid, version in sorted(node_versions.items()):
         msg = "DRBD version mismatch: %s" % version
-        self._Error(constants.CV_ENODEDRBDHELPER, node, msg,
+        self._Error(constants.CV_ENODEDRBDHELPER, node_uuid, msg,
                     code=self.ETYPE_WARNING)
 
   def _VerifyGroupLVM(self, node_image, vg_name):
@@ -1745,15 +1754,15 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     if not bridges:
       return
 
-    node = ninfo.name
+    node_name = ninfo.name
     _ErrorIf = self._ErrorIf # pylint: disable=C0103
 
     missing = nresult.get(constants.NV_BRIDGES, None)
     test = not isinstance(missing, list)
-    _ErrorIf(test, constants.CV_ENODENET, node,
+    _ErrorIf(test, constants.CV_ENODENET, node_name,
              "did not return valid bridge information")
     if not test:
-      _ErrorIf(bool(missing), constants.CV_ENODENET, node,
+      _ErrorIf(bool(missing), constants.CV_ENODENET, node_name,
                "missing bridges: %s" % utils.CommaJoin(sorted(missing)))
 
   def _VerifyNodeUserScripts(self, ninfo, nresult):
@@ -1764,15 +1773,15 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     @param nresult: the remote results for the node
 
     """
-    node = ninfo.name
+    node_name = ninfo.name
 
     test = not constants.NV_USERSCRIPTS in nresult
-    self._ErrorIf(test, constants.CV_ENODEUSERSCRIPTS, node,
+    self._ErrorIf(test, constants.CV_ENODEUSERSCRIPTS, node_name,
                   "did not return user scripts information")
 
     broken_scripts = nresult.get(constants.NV_USERSCRIPTS, None)
     if not test:
-      self._ErrorIf(broken_scripts, constants.CV_ENODEUSERSCRIPTS, node,
+      self._ErrorIf(broken_scripts, constants.CV_ENODEUSERSCRIPTS, node_name,
                     "user scripts not present or not executable: %s" %
                     utils.CommaJoin(sorted(broken_scripts)))
 
@@ -1784,39 +1793,39 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     @param nresult: the remote results for the node
 
     """
-    node = ninfo.name
+    node_name = ninfo.name
     _ErrorIf = self._ErrorIf # pylint: disable=C0103
 
     test = constants.NV_NODELIST not in nresult
-    _ErrorIf(test, constants.CV_ENODESSH, node,
+    _ErrorIf(test, constants.CV_ENODESSH, node_name,
              "node hasn't returned node ssh connectivity data")
     if not test:
       if nresult[constants.NV_NODELIST]:
         for a_node, a_msg in nresult[constants.NV_NODELIST].items():
-          _ErrorIf(True, constants.CV_ENODESSH, node,
+          _ErrorIf(True, constants.CV_ENODESSH, node_name,
                    "ssh communication with node '%s': %s", a_node, a_msg)
 
     test = constants.NV_NODENETTEST not in nresult
-    _ErrorIf(test, constants.CV_ENODENET, node,
+    _ErrorIf(test, constants.CV_ENODENET, node_name,
              "node hasn't returned node tcp connectivity data")
     if not test:
       if nresult[constants.NV_NODENETTEST]:
         nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
         for anode in nlist:
-          _ErrorIf(True, constants.CV_ENODENET, node,
+          _ErrorIf(True, constants.CV_ENODENET, node_name,
                    "tcp communication with node '%s': %s",
                    anode, nresult[constants.NV_NODENETTEST][anode])
 
     test = constants.NV_MASTERIP not in nresult
-    _ErrorIf(test, constants.CV_ENODENET, node,
+    _ErrorIf(test, constants.CV_ENODENET, node_name,
              "node hasn't returned node master IP reachability data")
     if not test:
       if not nresult[constants.NV_MASTERIP]:
-        if node == self.master_node:
+        if ninfo.uuid == self.master_node:
           msg = "the master node cannot reach the master IP (not configured?)"
         else:
           msg = "cannot reach the master IP"
-        _ErrorIf(True, constants.CV_ENODENET, node, msg)
+        _ErrorIf(True, constants.CV_ENODENET, node_name, msg)
 
   def _VerifyInstance(self, instance, inst_config, node_image,
                       diskstatus):
@@ -1850,16 +1859,17 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       for volume in node_vol_should[node]:
         test = volume not in n_img.volumes
         _ErrorIf(test, constants.CV_EINSTANCEMISSINGDISK, instance,
-                 "volume %s missing on node %s", volume, node)
+                 "volume %s missing on node %s", volume,
+                 self.cfg.GetNodeName(node))
 
     if inst_config.admin_state == constants.ADMINST_UP:
       test = instance not in pnode_img.instances and not pnode_img.offline
       _ErrorIf(test, constants.CV_EINSTANCEDOWN, instance,
                "instance not running on its primary node %s",
-               pnode)
+               self.cfg.GetNodeName(pnode))
       _ErrorIf(pnode_img.offline, constants.CV_EINSTANCEBADNODE, instance,
                "instance is marked as running and lives on offline node %s",
-               pnode)
+               self.cfg.GetNodeName(pnode))
 
     diskdata = [(nname, success, status, idx)
                 for (nname, disks) in diskstatus.items()
@@ -1874,11 +1884,11 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
                not success and not bad_snode,
                constants.CV_EINSTANCEFAULTYDISK, instance,
                "couldn't retrieve status for disk/%s on %s: %s",
-               idx, nname, bdev_status)
+               idx, self.cfg.GetNodeName(nname), bdev_status)
       _ErrorIf((inst_config.disks_active and
                 success and bdev_status.ldisk_status == constants.LDS_FAULTY),
                constants.CV_EINSTANCEFAULTYDISK, instance,
-               "disk/%s on %s is faulty", idx, nname)
+               "disk/%s on %s is faulty", idx, self.cfg.GetNodeName(nname))
 
     _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
              constants.CV_ENODERPC, pnode, "instance %s, connection to"
@@ -1890,8 +1900,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
              utils.CommaJoin(inst_config.secondary_nodes),
              code=self.ETYPE_WARNING)
 
-    es_flags = rpc.GetExclusiveStorageForNodeNames(self.cfg,
-                                                   inst_config.all_nodes)
+    es_flags = rpc.GetExclusiveStorageForNodes(self.cfg,
+                                               inst_config.all_nodes)
     if any(es_flags.values()):
       if inst_config.disk_template not in constants.DTS_EXCL_STORAGE:
         # Disk template not compatible with exclusive_storage: no instance
@@ -1902,7 +1912,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
         self._Error(constants.CV_EINSTANCEUNSUITABLENODE, instance,
                     "instance has template %s, which is not supported on nodes"
                     " that have exclusive storage set: %s",
-                    inst_config.disk_template, utils.CommaJoin(es_nodes))
+                    inst_config.disk_template,
+                    utils.CommaJoin(self.cfg.GetNodeNames(es_nodes)))
       for (idx, disk) in enumerate(inst_config.disks):
         _ErrorIf(disk.spindles is None,
                  constants.CV_EINSTANCEMISSINGCFGPARAMETER, instance,
@@ -1920,7 +1931,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
                                    []).append(node)
 
       pretty_list = [
-        "%s (group %s)" % (utils.CommaJoin(nodes), groupinfo[group].name)
+        "%s (group %s)" % (utils.CommaJoin(self.cfg.GetNodeNames(nodes)),
+                           groupinfo[group].name)
         # Sort so that we always list the primary node first.
         for group, nodes in sorted(instance_groups.items(),
                                    key=lambda (_, nodes): pnode in nodes,
@@ -1945,13 +1957,15 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     # warn that the instance lives on offline nodes
     _ErrorIf(inst_nodes_offline, constants.CV_EINSTANCEBADNODE, instance,
              "instance has offline secondary node(s) %s",
-             utils.CommaJoin(inst_nodes_offline))
+             utils.CommaJoin(self.cfg.GetNodeNames(inst_nodes_offline)))
     # ... or ghost/non-vm_capable nodes
     for node in inst_config.all_nodes:
       _ErrorIf(node_image[node].ghost, constants.CV_EINSTANCEBADNODE,
-               instance, "instance lives on ghost node %s", node)
+               instance, "instance lives on ghost node %s",
+               self.cfg.GetNodeName(node))
       _ErrorIf(not node_image[node].vm_capable, constants.CV_EINSTANCEBADNODE,
-               instance, "instance lives on non-vm_capable node %s", node)
+               instance, "instance lives on non-vm_capable node %s",
+               self.cfg.GetNodeName(node))
 
   def _VerifyOrphanVolumes(self, node_vol_should, node_image, reserved):
     """Verify if there are any unknown volumes in the cluster.
@@ -1963,16 +1977,17 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     @param reserved: a FieldSet of reserved volume names
 
     """
-    for node, n_img in node_image.items():
+    for node_uuid, n_img in node_image.items():
       if (n_img.offline or n_img.rpc_fail or n_img.lvm_fail or
-          self.all_node_info[node].group != self.group_uuid):
+          self.all_node_info[node_uuid].group != self.group_uuid):
         # skip non-healthy nodes
         continue
       for volume in n_img.volumes:
-        test = ((node not in node_vol_should or
-                volume not in node_vol_should[node]) and
+        test = ((node_uuid not in node_vol_should or
+                volume not in node_vol_should[node_uuid]) and
                 not reserved.Matches(volume))
-        self._ErrorIf(test, constants.CV_ENODEORPHANLV, node,
+        self._ErrorIf(test, constants.CV_ENODEORPHANLV,
+                      self.cfg.GetNodeName(node_uuid),
                       "volume %s is unknown", volume)
 
   def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
@@ -1983,7 +1998,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
 
     """
     cluster_info = self.cfg.GetClusterInfo()
-    for node, n_img in node_image.items():
+    for node_uuid, n_img in node_image.items():
       # This code checks that every node which is now listed as
       # secondary has enough memory to host all instances it is
       # supposed to should a single other node in the cluster fail.
@@ -1992,7 +2007,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       # WARNING: we currently take into account down instances as well
       # as up ones, considering that even if they're down someone
       # might want to start them even in the event of a node failure.
-      if n_img.offline or self.all_node_info[node].group != self.group_uuid:
+      if n_img.offline or \
+         self.all_node_info[node_uuid].group != self.group_uuid:
         # we're skipping nodes marked offline and nodes in other groups from
         # the N+1 warning, since most likely we don't have good memory
         # infromation from them; we already list instances living on such
@@ -2006,19 +2022,18 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
           if bep[constants.BE_AUTO_BALANCE]:
             needed_mem += bep[constants.BE_MINMEM]
         test = n_img.mfree < needed_mem
-        self._ErrorIf(test, constants.CV_ENODEN1, node,
+        self._ErrorIf(test, constants.CV_ENODEN1,
+                      self.cfg.GetNodeName(node_uuid),
                       "not enough memory to accomodate instance failovers"
                       " should node %s fail (%dMiB needed, %dMiB available)",
-                      prinode, needed_mem, n_img.mfree)
+                      self.cfg.GetNodeName(prinode), needed_mem, n_img.mfree)
 
-  @classmethod
-  def _VerifyFiles(cls, errorif, nodeinfo, master_node, all_nvinfo,
+  def _VerifyFiles(self, nodes, master_node_uuid, all_nvinfo,
                    (files_all, files_opt, files_mc, files_vm)):
     """Verifies file checksums collected from all nodes.
 
-    @param errorif: Callback for reporting errors
-    @param nodeinfo: List of L{objects.Node} objects
-    @param master_node: Name of master node
+    @param nodes: List of L{objects.Node} objects
+    @param master_node_uuid: UUID of master node
     @param all_nvinfo: RPC results
 
     """
@@ -2026,7 +2041,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     files2nodefn = [
       (files_all, None),
       (files_mc, lambda node: (node.master_candidate or
-                               node.name == master_node)),
+                               node.uuid == master_node_uuid)),
       (files_vm, lambda node: node.vm_capable),
       ]
 
@@ -2034,11 +2049,11 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     nodefiles = {}
     for (files, fn) in files2nodefn:
       if fn is None:
-        filenodes = nodeinfo
+        filenodes = nodes
       else:
-        filenodes = filter(fn, nodeinfo)
+        filenodes = filter(fn, nodes)
       nodefiles.update((filename,
-                        frozenset(map(operator.attrgetter("name"), filenodes)))
+                        frozenset(map(operator.attrgetter("uuid"), filenodes)))
                        for filename in files)
 
     assert set(nodefiles) == (files_all | files_mc | files_vm)
@@ -2046,12 +2061,12 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     fileinfo = dict((filename, {}) for filename in nodefiles)
     ignore_nodes = set()
 
-    for node in nodeinfo:
+    for node in nodes:
       if node.offline:
-        ignore_nodes.add(node.name)
+        ignore_nodes.add(node.uuid)
         continue
 
-      nresult = all_nvinfo[node.name]
+      nresult = all_nvinfo[node.uuid]
 
       if nresult.fail_msg or not nresult.payload:
         node_files = None
@@ -2062,24 +2077,24 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
         del fingerprints
 
       test = not (node_files and isinstance(node_files, dict))
-      errorif(test, constants.CV_ENODEFILECHECK, node.name,
-              "Node did not return file checksum data")
+      self._ErrorIf(test, constants.CV_ENODEFILECHECK, node.name,
+                    "Node did not return file checksum data")
       if test:
-        ignore_nodes.add(node.name)
+        ignore_nodes.add(node.uuid)
         continue
 
       # Build per-checksum mapping from filename to nodes having it
       for (filename, checksum) in node_files.items():
         assert filename in nodefiles
-        fileinfo[filename].setdefault(checksum, set()).add(node.name)
+        fileinfo[filename].setdefault(checksum, set()).add(node.uuid)
 
     for (filename, checksums) in fileinfo.items():
       assert compat.all(len(i) > 10 for i in checksums), "Invalid checksum"
 
       # Nodes having the file
-      with_file = frozenset(node_name
-                            for nodes in fileinfo[filename].values()
-                            for node_name in nodes) - ignore_nodes
+      with_file = frozenset(node_uuid
+                            for node_uuids in fileinfo[filename].values()
+                            for node_uuid in node_uuids) - ignore_nodes
 
       expected_nodes = nodefiles[filename] - ignore_nodes
 
@@ -2088,36 +2103,44 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
 
       if filename in files_opt:
         # All or no nodes
-        errorif(missing_file and missing_file != expected_nodes,
-                constants.CV_ECLUSTERFILECHECK, None,
-                "File %s is optional, but it must exist on all or no"
-                " nodes (not found on %s)",
-                filename, utils.CommaJoin(utils.NiceSort(missing_file)))
+        self._ErrorIf(missing_file and missing_file != expected_nodes,
+                      constants.CV_ECLUSTERFILECHECK, None,
+                      "File %s is optional, but it must exist on all or no"
+                      " nodes (not found on %s)",
+                      filename,
+                      utils.CommaJoin(
+                        utils.NiceSort(
+                          map(self.cfg.GetNodeName, missing_file))))
       else:
-        errorif(missing_file, constants.CV_ECLUSTERFILECHECK, None,
-                "File %s is missing from node(s) %s", filename,
-                utils.CommaJoin(utils.NiceSort(missing_file)))
+        self._ErrorIf(missing_file, constants.CV_ECLUSTERFILECHECK, None,
+                      "File %s is missing from node(s) %s", filename,
+                      utils.CommaJoin(
+                        utils.NiceSort(
+                          map(self.cfg.GetNodeName, missing_file))))
 
         # Warn if a node has a file it shouldn't
         unexpected = with_file - expected_nodes
-        errorif(unexpected,
-                constants.CV_ECLUSTERFILECHECK, None,
-                "File %s should not exist on node(s) %s",
-                filename, utils.CommaJoin(utils.NiceSort(unexpected)))
+        self._ErrorIf(unexpected,
+                      constants.CV_ECLUSTERFILECHECK, None,
+                      "File %s should not exist on node(s) %s",
+                      filename, utils.CommaJoin(
+                        utils.NiceSort(map(self.cfg.GetNodeName, unexpected))))
 
       # See if there are multiple versions of the file
       test = len(checksums) > 1
       if test:
         variants = ["variant %s on %s" %
-                    (idx + 1, utils.CommaJoin(utils.NiceSort(nodes)))
-                    for (idx, (checksum, nodes)) in
+                    (idx + 1,
+                     utils.CommaJoin(utils.NiceSort(
+                       map(self.cfg.GetNodeName, node_uuids))))
+                    for (idx, (checksum, node_uuids)) in
                       enumerate(sorted(checksums.items()))]
       else:
         variants = []
 
-      errorif(test, constants.CV_ECLUSTERFILECHECK, None,
-              "File %s found with %s different checksums (%s)",
-              filename, len(checksums), "; ".join(variants))
+      self._ErrorIf(test, constants.CV_ECLUSTERFILECHECK, None,
+                    "File %s found with %s different checksums (%s)",
+                    filename, len(checksums), "; ".join(variants))
 
   def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_helper,
                       drbd_map):
@@ -2132,26 +2155,26 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
         L{ganeti.config.ConfigWriter.ComputeDRBDMap}
 
     """
-    node = ninfo.name
+    node_name = ninfo.name
     _ErrorIf = self._ErrorIf # pylint: disable=C0103
 
     if drbd_helper:
       helper_result = nresult.get(constants.NV_DRBDHELPER, None)
       test = (helper_result is None)
-      _ErrorIf(test, constants.CV_ENODEDRBDHELPER, node,
+      _ErrorIf(test, constants.CV_ENODEDRBDHELPER, node_name,
                "no drbd usermode helper returned")
       if helper_result:
         status, payload = helper_result
         test = not status
-        _ErrorIf(test, constants.CV_ENODEDRBDHELPER, node,
+        _ErrorIf(test, constants.CV_ENODEDRBDHELPER, node_name,
                  "drbd usermode helper check unsuccessful: %s", payload)
         test = status and (payload != drbd_helper)
-        _ErrorIf(test, constants.CV_ENODEDRBDHELPER, node,
+        _ErrorIf(test, constants.CV_ENODEDRBDHELPER, node_name,
                  "wrong drbd usermode helper: %s", payload)
 
     # compute the DRBD minors
     node_drbd = {}
-    for minor, instance in drbd_map[node].items():
+    for minor, instance in drbd_map[ninfo.uuid].items():
       test = instance not in instanceinfo
       _ErrorIf(test, constants.CV_ECLUSTERCFG, None,
                "ghost instance '%s' in temporary DRBD map", instance)
@@ -2167,7 +2190,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     # and now check them
     used_minors = nresult.get(constants.NV_DRBDLIST, [])
     test = not isinstance(used_minors, (tuple, list))
-    _ErrorIf(test, constants.CV_ENODEDRBD, node,
+    _ErrorIf(test, constants.CV_ENODEDRBD, node_name,
              "cannot parse drbd status file: %s", str(used_minors))
     if test:
       # we cannot check drbd status
@@ -2175,11 +2198,11 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
 
     for minor, (iname, must_exist) in node_drbd.items():
       test = minor not in used_minors and must_exist
-      _ErrorIf(test, constants.CV_ENODEDRBD, node,
+      _ErrorIf(test, constants.CV_ENODEDRBD, node_name,
                "drbd minor %d of instance %s is not active", minor, iname)
     for minor in used_minors:
       test = minor not in node_drbd
-      _ErrorIf(test, constants.CV_ENODEDRBD, node,
+      _ErrorIf(test, constants.CV_ENODEDRBD, node_name,
                "unallocated drbd minor %d is in use", minor)
 
   def _UpdateNodeOS(self, ninfo, nresult, nimg):
@@ -2191,7 +2214,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     @param nimg: the node image object
 
     """
-    node = ninfo.name
+    node_name = ninfo.name
     _ErrorIf = self._ErrorIf # pylint: disable=C0103
 
     remote_os = nresult.get(constants.NV_OSLIST, None)
@@ -2199,7 +2222,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
             not compat.all(isinstance(v, list) and len(v) == 7
                            for v in remote_os))
 
-    _ErrorIf(test, constants.CV_ENODEOS, node,
+    _ErrorIf(test, constants.CV_ENODEOS, node_name,
              "node hasn't returned valid OS data")
 
     nimg.os_fail = test
@@ -2232,7 +2255,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     @param base: the 'template' node we match against (e.g. from the master)
 
     """
-    node = ninfo.name
+    node_name = ninfo.name
     _ErrorIf = self._ErrorIf # pylint: disable=C0103
 
     assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?"
@@ -2241,16 +2264,16 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     for os_name, os_data in nimg.oslist.items():
       assert os_data, "Empty OS status for OS %s?!" % os_name
       f_path, f_status, f_diag, f_var, f_param, f_api = os_data[0]
-      _ErrorIf(not f_status, constants.CV_ENODEOS, node,
+      _ErrorIf(not f_status, constants.CV_ENODEOS, node_name,
                "Invalid OS %s (located at %s): %s", os_name, f_path, f_diag)
-      _ErrorIf(len(os_data) > 1, constants.CV_ENODEOS, node,
+      _ErrorIf(len(os_data) > 1, constants.CV_ENODEOS, node_name,
                "OS '%s' has multiple entries (first one shadows the rest): %s",
                os_name, utils.CommaJoin([v[0] for v in os_data]))
       # comparisons with the 'base' image
       test = os_name not in base.oslist
-      _ErrorIf(test, constants.CV_ENODEOS, node,
+      _ErrorIf(test, constants.CV_ENODEOS, node_name,
                "Extra OS %s not present on reference node (%s)",
-               os_name, base.name)
+               os_name, self.cfg.GetNodeName(base.uuid))
       if test:
         continue
       assert base.oslist[os_name], "Base node has empty OS status?"
@@ -2262,16 +2285,16 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
                          ("variants list", f_var, b_var),
                          ("parameters", beautify_params(f_param),
                           beautify_params(b_param))]:
-        _ErrorIf(a != b, constants.CV_ENODEOS, node,
+        _ErrorIf(a != b, constants.CV_ENODEOS, node_name,
                  "OS %s for %s differs from reference node %s: [%s] vs. [%s]",
-                 kind, os_name, base.name,
+                 kind, os_name, self.cfg.GetNodeName(base.uuid),
                  utils.CommaJoin(sorted(a)), utils.CommaJoin(sorted(b)))
 
     # check any missing OSes
     missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
-    _ErrorIf(missing, constants.CV_ENODEOS, node,
+    _ErrorIf(missing, constants.CV_ENODEOS, node_name,
              "OSes present on reference node %s but missing on this node: %s",
-             base.name, utils.CommaJoin(missing))
+             self.cfg.GetNodeName(base.uuid), utils.CommaJoin(missing))
 
   def _VerifyFileStoragePaths(self, ninfo, nresult, is_master):
     """Verifies paths in L{pathutils.FILE_STORAGE_PATHS_FILE}.
@@ -2283,7 +2306,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     @param is_master: Whether node is the master node
 
     """
-    node = ninfo.name
+    node_name = ninfo.name
 
     if (is_master and
         (constants.ENABLE_FILE_STORAGE or
@@ -2292,15 +2315,15 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
         fspaths = nresult[constants.NV_FILE_STORAGE_PATHS]
       except KeyError:
         # This should never happen
-        self._ErrorIf(True, constants.CV_ENODEFILESTORAGEPATHS, node,
+        self._ErrorIf(True, constants.CV_ENODEFILESTORAGEPATHS, node_name,
                       "Node did not return forbidden file storage paths")
       else:
-        self._ErrorIf(fspaths, constants.CV_ENODEFILESTORAGEPATHS, node,
+        self._ErrorIf(fspaths, constants.CV_ENODEFILESTORAGEPATHS, node_name,
                       "Found forbidden file storage paths: %s",
                       utils.CommaJoin(fspaths))
     else:
       self._ErrorIf(constants.NV_FILE_STORAGE_PATHS in nresult,
-                    constants.CV_ENODEFILESTORAGEPATHS, node,
+                    constants.CV_ENODEFILESTORAGEPATHS, node_name,
                     "Node should not have returned forbidden file storage"
                     " paths")
 
@@ -2312,13 +2335,14 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     @param nresult: the remote results for the node
 
     """
-    node = ninfo.name
+    node_name = ninfo.name
     # We just have to verify the paths on master and/or master candidates
     # as the oob helper is invoked on the master
     if ((ninfo.master_candidate or ninfo.master_capable) and
         constants.NV_OOB_PATHS in nresult):
       for path_result in nresult[constants.NV_OOB_PATHS]:
-        self._ErrorIf(path_result, constants.CV_ENODEOOBPATH, node, path_result)
+        self._ErrorIf(path_result, constants.CV_ENODEOOBPATH,
+                      node_name, path_result)
 
   def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
     """Verifies and updates the node volume data.
@@ -2333,7 +2357,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     @param vg_name: the configured VG name
 
     """
-    node = ninfo.name
+    node_name = ninfo.name
     _ErrorIf = self._ErrorIf # pylint: disable=C0103
 
     nimg.lvm_fail = True
@@ -2341,10 +2365,10 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     if vg_name is None:
       pass
     elif isinstance(lvdata, basestring):
-      _ErrorIf(True, constants.CV_ENODELVM, node, "LVM problem on node: %s",
-               utils.SafeEncode(lvdata))
+      _ErrorIf(True, constants.CV_ENODELVM, node_name,
+               "LVM problem on node: %s", utils.SafeEncode(lvdata))
     elif not isinstance(lvdata, dict):
-      _ErrorIf(True, constants.CV_ENODELVM, node,
+      _ErrorIf(True, constants.CV_ENODELVM, node_name,
                "rpc call to node failed (lvlist)")
     else:
       nimg.volumes = lvdata
@@ -2383,40 +2407,40 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     @param vg_name: the configured VG name
 
     """
-    node = ninfo.name
+    node_name = ninfo.name
     _ErrorIf = self._ErrorIf # pylint: disable=C0103
 
     # try to read free memory (from the hypervisor)
     hv_info = nresult.get(constants.NV_HVINFO, None)
     test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
-    _ErrorIf(test, constants.CV_ENODEHV, node,
+    _ErrorIf(test, constants.CV_ENODEHV, node_name,
              "rpc call to node failed (hvinfo)")
     if not test:
       try:
         nimg.mfree = int(hv_info["memory_free"])
       except (ValueError, TypeError):
-        _ErrorIf(True, constants.CV_ENODERPC, node,
+        _ErrorIf(True, constants.CV_ENODERPC, node_name,
                  "node returned invalid nodeinfo, check hypervisor")
 
     # FIXME: devise a free space model for file based instances as well
     if vg_name is not None:
       test = (constants.NV_VGLIST not in nresult or
               vg_name not in nresult[constants.NV_VGLIST])
-      _ErrorIf(test, constants.CV_ENODELVM, node,
+      _ErrorIf(test, constants.CV_ENODELVM, node_name,
                "node didn't return data for the volume group '%s'"
                " - it is either missing or broken", vg_name)
       if not test:
         try:
           nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
         except (ValueError, TypeError):
-          _ErrorIf(True, constants.CV_ENODERPC, node,
+          _ErrorIf(True, constants.CV_ENODERPC, node_name,
                    "node returned invalid LVM info, check LVM status")
 
-  def _CollectDiskInfo(self, nodelist, node_image, instanceinfo):
+  def _CollectDiskInfo(self, node_uuids, node_image, instanceinfo):
     """Gets per-disk status information for all instances.
 
-    @type nodelist: list of strings
-    @param nodelist: Node names
+    @type node_uuids: list of strings
+    @param node_uuids: Node UUIDs
     @type node_image: dict of (name, L{objects.Node})
     @param node_image: Node objects
     @type instanceinfo: dict of (name, L{objects.Instance})
@@ -2427,16 +2451,14 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
         list of tuples (success, payload)
 
     """
-    _ErrorIf = self._ErrorIf # pylint: disable=C0103
-
     node_disks = {}
     node_disks_devonly = {}
     diskless_instances = set()
     diskless = constants.DT_DISKLESS
 
-    for nname in nodelist:
-      node_instances = list(itertools.chain(node_image[nname].pinst,
-                                            node_image[nname].sinst))
+    for nuuid in node_uuids:
+      node_instances = list(itertools.chain(node_image[nuuid].pinst,
+                                            node_image[nuuid].sinst))
       diskless_instances.update(inst for inst in node_instances
                                 if instanceinfo[inst].disk_template == diskless)
       disks = [(inst, disk)
@@ -2447,16 +2469,16 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
         # No need to collect data
         continue
 
-      node_disks[nname] = disks
+      node_disks[nuuid] = disks
 
       # _AnnotateDiskParams makes already copies of the disks
       devonly = []
       for (inst, dev) in disks:
         (anno_disk,) = AnnotateDiskParams(instanceinfo[inst], [dev], self.cfg)
-        self.cfg.SetDiskID(anno_disk, nname)
+        self.cfg.SetDiskID(anno_disk, nuuid)
         devonly.append(anno_disk)
 
-      node_disks_devonly[nname] = devonly
+      node_disks_devonly[nuuid] = devonly
 
     assert len(node_disks) == len(node_disks_devonly)
 
@@ -2468,16 +2490,17 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
 
     instdisk = {}
 
-    for (nname, nres) in result.items():
-      disks = node_disks[nname]
+    for (nuuid, nres) in result.items():
+      node = self.cfg.GetNodeInfo(nuuid)
+      disks = node_disks[node.uuid]
 
       if nres.offline:
         # No data from this node
         data = len(disks) * [(False, "node offline")]
       else:
         msg = nres.fail_msg
-        _ErrorIf(msg, constants.CV_ENODERPC, nname,
-                 "while getting disk information: %s", msg)
+        self._ErrorIf(msg, constants.CV_ENODERPC, node.name,
+                      "while getting disk information: %s", msg)
         if msg:
           # No data from this node
           data = len(disks) * [(False, msg)]
@@ -2488,11 +2511,11 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
               data.append(i)
             else:
               logging.warning("Invalid result from node %s, entry %d: %s",
-                              nname, idx, i)
+                              node.name, idx, i)
               data.append((False, "Invalid result from the remote node"))
 
       for ((inst, _), status) in zip(disks, data):
-        instdisk.setdefault(inst, {}).setdefault(nname, []).append(status)
+        instdisk.setdefault(inst, {}).setdefault(node.uuid, []).append(status)
 
     # Add empty entries for diskless instances.
     for inst in diskless_instances:
@@ -2500,11 +2523,11 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       instdisk[inst] = {}
 
     assert compat.all(len(statuses) == len(instanceinfo[inst].disks) and
-                      len(nnames) <= len(instanceinfo[inst].all_nodes) and
+                      len(nuuids) <= len(instanceinfo[inst].all_nodes) and
                       compat.all(isinstance(s, (tuple, list)) and
                                  len(s) == 2 for s in statuses)
-                      for inst, nnames in instdisk.items()
-                      for nname, statuses in nnames.items())
+                      for inst, nuuids in instdisk.items()
+                      for nuuid, statuses in nuuids.items())
     if __debug__:
       instdisk_keys = set(instdisk)
       instanceinfo_keys = set(instanceinfo)
@@ -2568,7 +2591,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     """Build hooks nodes.
 
     """
-    return ([], self.my_node_names)
+    return ([], list(self.my_node_info.keys()))
 
   def Exec(self, feedback_fn):
     """Verify integrity of the node group, performing various test on nodes.
@@ -2577,7 +2600,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     # This method has too many local variables. pylint: disable=R0914
     feedback_fn("* Verifying group '%s'" % self.group_info.name)
 
-    if not self.my_node_names:
+    if not self.my_node_uuids:
       # empty node group
       feedback_fn("* Empty node group, skipping verification")
       return True
@@ -2591,7 +2614,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     drbd_helper = self.cfg.GetDRBDHelper()
     cluster = self.cfg.GetClusterInfo()
     hypervisors = cluster.enabled_hypervisors
-    node_data_list = [self.my_node_info[name] for name in self.my_node_names]
+    node_data_list = self.my_node_info.values()
 
     i_non_redundant = [] # Non redundant instances
     i_non_a_balanced = [] # Non auto-balanced instances
@@ -2606,10 +2629,10 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     filemap = ComputeAncillaryFiles(cluster, False)
 
     # do local checksums
-    master_node = self.master_node = self.cfg.GetMasterNode()
+    master_node_uuid = self.master_node = self.cfg.GetMasterNode()
     master_ip = self.cfg.GetMasterIP()
 
-    feedback_fn("* Gathering data (%d nodes)" % len(self.my_node_names))
+    feedback_fn("* Gathering data (%d nodes)" % len(self.my_node_uuids))
 
     user_scripts = []
     if self.cfg.GetUseExternalMipScript():
@@ -2635,7 +2658,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       constants.NV_HVINFO: self.cfg.GetHypervisorType(),
       constants.NV_NODESETUP: None,
       constants.NV_TIME: None,
-      constants.NV_MASTERIP: (master_node, master_ip),
+      constants.NV_MASTERIP: (self.cfg.GetMasterNodeName(), master_ip),
       constants.NV_OSLIST: None,
       constants.NV_VMNODES: self.cfg.GetNonVmCapableNodeList(),
       constants.NV_USERSCRIPTS: user_scripts,
@@ -2653,7 +2676,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
 
     if constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE:
       # Load file storage paths only from master node
-      node_verify_param[constants.NV_FILE_STORAGE_PATHS] = master_node
+      node_verify_param[constants.NV_FILE_STORAGE_PATHS] = \
+        self.cfg.GetMasterNodeName()
 
     # bridge checks
     # FIXME: this needs to be changed per node-group, not cluster-wide
@@ -2671,8 +2695,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       node_verify_param[constants.NV_BRIDGES] = list(bridges)
 
     # Build our expected cluster state
-    node_image = dict((node.name, self.NodeImage(offline=node.offline,
-                                                 name=node.name,
+    node_image = dict((node.uuid, self.NodeImage(offline=node.offline,
+                                                 uuid=node.uuid,
                                                  vm_capable=node.vm_capable))
                       for node in node_data_list)
 
@@ -2691,11 +2715,11 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       if inst_config.admin_state == constants.ADMINST_OFFLINE:
         i_offline += 1
 
-      for nname in inst_config.all_nodes:
-        if nname not in node_image:
-          gnode = self.NodeImage(name=nname)
-          gnode.ghost = (nname not in self.all_node_info)
-          node_image[nname] = gnode
+      for nuuid in inst_config.all_nodes:
+        if nuuid not in node_image:
+          gnode = self.NodeImage(uuid=nuuid)
+          gnode.ghost = (nuuid not in self.all_node_info)
+          node_image[nuuid] = gnode
 
       inst_config.MapLVsByNode(node_vol_should)
 
@@ -2709,7 +2733,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
           nimg.sbp[pnode] = []
         nimg.sbp[pnode].append(instance)
 
-    es_flags = rpc.GetExclusiveStorageForNodeNames(self.cfg, self.my_node_names)
+    es_flags = rpc.GetExclusiveStorageForNodes(self.cfg,
+                                               self.my_node_info.keys())
     # The value of exclusive_storage should be the same across the group, so if
     # it's True for at least a node, we act as if it were set for all the nodes
     self._exclusive_storage = compat.any(es_flags.values())
@@ -2724,7 +2749,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     # time before and after executing the request, we can at least have a time
     # window.
     nvinfo_starttime = time.time()
-    all_nvinfo = self.rpc.call_node_verify(self.my_node_names,
+    all_nvinfo = self.rpc.call_node_verify(self.my_node_uuids,
                                            node_verify_param,
                                            self.cfg.GetClusterName(),
                                            self.cfg.GetClusterInfo().hvparams)
@@ -2742,56 +2767,55 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     all_drbd_map = self.cfg.ComputeDRBDMap()
 
     feedback_fn("* Gathering disk information (%s nodes)" %
-                len(self.my_node_names))
-    instdisk = self._CollectDiskInfo(self.my_node_names, node_image,
+                len(self.my_node_uuids))
+    instdisk = self._CollectDiskInfo(self.my_node_info.keys(), node_image,
                                      self.my_inst_info)
 
     feedback_fn("* Verifying configuration file consistency")
 
     # If not all nodes are being checked, we need to make sure the master node
     # and a non-checked vm_capable node are in the list.
-    absent_nodes = set(self.all_node_info).difference(self.my_node_info)
-    if absent_nodes:
+    absent_node_uuids = set(self.all_node_info).difference(self.my_node_info)
+    if absent_node_uuids:
       vf_nvinfo = all_nvinfo.copy()
       vf_node_info = list(self.my_node_info.values())
-      additional_nodes = []
-      if master_node not in self.my_node_info:
-        additional_nodes.append(master_node)
-        vf_node_info.append(self.all_node_info[master_node])
+      additional_node_uuids = []
+      if master_node_uuid not in self.my_node_info:
+        additional_node_uuids.append(master_node_uuid)
+        vf_node_info.append(self.all_node_info[master_node_uuid])
       # Add the first vm_capable node we find which is not included,
       # excluding the master node (which we already have)
-      for node in absent_nodes:
-        nodeinfo = self.all_node_info[node]
+      for node_uuid in absent_node_uuids:
+        nodeinfo = self.all_node_info[node_uuid]
         if (nodeinfo.vm_capable and not nodeinfo.offline and
-            node != master_node):
-          additional_nodes.append(node)
-          vf_node_info.append(self.all_node_info[node])
+            node_uuid != master_node_uuid):
+          additional_node_uuids.append(node_uuid)
+          vf_node_info.append(self.all_node_info[node_uuid])
           break
       key = constants.NV_FILELIST
       vf_nvinfo.update(self.rpc.call_node_verify(
-         additional_nodes, {key: node_verify_param[key]},
+         additional_node_uuids, {key: node_verify_param[key]},
          self.cfg.GetClusterName(), self.cfg.GetClusterInfo().hvparams))
     else:
       vf_nvinfo = all_nvinfo
       vf_node_info = self.my_node_info.values()
 
-    self._VerifyFiles(_ErrorIf, vf_node_info, master_node, vf_nvinfo, filemap)
+    self._VerifyFiles(vf_node_info, master_node_uuid, vf_nvinfo, filemap)
 
     feedback_fn("* Verifying node status")
 
     refos_img = None
 
     for node_i in node_data_list:
-      node = node_i.name
-      nimg = node_image[node]
+      nimg = node_image[node_i.uuid]
 
       if node_i.offline:
         if verbose:
-          feedback_fn("* Skipping offline node %s" % (node,))
+          feedback_fn("* Skipping offline node %s" % (node_i.name,))
         n_offline += 1
         continue
 
-      if node == master_node:
+      if node_i.uuid == master_node_uuid:
         ntype = "master"
       elif node_i.master_candidate:
         ntype = "master candidate"
@@ -2801,16 +2825,16 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       else:
         ntype = "regular"
       if verbose:
-        feedback_fn("* Verifying node %s (%s)" % (node, ntype))
+        feedback_fn("* Verifying node %s (%s)" % (node_i.name, ntype))
 
-      msg = all_nvinfo[node].fail_msg
-      _ErrorIf(msg, constants.CV_ENODERPC, node, "while contacting node: %s",
-               msg)
+      msg = all_nvinfo[node_i.uuid].fail_msg
+      _ErrorIf(msg, constants.CV_ENODERPC, node_i.name,
+               "while contacting node: %s", msg)
       if msg:
         nimg.rpc_fail = True
         continue
 
-      nresult = all_nvinfo[node].payload
+      nresult = all_nvinfo[node_i.uuid].payload
 
       nimg.call_ok = self._VerifyNode(node_i, nresult)
       self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
@@ -2818,7 +2842,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       self._VerifyNodeUserScripts(node_i, nresult)
       self._VerifyOob(node_i, nresult)
       self._VerifyFileStoragePaths(node_i, nresult,
-                                   node == master_node)
+                                   node_i.uuid == master_node_uuid)
 
       if nimg.vm_capable:
         self._UpdateVerifyNodeLVM(node_i, nresult, vg_name, nimg)
@@ -2851,9 +2875,9 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     self._VerifyGroupDRBDVersion(all_nvinfo)
     self._VerifyGroupLVM(node_image, vg_name)
 
-    for node, result in extra_lv_nvinfo.items():
-      self._UpdateNodeVolumes(self.all_node_info[node], result.payload,
-                              node_image[node], vg_name)
+    for node_uuid, result in extra_lv_nvinfo.items():
+      self._UpdateNodeVolumes(self.all_node_info[node_uuid], result.payload,
+                              node_image[node_uuid], vg_name)
 
     feedback_fn("* Verifying instance status")
     for instance in self.my_inst_names:
@@ -2927,7 +2951,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     """
     # We only really run POST phase hooks, only for non-empty groups,
     # and are only interested in their results
-    if not self.my_node_names:
+    if not self.my_node_uuids:
       # empty node group
       pass
     elif phase == constants.HOOKS_PHASE_POST:
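
The verification code above follows one recurring shape: per-node bookkeeping
(node_image, offline sets, file maps) is keyed by node UUID, and
self.cfg.GetNodeName / GetNodeNames is called only when a message is produced.
A minimal stand-alone sketch of that shape, with a fake config object in place
of the real ConfigWriter and invented UUIDs and names:

# MiniConfig is a stand-in for the name lookups used above.
class MiniConfig(object):
  def __init__(self, names_by_uuid):
    self._names = names_by_uuid

  def GetNodeName(self, uuid):
    return self._names[uuid]

  def GetNodeNames(self, uuids):
    return [self._names[u] for u in uuids]

cfg = MiniConfig({"uuid-1": "node1.example.com",
                  "uuid-2": "node2.example.com"})

# Internal state is keyed by UUID ...
offline_node_uuids = set(["uuid-2"])

# ... and names appear only in the user-facing message.
print("Skipping offline node(s): %s"
      % ", ".join(cfg.GetNodeNames(sorted(offline_node_uuids))))
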
index 6fb9faa..c9b42c2 100644 (file)
@@ -48,17 +48,17 @@ CAN_CHANGE_INSTANCE_OFFLINE = (frozenset(INSTANCE_DOWN) | frozenset([
   ]))
 
 
-def _ExpandItemName(fn, name, kind):
+def _ExpandItemName(expand_fn, name, kind):
   """Expand an item name.
 
-  @param fn: the function to use for expansion
+  @param expand_fn: the function to use for expansion
   @param name: requested item name
   @param kind: text description ('Node' or 'Instance')
-  @return: the resolved (full) name
+  @return: the result of the expand_fn, if successful
   @raise errors.OpPrereqError: if the item is not found
 
   """
-  full_name = fn(name)
+  full_name = expand_fn(name)
   if full_name is None:
     raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
                                errors.ECODE_NOENT)
@@ -70,9 +70,26 @@ def ExpandInstanceName(cfg, name):
   return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
 
 
-def ExpandNodeName(cfg, name):
-  """Wrapper over L{_ExpandItemName} for nodes."""
-  return _ExpandItemName(cfg.ExpandNodeName, name, "Node")
+def ExpandNodeUuidAndName(cfg, expected_uuid, name):
+  """Expand a short node name into the node UUID and full name.
+
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: The cluster configuration
+  @type expected_uuid: string
+  @param expected_uuid: expected UUID for the node (or None if there is no
+        expectation). If it does not match, a L{errors.OpPrereqError} is
+        raised.
+  @type name: string
+  @param name: the short node name
+
+  """
+  (uuid, full_name) = _ExpandItemName(cfg.ExpandNodeName, name, "Node")
+  if expected_uuid is not None and uuid != expected_uuid:
+    raise errors.OpPrereqError(
+      "The nodes UUID '%s' does not match the expected UUID '%s' for node"
+      " '%s'. Maybe the node changed since you submitted this job." %
+      (uuid, expected_uuid, full_name), errors.ECODE_NOTUNIQUE)
+  return (uuid, full_name)
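
A self-contained sketch of the expand-and-check behaviour, useful for seeing how
the rename check fires; the error class and the node table below are stand-ins,
and only the control flow mirrors the function above.

class PrereqError(Exception):
  """Stand-in for errors.OpPrereqError."""

def expand_node_uuid_and_name(expand_fn, expected_uuid, name):
  # expand_fn plays the role of cfg.ExpandNodeName: short name -> (uuid, name).
  result = expand_fn(name)
  if result is None:
    raise PrereqError("Node '%s' not known" % name)
  (uuid, full_name) = result
  if expected_uuid is not None and uuid != expected_uuid:
    # The stored UUID no longer matches, e.g. the node was re-added under the
    # same name after the job was submitted.
    raise PrereqError("Node '%s' changed UUID since the job was submitted"
                      % full_name)
  return (uuid, full_name)

nodes = {"node1": ("11111111-2222-3333-4444-555555555555", "node1.example.com")}
print(expand_node_uuid_and_name(nodes.get, None, "node1"))
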
 
 
 def ShareAll():
@@ -106,22 +123,25 @@ def CheckNodeGroupInstances(cfg, group_uuid, owned_instances):
   return wanted_instances
 
 
-def GetWantedNodes(lu, nodes):
+def GetWantedNodes(lu, short_node_names):
   """Returns list of checked and expanded node names.
 
   @type lu: L{LogicalUnit}
   @param lu: the logical unit on whose behalf we execute
-  @type nodes: list
-  @param nodes: list of node names or None for all nodes
-  @rtype: list
-  @return: the list of nodes, sorted
+  @type short_node_names: list
+  @param short_node_names: list of node names or None for all nodes
+  @rtype: tuple of lists
+  @return: tuple with (list of node UUIDs, list of node names)
   @raise errors.ProgrammerError: if the nodes parameter is wrong type
 
   """
-  if nodes:
-    return [ExpandNodeName(lu.cfg, name) for name in nodes]
+  if short_node_names:
+    node_uuids = [ExpandNodeUuidAndName(lu.cfg, None, name)[0]
+                  for name in short_node_names]
+  else:
+    node_uuids = lu.cfg.GetNodeList()
 
-  return utils.NiceSort(lu.cfg.GetNodeList())
+  return (node_uuids, [lu.cfg.GetNodeInfo(uuid).name for uuid in node_uuids])
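
Since the helper now returns two parallel lists, callers unpack both. A tiny
runnable sketch with an invented node table; only the return shape mirrors
GetWantedNodes.

# Invented short-name table: short name -> (uuid, full name).
KNOWN_NODES = {"node1": ("uuid-1", "node1.example.com"),
               "node2": ("uuid-2", "node2.example.com")}

def get_wanted_nodes(short_names):
  wanted = short_names if short_names else sorted(KNOWN_NODES)
  uuids = [KNOWN_NODES[n][0] for n in wanted]
  names = [KNOWN_NODES[n][1] for n in wanted]
  return (uuids, names)

# UUIDs go to config/RPC lookups, names stay around for messages.
(node_uuids, node_names) = get_wanted_nodes(["node2"])
print((node_uuids, node_names))
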
 
 
 def GetWantedInstances(lu, instances):
@@ -150,42 +170,34 @@ def RunPostHook(lu, node_name):
   """
   hm = lu.proc.BuildHooksManager(lu)
   try:
-    hm.RunPhase(constants.HOOKS_PHASE_POST, nodes=[node_name])
+    node_names = [node_name]
+    hm.RunPhase(constants.HOOKS_PHASE_POST, node_names=node_names)
   except Exception, err: # pylint: disable=W0703
     lu.LogWarning("Errors occurred running hooks on %s: %s",
                   node_name, err)
 
 
-def RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
+def RedistributeAncillaryFiles(lu):
   """Distribute additional files which are part of the cluster configuration.
 
   ConfigWriter takes care of distributing the config and ssconf files, but
   there are more files which should be distributed to all nodes. This function
   makes sure those are copied.
 
-  @param lu: calling logical unit
-  @param additional_nodes: list of nodes not in the config to distribute to
-  @type additional_vm: boolean
-  @param additional_vm: whether the additional nodes are vm-capable or not
-
   """
   # Gather target nodes
   cluster = lu.cfg.GetClusterInfo()
   master_info = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
 
-  online_nodes = lu.cfg.GetOnlineNodeList()
-  online_set = frozenset(online_nodes)
-  vm_nodes = list(online_set.intersection(lu.cfg.GetVmCapableNodeList()))
-
-  if additional_nodes is not None:
-    online_nodes.extend(additional_nodes)
-    if additional_vm:
-      vm_nodes.extend(additional_nodes)
+  online_node_uuids = lu.cfg.GetOnlineNodeList()
+  online_node_uuid_set = frozenset(online_node_uuids)
+  vm_node_uuids = list(online_node_uuid_set.intersection(
+                         lu.cfg.GetVmCapableNodeList()))
 
   # Never distribute to master node
-  for nodelist in [online_nodes, vm_nodes]:
-    if master_info.name in nodelist:
-      nodelist.remove(master_info.name)
+  for node_uuids in [online_node_uuids, vm_node_uuids]:
+    if master_info.uuid in node_uuids:
+      node_uuids.remove(master_info.uuid)
 
   # Gather file lists
   (files_all, _, files_mc, files_vm) = \
@@ -197,14 +209,14 @@ def RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
   assert not files_mc, "Master candidates not handled in this function"
 
   filemap = [
-    (online_nodes, files_all),
-    (vm_nodes, files_vm),
+    (online_node_uuids, files_all),
+    (vm_node_uuids, files_vm),
     ]
 
   # Upload the files
-  for (node_list, files) in filemap:
+  for (node_uuids, files) in filemap:
     for fname in files:
-      UploadHelper(lu, node_list, fname)
+      UploadHelper(lu, node_uuids, fname)
 
 
 def ComputeAncillaryFiles(cluster, redist):
@@ -286,17 +298,17 @@ def ComputeAncillaryFiles(cluster, redist):
   return (files_all, files_opt, files_mc, files_vm)
 
 
-def UploadHelper(lu, nodes, fname):
+def UploadHelper(lu, node_uuids, fname):
   """Helper for uploading a file and showing warnings.
 
   """
   if os.path.exists(fname):
-    result = lu.rpc.call_upload_file(nodes, fname)
-    for to_node, to_result in result.items():
+    result = lu.rpc.call_upload_file(node_uuids, fname)
+    for to_node_uuids, to_result in result.items():
       msg = to_result.fail_msg
       if msg:
         msg = ("Copy of file %s to node %s failed: %s" %
-               (fname, to_node, msg))
+               (fname, lu.cfg.GetNodeName(to_node_uuids), msg))
         lu.LogWarning(msg)
 
 
@@ -345,7 +357,7 @@ def MergeAndVerifyDiskState(op_input, obj_input):
   return None
 
 
-def CheckOSParams(lu, required, nodenames, osname, osparams):
+def CheckOSParams(lu, required, node_uuids, osname, osparams):
   """OS parameters validation.
 
   @type lu: L{LogicalUnit}
@@ -353,8 +365,8 @@ def CheckOSParams(lu, required, nodenames, osname, osparams):
   @type required: boolean
   @param required: whether the validation should fail if the OS is not
       found
-  @type nodenames: list
-  @param nodenames: the list of nodes on which we should check
+  @type node_uuids: list
+  @param node_uuids: the list of nodes on which we should check
   @type osname: string
   @param osname: the name of the hypervisor we should use
   @type osparams: dict
@@ -362,20 +374,21 @@ def CheckOSParams(lu, required, nodenames, osname, osparams):
   @raise errors.OpPrereqError: if the parameters are not valid
 
   """
-  nodenames = _FilterVmNodes(lu, nodenames)
-  result = lu.rpc.call_os_validate(nodenames, required, osname,
+  node_uuids = _FilterVmNodes(lu, node_uuids)
+  result = lu.rpc.call_os_validate(node_uuids, required, osname,
                                    [constants.OS_VALIDATE_PARAMETERS],
                                    osparams)
-  for node, nres in result.items():
+  for node_uuid, nres in result.items():
     # we don't check for offline cases since this should be run only
     # against the master node and/or an instance's nodes
-    nres.Raise("OS Parameters validation failed on node %s" % node)
+    nres.Raise("OS Parameters validation failed on node %s" %
+               lu.cfg.GetNodeName(node_uuid))
     if not nres.payload:
       lu.LogInfo("OS %s not found on node %s, validation skipped",
-                 osname, node)
+                 osname, lu.cfg.GetNodeName(node_uuid))
 
 
-def CheckHVParams(lu, nodenames, hvname, hvparams):
+def CheckHVParams(lu, node_uuids, hvname, hvparams):
   """Hypervisor parameter validation.
 
   This function abstract the hypervisor parameter validation to be
@@ -383,8 +396,8 @@ def CheckHVParams(lu, nodenames, hvname, hvparams):
 
   @type lu: L{LogicalUnit}
   @param lu: the logical unit for which we check
-  @type nodenames: list
-  @param nodenames: the list of nodes on which we should check
+  @type node_uuids: list
+  @param node_uuids: the list of nodes on which we should check
   @type hvname: string
   @param hvname: the name of the hypervisor we should use
   @type hvparams: dict
@@ -392,17 +405,18 @@ def CheckHVParams(lu, nodenames, hvname, hvparams):
   @raise errors.OpPrereqError: if the parameters are not valid
 
   """
-  nodenames = _FilterVmNodes(lu, nodenames)
+  node_uuids = _FilterVmNodes(lu, node_uuids)
 
   cluster = lu.cfg.GetClusterInfo()
   hvfull = objects.FillDict(cluster.hvparams.get(hvname, {}), hvparams)
 
-  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames, hvname, hvfull)
-  for node in nodenames:
-    info = hvinfo[node]
+  hvinfo = lu.rpc.call_hypervisor_validate_params(node_uuids, hvname, hvfull)
+  for node_uuid in node_uuids:
+    info = hvinfo[node_uuid]
     if info.offline:
       continue
-    info.Raise("Hypervisor parameter validation failed on node %s" % node)
+    info.Raise("Hypervisor parameter validation failed on node %s" %
+               lu.cfg.GetNodeName(node_uuid))
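
The RPC-based checks above share the same pattern: the call is addressed with
node UUIDs, so the result dictionary comes back keyed by those UUIDs, and the
name is resolved only for the error text. A stand-alone sketch with fake result
objects (not Ganeti's real RPC classes) and invented node data:

class FakeResult(object):
  """Stand-in for an RPC result; only fail_msg is modelled."""
  def __init__(self, fail_msg=None):
    self.fail_msg = fail_msg

NODE_NAMES = {"uuid-1": "node1.example.com", "uuid-2": "node2.example.com"}

def fake_validate_call(node_uuids):
  # Keyed by the identifiers the call was addressed with, i.e. UUIDs here.
  return dict((uuid, FakeResult("connection timed out"
                                if uuid == "uuid-2" else None))
              for uuid in node_uuids)

for node_uuid, res in sorted(fake_validate_call(["uuid-1", "uuid-2"]).items()):
  if res.fail_msg:
    print("Parameter validation failed on node %s: %s"
          % (NODE_NAMES[node_uuid], res.fail_msg))
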
 
 
 def AdjustCandidatePool(lu, exceptions):
@@ -413,8 +427,8 @@ def AdjustCandidatePool(lu, exceptions):
   if mod_list:
     lu.LogInfo("Promoted nodes to master candidate role: %s",
                utils.CommaJoin(node.name for node in mod_list))
-    for name in mod_list:
-      lu.context.ReaddNode(name)
+    for node in mod_list:
+      lu.context.ReaddNode(node)
   mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
   if mc_now > mc_max:
     lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
@@ -548,7 +562,7 @@ def ComputeIPolicyInstanceViolation(ipolicy, instance, cfg,
   be_full = cfg.GetClusterInfo().FillBE(instance)
   mem_size = be_full[constants.BE_MAXMEM]
   cpu_count = be_full[constants.BE_VCPUS]
-  es_flags = rpc.GetExclusiveStorageForNodeNames(cfg, instance.all_nodes)
+  es_flags = rpc.GetExclusiveStorageForNodes(cfg, instance.all_nodes)
   if any(es_flags.values()):
     # With exclusive storage use the actual spindles
     try:
@@ -736,19 +750,19 @@ def _UpdateAndVerifySubDict(base, updates, type_check):
   return ret
 
 
-def _FilterVmNodes(lu, nodenames):
+def _FilterVmNodes(lu, node_uuids):
   """Filters out non-vm_capable nodes from a list.
 
   @type lu: L{LogicalUnit}
   @param lu: the logical unit for which we check
-  @type nodenames: list
-  @param nodenames: the list of nodes on which we should check
+  @type node_uuids: list
+  @param node_uuids: the list of nodes on which we should check
   @rtype: list
   @return: the list of vm-capable nodes
 
   """
   vm_nodes = frozenset(lu.cfg.GetNonVmCapableNodeList())
-  return [name for name in nodenames if name not in vm_nodes]
+  return [uuid for uuid in node_uuids if uuid not in vm_nodes]
 
 
 def GetDefaultIAllocator(cfg, ialloc):
@@ -774,7 +788,7 @@ def GetDefaultIAllocator(cfg, ialloc):
   return ialloc
 
 
-def CheckInstancesNodeGroups(cfg, instances, owned_groups, owned_nodes,
+def CheckInstancesNodeGroups(cfg, instances, owned_groups, owned_node_uuids,
                              cur_group_uuid):
   """Checks if node groups for locked instances are still correct.
 
@@ -784,14 +798,14 @@ def CheckInstancesNodeGroups(cfg, instances, owned_groups, owned_nodes,
   @param instances: Dictionary, instance name as key, instance object as value
   @type owned_groups: iterable of string
   @param owned_groups: List of owned groups
-  @type owned_nodes: iterable of string
-  @param owned_nodes: List of owned nodes
+  @type owned_node_uuids: iterable of string
+  @param owned_node_uuids: List of owned nodes
   @type cur_group_uuid: string or None
   @param cur_group_uuid: Optional group UUID to check against instance's groups
 
   """
   for (name, inst) in instances.items():
-    assert owned_nodes.issuperset(inst.all_nodes), \
+    assert owned_node_uuids.issuperset(inst.all_nodes), \
       "Instance %s's nodes changed while we kept the lock" % name
 
     inst_groups = CheckInstanceNodeGroups(cfg, name, owned_groups)
@@ -855,21 +869,22 @@ def LoadNodeEvacResult(lu, alloc_result, early_release, use_nodes):
 
   if moved:
     lu.LogInfo("Instances to be moved: %s",
-               utils.CommaJoin("%s (to %s)" %
-                               (name, _NodeEvacDest(use_nodes, group, nodes))
-                               for (name, group, nodes) in moved))
+               utils.CommaJoin(
+                 "%s (to %s)" %
+                 (name, _NodeEvacDest(use_nodes, group, node_names))
+                 for (name, group, node_names) in moved))
 
   return [map(compat.partial(_SetOpEarlyRelease, early_release),
               map(opcodes.OpCode.LoadOpCode, ops))
           for ops in jobs]
 
 
-def _NodeEvacDest(use_nodes, group, nodes):
+def _NodeEvacDest(use_nodes, group, node_names):
   """Returns group or nodes depending on caller's choice.
 
   """
   if use_nodes:
-    return utils.CommaJoin(nodes)
+    return utils.CommaJoin(node_names)
   else:
     return group
 
@@ -890,12 +905,12 @@ def MapInstanceDisksToNodes(instances):
   """Creates a map from (node, volume) to instance name.
 
   @type instances: list of L{objects.Instance}
-  @rtype: dict; tuple of (node name, volume name) as key, instance name as value
+  @rtype: dict; tuple of (node uuid, volume name) as key, instance name as value
 
   """
-  return dict(((node, vol), inst.name)
+  return dict(((node_uuid, vol), inst.name)
               for inst in instances
-              for (node, vols) in inst.MapLVsByNode().items()
+              for (node_uuid, vols) in inst.MapLVsByNode().items()
               for vol in vols)
 
 
@@ -960,12 +975,13 @@ def CheckInstanceState(lu, instance, req_states, msg=None):
                                errors.ECODE_STATE)
 
   if constants.ADMINST_UP not in req_states:
-    pnode = instance.primary_node
-    if not lu.cfg.GetNodeInfo(pnode).offline:
+    pnode_uuid = instance.primary_node
+    if not lu.cfg.GetNodeInfo(pnode_uuid).offline:
       all_hvparams = lu.cfg.GetClusterInfo().hvparams
-      ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor],
-                                        all_hvparams)[pnode]
-      ins_l.Raise("Can't contact node %s for instance information" % pnode,
+      ins_l = lu.rpc.call_instance_list(
+                [pnode_uuid], [instance.hypervisor], all_hvparams)[pnode_uuid]
+      ins_l.Raise("Can't contact node %s for instance information" %
+                  lu.cfg.GetNodeName(pnode_uuid),
                   prereq=True, ecode=errors.ECODE_ENVIRON)
       if instance.name in ins_l.payload:
         raise errors.OpPrereqError("Instance %s is running, %s" %
@@ -1011,16 +1027,16 @@ def CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
                                  " iallocator", errors.ECODE_INVAL)
 
 
-def FindFaultyInstanceDisks(cfg, rpc_runner, instance, node_name, prereq):
+def FindFaultyInstanceDisks(cfg, rpc_runner, instance, node_uuid, prereq):
   faulty = []
 
   for dev in instance.disks:
-    cfg.SetDiskID(dev, node_name)
+    cfg.SetDiskID(dev, node_uuid)
 
-  result = rpc_runner.call_blockdev_getmirrorstatus(node_name,
-                                                    (instance.disks,
-                                                     instance))
-  result.Raise("Failed to get disk status from node %s" % node_name,
+  result = rpc_runner.call_blockdev_getmirrorstatus(
+             node_uuid, (instance.disks, instance))
+  result.Raise("Failed to get disk status from node %s" %
+               cfg.GetNodeName(node_uuid),
                prereq=prereq, ecode=errors.ECODE_ENVIRON)
 
   for idx, bdev_status in enumerate(result.payload):
@@ -1030,16 +1046,17 @@ def FindFaultyInstanceDisks(cfg, rpc_runner, instance, node_name, prereq):
   return faulty
 
 
-def CheckNodeOnline(lu, node, msg=None):
+def CheckNodeOnline(lu, node_uuid, msg=None):
   """Ensure that a given node is online.
 
   @param lu: the LU on behalf of which we make the check
-  @param node: the node to check
+  @param node_uuid: the node to check
   @param msg: if passed, should be a message to replace the default one
   @raise errors.OpPrereqError: if the node is offline
 
   """
   if msg is None:
     msg = "Can't use offline node"
-  if lu.cfg.GetNodeInfo(node).offline:
-    raise errors.OpPrereqError("%s: %s" % (msg, node), errors.ECODE_STATE)
+  if lu.cfg.GetNodeInfo(node_uuid).offline:
+    raise errors.OpPrereqError("%s: %s" % (msg, lu.cfg.GetNodeName(node_uuid)),
+                               errors.ECODE_STATE)
index 676803c..219857c 100644 (file)
@@ -152,14 +152,14 @@ class LUGroupAssignNodes(NoHooksLU):
   def ExpandNames(self):
     # These raise errors.OpPrereqError on their own:
     self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
-    self.op.nodes = GetWantedNodes(self, self.op.nodes)
+    (self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, self.op.nodes)
 
     # We want to lock all the affected nodes and groups. We have readily
     # available the list of nodes, and the *destination* group. To gather the
     # list of "source" groups, we need to fetch node information later on.
     self.needed_locks = {
       locking.LEVEL_NODEGROUP: set([self.group_uuid]),
-      locking.LEVEL_NODE: self.op.nodes,
+      locking.LEVEL_NODE: self.op.node_uuids,
       }
 
   def DeclareLocks(self, level):
@@ -168,7 +168,7 @@ class LUGroupAssignNodes(NoHooksLU):
 
       # Try to get all affected nodes' groups without having the group or node
       # lock yet. Needs verification later in the code flow.
-      groups = self.cfg.GetNodeGroupsFromNodes(self.op.nodes)
+      groups = self.cfg.GetNodeGroupsFromNodes(self.op.node_uuids)
 
       self.needed_locks[locking.LEVEL_NODEGROUP].update(groups)
 
@@ -178,10 +178,10 @@ class LUGroupAssignNodes(NoHooksLU):
     """
     assert self.needed_locks[locking.LEVEL_NODEGROUP]
     assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
-            frozenset(self.op.nodes))
+            frozenset(self.op.node_uuids))
 
     expected_locks = (set([self.group_uuid]) |
-                      self.cfg.GetNodeGroupsFromNodes(self.op.nodes))
+                      self.cfg.GetNodeGroupsFromNodes(self.op.node_uuids))
     actual_locks = self.owned_locks(locking.LEVEL_NODEGROUP)
     if actual_locks != expected_locks:
       raise errors.OpExecError("Nodes changed groups since locks were acquired,"
@@ -198,8 +198,8 @@ class LUGroupAssignNodes(NoHooksLU):
                                (self.op.group_name, self.group_uuid))
 
     (new_splits, previous_splits) = \
-      self.CheckAssignmentForSplitInstances([(node, self.group_uuid)
-                                             for node in self.op.nodes],
+      self.CheckAssignmentForSplitInstances([(uuid, self.group_uuid)
+                                             for uuid in self.op.node_uuids],
                                             self.node_data, instance_data)
 
     if new_splits:
@@ -222,7 +222,7 @@ class LUGroupAssignNodes(NoHooksLU):
     """Assign nodes to a new group.
 
     """
-    mods = [(node_name, self.group_uuid) for node_name in self.op.nodes]
+    mods = [(node_uuid, self.group_uuid) for node_uuid in self.op.node_uuids]
 
     self.cfg.AssignGroupNodes(mods)
 
@@ -240,7 +240,7 @@ class LUGroupAssignNodes(NoHooksLU):
     Only instances whose disk template is listed in constants.DTS_INT_MIRROR are
     considered.
 
-    @type changes: list of (node_name, new_group_uuid) pairs.
+    @type changes: list of (node_uuid, new_group_uuid) pairs.
     @param changes: list of node assignments to consider.
     @param node_data: a dict with data for all nodes
     @param instance_data: a dict with all instances to consider
@@ -250,26 +250,22 @@ class LUGroupAssignNodes(NoHooksLU):
       split and this change does not fix.
 
     """
-    changed_nodes = dict((node, group) for node, group in changes
-                         if node_data[node].group != group)
+    changed_nodes = dict((uuid, group) for uuid, group in changes
+                         if node_data[uuid].group != group)
 
     all_split_instances = set()
     previously_split_instances = set()
 
-    def InstanceNodes(instance):
-      return [instance.primary_node] + list(instance.secondary_nodes)
-
     for inst in instance_data.values():
       if inst.disk_template not in constants.DTS_INT_MIRROR:
         continue
 
-      instance_nodes = InstanceNodes(inst)
-
-      if len(set(node_data[node].group for node in instance_nodes)) > 1:
+      if len(set(node_data[node_uuid].group
+                 for node_uuid in inst.all_nodes)) > 1:
         previously_split_instances.add(inst.name)
 
-      if len(set(changed_nodes.get(node, node_data[node].group)
-                 for node in instance_nodes)) > 1:
+      if len(set(changed_nodes.get(node_uuid, node_data[node_uuid].group)
+                 for node_uuid in inst.all_nodes)) > 1:
         all_split_instances.add(inst.name)
 
     return (list(all_split_instances - previously_split_instances),
@@ -333,8 +329,8 @@ class GroupQuery(QueryBase):
 
       for node in all_nodes.values():
         if node.group in group_to_nodes:
-          group_to_nodes[node.group].append(node.name)
-          node_to_group[node.name] = node.group
+          group_to_nodes[node.group].append(node.uuid)
+          node_to_group[node.uuid] = node.group
 
       if do_instances:
         all_instances = lu.cfg.GetAllInstancesInfo()
@@ -561,7 +557,7 @@ class LUGroupRemove(LogicalUnit):
 
     """
     # Verify that the group is empty.
-    group_nodes = [node.name
+    group_nodes = [node.uuid
                    for node in self.cfg.GetAllNodesInfo().values()
                    if node.group == self.group_uuid]
 
@@ -654,7 +650,7 @@ class LUGroupRename(LogicalUnit):
     all_nodes.pop(mn, None)
 
     run_nodes = [mn]
-    run_nodes.extend(node.name for node in all_nodes.values()
+    run_nodes.extend(node.uuid for node in all_nodes.values()
                      if node.group == self.group_uuid)
 
     return (run_nodes, run_nodes)
@@ -743,15 +739,16 @@ class LUGroupEvacuate(LogicalUnit):
       # Lock all nodes in group to be evacuated and target groups
       owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
       assert self.group_uuid in owned_groups
-      member_nodes = [node_name
-                      for group in owned_groups
-                      for node_name in self.cfg.GetNodeGroup(group).members]
-      self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
+      member_node_uuids = [node_uuid
+                           for group in owned_groups
+                           for node_uuid in
+                             self.cfg.GetNodeGroup(group).members]
+      self.needed_locks[locking.LEVEL_NODE].extend(member_node_uuids)
 
   def CheckPrereq(self):
     owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
     owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
-    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
+    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
 
     assert owned_groups.issuperset(self.req_target_uuids)
     assert self.group_uuid in owned_groups
@@ -764,7 +761,7 @@ class LUGroupEvacuate(LogicalUnit):
 
     # Check if node groups for locked instances are still correct
     CheckInstancesNodeGroups(self.cfg, self.instances,
-                             owned_groups, owned_nodes, self.group_uuid)
+                             owned_groups, owned_node_uuids, self.group_uuid)
 
     if self.req_target_uuids:
       # User requested specific target groups
@@ -876,13 +873,13 @@ class LUGroupVerifyDisks(NoHooksLU):
 
       # Lock all nodes in group to be verified
       assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
-      member_nodes = self.cfg.GetNodeGroup(self.group_uuid).members
-      self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
+      member_node_uuids = self.cfg.GetNodeGroup(self.group_uuid).members
+      self.needed_locks[locking.LEVEL_NODE].extend(member_node_uuids)
 
   def CheckPrereq(self):
     owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
     owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
-    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
+    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
 
     assert self.group_uuid in owned_groups
 
@@ -894,7 +891,7 @@ class LUGroupVerifyDisks(NoHooksLU):
 
     # Check if node groups for locked instances are still correct
     CheckInstancesNodeGroups(self.cfg, self.instances,
-                             owned_groups, owned_nodes, self.group_uuid)
+                             owned_groups, owned_node_uuids, self.group_uuid)
 
   def Exec(self, feedback_fn):
     """Verify integrity of cluster disks.
@@ -913,23 +910,24 @@ class LUGroupVerifyDisks(NoHooksLU):
       [inst for inst in self.instances.values() if inst.disks_active])
 
     if nv_dict:
-      nodes = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
-                             set(self.cfg.GetVmCapableNodeList()))
+      node_uuids = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
+                                  set(self.cfg.GetVmCapableNodeList()))
 
-      node_lvs = self.rpc.call_lv_list(nodes, [])
+      node_lvs = self.rpc.call_lv_list(node_uuids, [])
 
-      for (node, node_res) in node_lvs.items():
+      for (node_uuid, node_res) in node_lvs.items():
         if node_res.offline:
           continue
 
         msg = node_res.fail_msg
         if msg:
-          logging.warning("Error enumerating LVs on node %s: %s", node, msg)
-          res_nodes[node] = msg
+          logging.warning("Error enumerating LVs on node %s: %s",
+                          self.cfg.GetNodeName(node_uuid), msg)
+          res_nodes[node_uuid] = msg
           continue
 
         for lv_name, (_, _, lv_online) in node_res.payload.items():
-          inst = nv_dict.pop((node, lv_name), None)
+          inst = nv_dict.pop((node_uuid, lv_name), None)
           if not (lv_online or inst is None):
             res_instances.add(inst)
 
index d77ba68..97df4c3 100644 (file)
@@ -49,10 +49,10 @@ from ganeti.cmdlib.common import INSTANCE_DOWN, \
   LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
   IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
   AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
-  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
+  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName
 from ganeti.cmdlib.instance_storage import CreateDisks, \
   CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
-  IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
+  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
   CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
   StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
   CheckSpindlesExclusiveStorage
@@ -106,14 +106,14 @@ def _CheckOpportunisticLocking(op):
                                errors.ECODE_INVAL)
 
 
-def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
+def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
   """Wrapper around IAReqInstanceAlloc.
 
   @param op: The instance opcode
   @param disks: The computed disks
   @param nics: The computed nics
   @param beparams: The full filled beparams
-  @param node_whitelist: List of nodes which should appear as online to the
+  @param node_name_whitelist: List of node names which should appear as online to the
     allocator (unless the node is already marked offline)
 
   @returns: A filled L{iallocator.IAReqInstanceAlloc}
@@ -130,7 +130,7 @@ def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
                                        disks=disks,
                                        nics=[n.ToDict() for n in nics],
                                        hypervisor=op.hypervisor,
-                                       node_whitelist=node_whitelist)
+                                       node_whitelist=node_name_whitelist)
 
 
 def _ComputeFullBeParams(op, cluster):
@@ -245,16 +245,16 @@ def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
   return nics
 
 
-def _CheckForConflictingIp(lu, ip, node):
+def _CheckForConflictingIp(lu, ip, node_uuid):
   """In case of conflicting IP address raise error.
 
   @type ip: string
   @param ip: IP address
-  @type node: string
-  @param node: node name
+  @type node_uuid: string
+  @param node_uuid: node UUID
 
   """
-  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
+  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
   if conf_net is not None:
     raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                 " network %s, but the target NIC does not." %
@@ -521,11 +521,13 @@ class LUInstanceCreate(LogicalUnit):
         self.opportunistic_locks[locking.LEVEL_NODE] = True
         self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
     else:
-      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
-      nodelist = [self.op.pnode]
+      (self.op.pnode_uuid, self.op.pnode) = \
+        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
+      nodelist = [self.op.pnode_uuid]
       if self.op.snode is not None:
-        self.op.snode = ExpandNodeName(self.cfg, self.op.snode)
-        nodelist.append(self.op.snode)
+        (self.op.snode_uuid, self.op.snode) = \
+          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
+        nodelist.append(self.op.snode_uuid)
       self.needed_locks[locking.LEVEL_NODE] = nodelist
 
     # in case of import lock the source node too
@@ -545,9 +547,10 @@ class LUInstanceCreate(LogicalUnit):
                                      " requires a source node option",
                                      errors.ECODE_INVAL)
       else:
-        self.op.src_node = src_node = ExpandNodeName(self.cfg, src_node)
+        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
+          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
         if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
-          self.needed_locks[locking.LEVEL_NODE].append(src_node)
+          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
         if not os.path.isabs(src_path):
           self.op.src_path = src_path = \
             utils.PathJoin(pathutils.EXPORT_DIR, src_path)
@@ -569,7 +572,7 @@ class LUInstanceCreate(LogicalUnit):
     #     in a nodegroup that has the desired network connected to
     req = _CreateInstanceAllocRequest(self.op, self.disks,
                                       self.nics, self.be_full,
-                                      node_whitelist)
+                                      self.cfg.GetNodeNames(node_whitelist))
     ial = iallocator.IAllocator(self.cfg, self.rpc, req)
 
     ial.Run(self.op.iallocator)
@@ -586,7 +589,8 @@ class LUInstanceCreate(LogicalUnit):
                                  (self.op.iallocator, ial.info),
                                  ecode)
 
-    self.op.pnode = ial.result[0]
+    (self.op.pnode_uuid, self.op.pnode) = \
+      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
     self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                  self.op.instance_name, self.op.iallocator,
                  utils.CommaJoin(ial.result))
@@ -594,7 +598,8 @@ class LUInstanceCreate(LogicalUnit):
     assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
 
     if req.RequiredNodes() == 2:
-      self.op.snode = ial.result[1]
+      (self.op.snode_uuid, self.op.snode) = \
+        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])
 
   def BuildHooksEnv(self):
     """Build hooks env.
@@ -612,8 +617,8 @@ class LUInstanceCreate(LogicalUnit):
 
     env.update(BuildInstanceHookEnv(
       name=self.op.instance_name,
-      primary_node=self.op.pnode,
-      secondary_nodes=self.secondaries,
+      primary_node_name=self.op.pnode,
+      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
       status=self.op.start,
       os_type=self.op.os_type,
       minmem=self.be_full[constants.BE_MINMEM],
@@ -635,7 +640,7 @@ class LUInstanceCreate(LogicalUnit):
     """Build hooks nodes.
 
     """
-    nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
+    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
     return nl, nl
 
   def _ReadExportInfo(self):
@@ -649,10 +654,10 @@ class LUInstanceCreate(LogicalUnit):
     """
     assert self.op.mode == constants.INSTANCE_IMPORT
 
-    src_node = self.op.src_node
+    src_node_uuid = self.op.src_node_uuid
     src_path = self.op.src_path
 
-    if src_node is None:
+    if src_node_uuid is None:
       locked_nodes = self.owned_locks(locking.LEVEL_NODE)
       exp_list = self.rpc.call_export_list(locked_nodes)
       found = False
@@ -661,7 +666,9 @@ class LUInstanceCreate(LogicalUnit):
           continue
         if src_path in exp_list[node].payload:
           found = True
-          self.op.src_node = src_node = node
+          self.op.src_node = node
+          self.op.src_node_uuid = src_node_uuid = \
+            self.cfg.GetNodeInfoByName(node).uuid
           self.op.src_path = src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                                        src_path)
           break
@@ -669,8 +676,8 @@ class LUInstanceCreate(LogicalUnit):
         raise errors.OpPrereqError("No export found for relative path %s" %
                                    src_path, errors.ECODE_INVAL)
 
-    CheckNodeOnline(self, src_node)
-    result = self.rpc.call_export_info(src_node, src_path)
+    CheckNodeOnline(self, src_node_uuid)
+    result = self.rpc.call_export_info(src_node_uuid, src_path)
     result.Raise("No export or invalid export found in dir %s" % src_path)
 
     export_info = objects.SerializableConfigParser.Loads(str(result.payload))
@@ -679,7 +686,7 @@ class LUInstanceCreate(LogicalUnit):
                                    errors.ECODE_ENVIRON)
 
     ei_version = export_info.get(constants.INISECT_EXP, "version")
-    if (int(ei_version) != constants.EXPORT_VERSION):
+    if int(ei_version) != constants.EXPORT_VERSION:
       raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                  (ei_version, constants.EXPORT_VERSION),
                                  errors.ECODE_ENVIRON)
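
As a side note, a hedged sketch of the two name-to-UUID translations used in
this file ("node1.example.com" is a hypothetical node name):

    (node_uuid, node_name) = \
      ExpandNodeUuidAndName(self.cfg, None, "node1.example.com")
    same_uuid = self.cfg.GetNodeInfoByName(node_name).uuid
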
@@ -940,7 +947,8 @@ class LUInstanceCreate(LogicalUnit):
       self._RunAllocator()
 
     # Release all unneeded node locks
-    keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
+    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
+                               self.op.src_node_uuid])
     ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
     ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
     ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
@@ -952,9 +960,9 @@ class LUInstanceCreate(LogicalUnit):
     #### node related checks
 
     # check primary node
-    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
+    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
     assert self.pnode is not None, \
-      "Cannot retrieve locked node %s" % self.op.pnode
+      "Cannot retrieve locked node %s" % self.op.pnode_uuid
     if pnode.offline:
       raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                  pnode.name, errors.ECODE_STATE)
@@ -973,7 +981,7 @@ class LUInstanceCreate(LogicalUnit):
       net_uuid = nic.network
       if net_uuid is not None:
         nobj = self.cfg.GetNetwork(net_uuid)
-        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.name)
+        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
         if netparams is None:
           raise errors.OpPrereqError("No netparams found for network"
                                      " %s. Propably not connected to"
@@ -1003,19 +1011,19 @@ class LUInstanceCreate(LogicalUnit):
 
       # net is None, ip None or given
       elif self.op.conflicts_check:
-        _CheckForConflictingIp(self, nic.ip, self.pnode.name)
+        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)
 
     # mirror node verification
     if self.op.disk_template in constants.DTS_INT_MIRROR:
-      if self.op.snode == pnode.name:
+      if self.op.snode_uuid == pnode.uuid:
         raise errors.OpPrereqError("The secondary node cannot be the"
                                    " primary node", errors.ECODE_INVAL)
-      CheckNodeOnline(self, self.op.snode)
-      CheckNodeNotDrained(self, self.op.snode)
-      CheckNodeVmCapable(self, self.op.snode)
-      self.secondaries.append(self.op.snode)
+      CheckNodeOnline(self, self.op.snode_uuid)
+      CheckNodeNotDrained(self, self.op.snode_uuid)
+      CheckNodeVmCapable(self, self.op.snode_uuid)
+      self.secondaries.append(self.op.snode_uuid)
 
-      snode = self.cfg.GetNodeInfo(self.op.snode)
+      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
       if pnode.group != snode.group:
         self.LogWarning("The primary and secondary nodes are in two"
                         " different node groups; the disk parameters"
@@ -1034,7 +1042,7 @@ class LUInstanceCreate(LogicalUnit):
     for disk in self.disks:
       CheckSpindlesExclusiveStorage(disk, excl_stor, True)
 
-    nodenames = [pnode.name] + self.secondaries
+    node_uuids = [pnode.uuid] + self.secondaries
 
     if not self.adopt_disks:
       if self.op.disk_template == constants.DT_RBD:
@@ -1048,7 +1056,7 @@ class LUInstanceCreate(LogicalUnit):
       elif self.op.disk_template in utils.GetLvmDiskTemplates():
         # Check lv size requirements, if not adopting
         req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
-        CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
+        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
       else:
         # FIXME: add checks for other, non-adopting, non-lvm disk templates
         pass
@@ -1069,11 +1077,11 @@ class LUInstanceCreate(LogicalUnit):
           raise errors.OpPrereqError("LV named %s used by another instance" %
                                      lv_name, errors.ECODE_NOTUNIQUE)
 
-      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
+      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
       vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
 
-      node_lvs = self.rpc.call_lv_list([pnode.name],
-                                       vg_names.payload.keys())[pnode.name]
+      node_lvs = self.rpc.call_lv_list([pnode.uuid],
+                                       vg_names.payload.keys())[pnode.uuid]
       node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
       node_lvs = node_lvs.payload
 
@@ -1109,8 +1117,8 @@ class LUInstanceCreate(LogicalUnit):
                                     constants.ADOPTABLE_BLOCKDEV_ROOT),
                                    errors.ECODE_INVAL)
 
-      node_disks = self.rpc.call_bdev_sizes([pnode.name],
-                                            list(all_disks))[pnode.name]
+      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
+                                            list(all_disks))[pnode.uuid]
       node_disks.Raise("Cannot get block device information from node %s" %
                        pnode.name)
       node_disks = node_disks.payload
@@ -1144,13 +1152,13 @@ class LUInstanceCreate(LogicalUnit):
              (pnode.group, group_info.name, utils.CommaJoin(res)))
       raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
 
-    CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
+    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)
 
-    CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
+    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
     # check OS parameters (remotely)
-    CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
+    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)
 
-    CheckNicsBridgesExist(self, self.nics, self.pnode.name)
+    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)
 
     #TODO: _CheckExtParams (remotely)
     # Check parameters for extstorage
@@ -1160,12 +1168,12 @@ class LUInstanceCreate(LogicalUnit):
     if self.op.start:
       hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                 self.op.hvparams)
-      CheckNodeFreeMemory(self, self.pnode.name,
+      CheckNodeFreeMemory(self, self.pnode.uuid,
                           "creating instance %s" % self.op.instance_name,
                           self.be_full[constants.BE_MAXMEM],
                           self.op.hypervisor, hvfull)
 
-    self.dry_run_result = list(nodenames)
+    self.dry_run_result = list(node_uuids)
 
   def Exec(self, feedback_fn):
     """Create and add the instance to the cluster.
@@ -1188,11 +1196,10 @@ class LUInstanceCreate(LogicalUnit):
     # This is ugly but we got a chicken-egg problem here
     # We can only take the group disk parameters, as the instance
     # has no disks yet (we are generating them right here).
-    node = self.cfg.GetNodeInfo(pnode_name)
-    nodegroup = self.cfg.GetNodeGroup(node.group)
+    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
     disks = GenerateDiskTemplate(self,
                                  self.op.disk_template,
-                                 instance, pnode_name,
+                                 instance, self.pnode.uuid,
                                  self.secondaries,
                                  self.disks,
                                  self.instance_file_storage_dir,
@@ -1202,7 +1209,7 @@ class LUInstanceCreate(LogicalUnit):
                                  self.cfg.GetGroupDiskParams(nodegroup))
 
     iobj = objects.Instance(name=instance, os=self.op.os_type,
-                            primary_node=pnode_name,
+                            primary_node=self.pnode.uuid,
                             nics=self.nics, disks=disks,
                             disk_template=self.op.disk_template,
                             disks_active=False,
@@ -1227,8 +1234,8 @@ class LUInstanceCreate(LogicalUnit):
         for t_dsk, a_dsk in zip(tmp_disks, self.disks):
           rename_to.append(t_dsk.logical_id)
           t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
-          self.cfg.SetDiskID(t_dsk, pnode_name)
-        result = self.rpc.call_blockdev_rename(pnode_name,
+          self.cfg.SetDiskID(t_dsk, self.pnode.uuid)
+        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
                                                zip(tmp_disks, rename_to))
         result.Raise("Failed to rename adoped LVs")
     else:
@@ -1250,7 +1257,7 @@ class LUInstanceCreate(LogicalUnit):
 
     if self.op.mode == constants.INSTANCE_IMPORT:
       # Release unused nodes
-      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
+      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
     else:
       # Release all nodes
       ReleaseLocks(self, locking.LEVEL_NODE)
@@ -1296,14 +1303,14 @@ class LUInstanceCreate(LogicalUnit):
       # preceding code might or might have not done it, depending on
       # disk template and other options
       for disk in iobj.disks:
-        self.cfg.SetDiskID(disk, pnode_name)
+        self.cfg.SetDiskID(disk, self.pnode.uuid)
       if self.op.mode == constants.INSTANCE_CREATE:
         if not self.op.no_install:
           pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                         not self.op.wait_for_sync)
           if pause_sync:
             feedback_fn("* pausing disk sync to install instance OS")
-            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
+            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                               (iobj.disks,
                                                                iobj), True)
             for idx, success in enumerate(result.payload):
@@ -1314,11 +1321,11 @@ class LUInstanceCreate(LogicalUnit):
           feedback_fn("* running the instance OS create scripts...")
           # FIXME: pass debug option from opcode to backend
           os_add_result = \
-            self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
+            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
                                           self.op.debug_level)
           if pause_sync:
             feedback_fn("* resuming disk sync")
-            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
+            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                               (iobj.disks,
                                                                iobj), False)
             for idx, success in enumerate(result.payload):
@@ -1349,7 +1356,8 @@ class LUInstanceCreate(LogicalUnit):
 
           import_result = \
             masterd.instance.TransferInstanceData(self, feedback_fn,
-                                                  self.op.src_node, pnode_name,
+                                                  self.op.src_node_uuid,
+                                                  self.pnode.uuid,
                                                   self.pnode.secondary_ip,
                                                   iobj, transfers)
           if not compat.all(import_result):
@@ -1368,7 +1376,7 @@ class LUInstanceCreate(LogicalUnit):
                              self.op.source_shutdown_timeout)
           timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
 
-          assert iobj.primary_node == self.pnode.name
+          assert iobj.primary_node == self.pnode.uuid
           disk_results = \
             masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                           self.source_x509_ca,
@@ -1389,7 +1397,7 @@ class LUInstanceCreate(LogicalUnit):
         # Run rename script on newly imported instance
         assert iobj.name == instance
         feedback_fn("Running rename script for %s" % instance)
-        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
+        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
                                                    rename_from,
                                                    self.op.debug_level)
         result.Warn("Failed to run rename script for %s on node %s" %
@@ -1402,7 +1410,7 @@ class LUInstanceCreate(LogicalUnit):
       self.cfg.Update(iobj, feedback_fn)
       logging.info("Starting instance %s on node %s", instance, pnode_name)
       feedback_fn("* starting instance...")
-      result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
+      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
                                             False, self.op.reason)
       result.Raise("Could not start instance")
 
@@ -1503,24 +1511,25 @@ class LUInstanceRename(LogicalUnit):
                                                      new_file_storage_dir)
       result.Raise("Could not rename on node %s directory '%s' to '%s'"
                    " (but the instance has been renamed in Ganeti)" %
-                   (inst.primary_node, old_file_storage_dir,
-                    new_file_storage_dir))
+                   (self.cfg.GetNodeName(inst.primary_node),
+                    old_file_storage_dir, new_file_storage_dir))
 
     StartInstanceDisks(self, inst, None)
     # update info on disks
     info = GetInstanceInfoText(inst)
     for (idx, disk) in enumerate(inst.disks):
-      for node in inst.all_nodes:
-        self.cfg.SetDiskID(disk, node)
-        result = self.rpc.call_blockdev_setinfo(node, disk, info)
-        result.Warn("Error setting info on node %s for disk %s" % (node, idx),
-                    self.LogWarning)
+      for node_uuid in inst.all_nodes:
+        self.cfg.SetDiskID(disk, node_uuid)
+        result = self.rpc.call_blockdev_setinfo(node_uuid, disk, info)
+        result.Warn("Error setting info on node %s for disk %s" %
+                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
     try:
       result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                  old_name, self.op.debug_level)
       result.Warn("Could not run OS rename script for instance %s on node %s"
                   " (but the instance has been renamed in Ganeti)" %
-                  (inst.name, inst.primary_node), self.LogWarning)
+                  (inst.name, self.cfg.GetNodeName(inst.primary_node)),
+                  self.LogWarning)
     finally:
       ShutdownInstanceDisks(self, inst)
 
@@ -1583,7 +1592,7 @@ class LUInstanceRemove(LogicalUnit):
     """
     instance = self.instance
     logging.info("Shutting down instance %s on node %s",
-                 instance.name, instance.primary_node)
+                 instance.name, self.cfg.GetNodeName(instance.primary_node))
 
     result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
                                              self.op.shutdown_timeout,
@@ -1592,7 +1601,7 @@ class LUInstanceRemove(LogicalUnit):
       result.Warn("Warning: can't shutdown instance", feedback_fn)
     else:
       result.Raise("Could not shutdown instance %s on node %s" %
-                   (instance.name, instance.primary_node))
+                   (instance.name, self.cfg.GetNodeName(instance.primary_node)))
 
     assert (self.owned_locks(locking.LEVEL_NODE) ==
             self.owned_locks(locking.LEVEL_NODE_RES))
@@ -1613,9 +1622,10 @@ class LUInstanceMove(LogicalUnit):
 
   def ExpandNames(self):
     self._ExpandAndLockInstance()
-    target_node = ExpandNodeName(self.cfg, self.op.target_node)
-    self.op.target_node = target_node
-    self.needed_locks[locking.LEVEL_NODE] = [target_node]
+    (self.op.target_node_uuid, self.op.target_node) = \
+      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
+                            self.op.target_node)
+    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
     self.needed_locks[locking.LEVEL_NODE_RES] = []
     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
 
@@ -1647,7 +1657,7 @@ class LUInstanceMove(LogicalUnit):
     nl = [
       self.cfg.GetMasterNode(),
       self.instance.primary_node,
-      self.op.target_node,
+      self.op.target_node_uuid,
       ]
     return (nl, nl)
 
@@ -1665,15 +1675,14 @@ class LUInstanceMove(LogicalUnit):
       raise errors.OpPrereqError("Disk template %s not suitable for copying" %
                                  instance.disk_template, errors.ECODE_STATE)
 
-    node = self.cfg.GetNodeInfo(self.op.target_node)
-    assert node is not None, \
+    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
+    assert target_node is not None, \
       "Cannot retrieve locked node %s" % self.op.target_node
 
-    self.target_node = target_node = node.name
-
-    if target_node == instance.primary_node:
+    self.target_node_uuid = target_node.uuid
+    if target_node.uuid == instance.primary_node:
       raise errors.OpPrereqError("Instance %s is already on the node %s" %
-                                 (instance.name, target_node),
+                                 (instance.name, target_node.name),
                                  errors.ECODE_STATE)
 
     bep = self.cfg.GetClusterInfo().FillBE(instance)
@@ -1683,19 +1692,19 @@ class LUInstanceMove(LogicalUnit):
         raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                    " cannot copy" % idx, errors.ECODE_STATE)
 
-    CheckNodeOnline(self, target_node)
-    CheckNodeNotDrained(self, target_node)
-    CheckNodeVmCapable(self, target_node)
+    CheckNodeOnline(self, target_node.uuid)
+    CheckNodeNotDrained(self, target_node.uuid)
+    CheckNodeVmCapable(self, target_node.uuid)
     cluster = self.cfg.GetClusterInfo()
-    group_info = self.cfg.GetNodeGroup(node.group)
+    group_info = self.cfg.GetNodeGroup(target_node.group)
     ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
-    CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
+    CheckTargetNodeIPolicy(self, ipolicy, instance, target_node, self.cfg,
                            ignore=self.op.ignore_ipolicy)
 
     if instance.admin_state == constants.ADMINST_UP:
       # check memory requirements on the secondary node
       CheckNodeFreeMemory(
-          self, target_node, "failing over instance %s" %
+          self, target_node.uuid, "failing over instance %s" %
           instance.name, bep[constants.BE_MAXMEM], instance.hypervisor,
           self.cfg.GetClusterInfo().hvparams[instance.hypervisor])
     else:
@@ -1703,7 +1712,7 @@ class LUInstanceMove(LogicalUnit):
                    " instance will not be started")
 
     # check bridge existance
-    CheckInstanceBridgesExist(self, instance, node=target_node)
+    CheckInstanceBridgesExist(self, instance, node_uuid=target_node.uuid)
 
   def Exec(self, feedback_fn):
     """Move an instance.
@@ -1714,29 +1723,30 @@ class LUInstanceMove(LogicalUnit):
     """
     instance = self.instance
 
-    source_node = instance.primary_node
-    target_node = self.target_node
+    source_node = self.cfg.GetNodeInfo(instance.primary_node)
+    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
 
     self.LogInfo("Shutting down instance %s on source node %s",
-                 instance.name, source_node)
+                 instance.name, source_node.name)
 
     assert (self.owned_locks(locking.LEVEL_NODE) ==
             self.owned_locks(locking.LEVEL_NODE_RES))
 
-    result = self.rpc.call_instance_shutdown(source_node, instance,
+    result = self.rpc.call_instance_shutdown(source_node.uuid, instance,
                                              self.op.shutdown_timeout,
                                              self.op.reason)
     if self.op.ignore_consistency:
       result.Warn("Could not shutdown instance %s on node %s. Proceeding"
                   " anyway. Please make sure node %s is down. Error details" %
-                  (instance.name, source_node, source_node), self.LogWarning)
+                  (instance.name, source_node.name, source_node.name),
+                  self.LogWarning)
     else:
       result.Raise("Could not shutdown instance %s on node %s" %
-                   (instance.name, source_node))
+                   (instance.name, source_node.name))
 
     # create the target disks
     try:
-      CreateDisks(self, instance, target_node=target_node)
+      CreateDisks(self, instance, target_node_uuid=target_node.uuid)
     except errors.OpExecError:
       self.LogWarning("Device creation failed")
       self.cfg.ReleaseDRBDMinors(instance.name)
@@ -1748,16 +1758,17 @@ class LUInstanceMove(LogicalUnit):
     # activate, get path, copy the data over
     for idx, disk in enumerate(instance.disks):
       self.LogInfo("Copying data for disk %d", idx)
-      result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
-                                               instance.name, True, idx)
+      result = self.rpc.call_blockdev_assemble(target_node.uuid,
+                                               (disk, instance), instance.name,
+                                               True, idx)
       if result.fail_msg:
         self.LogWarning("Can't assemble newly created disk %d: %s",
                         idx, result.fail_msg)
         errs.append(result.fail_msg)
         break
       dev_path = result.payload
-      result = self.rpc.call_blockdev_export(source_node, (disk, instance),
-                                             target_node, dev_path,
+      result = self.rpc.call_blockdev_export(source_node.uuid, (disk, instance),
+                                             target_node.name, dev_path,
                                              cluster_name)
       if result.fail_msg:
         self.LogWarning("Can't copy data over for disk %d: %s",
@@ -1768,22 +1779,22 @@ class LUInstanceMove(LogicalUnit):
     if errs:
       self.LogWarning("Some disks failed to copy, aborting")
       try:
-        RemoveDisks(self, instance, target_node=target_node)
+        RemoveDisks(self, instance, target_node_uuid=target_node.uuid)
       finally:
         self.cfg.ReleaseDRBDMinors(instance.name)
         raise errors.OpExecError("Errors during disk copy: %s" %
                                  (",".join(errs),))
 
-    instance.primary_node = target_node
+    instance.primary_node = target_node.uuid
     self.cfg.Update(instance, feedback_fn)
 
     self.LogInfo("Removing the disks on the original node")
-    RemoveDisks(self, instance, target_node=source_node)
+    RemoveDisks(self, instance, target_node_uuid=source_node.uuid)
 
     # Only start the instance if it's marked as up
     if instance.admin_state == constants.ADMINST_UP:
       self.LogInfo("Starting instance %s on node %s",
-                   instance.name, target_node)
+                   instance.name, target_node.name)
 
       disks_ok, _ = AssembleInstanceDisks(self, instance,
                                           ignore_secondaries=True)
@@ -1791,14 +1802,14 @@ class LUInstanceMove(LogicalUnit):
         ShutdownInstanceDisks(self, instance)
         raise errors.OpExecError("Can't activate the instance's disks")
 
-      result = self.rpc.call_instance_start(target_node,
+      result = self.rpc.call_instance_start(target_node.uuid,
                                             (instance, None, None), False,
                                             self.op.reason)
       msg = result.fail_msg
       if msg:
         ShutdownInstanceDisks(self, instance)
         raise errors.OpExecError("Could not start instance %s on node %s: %s" %
-                                 (instance.name, target_node, msg))
+                                 (instance.name, target_node.name, msg))
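
Sketch only, assuming the same LU context: a Node object fetched from the
configuration carries both identifiers, so RPC targets (UUID) and messages
(name) come from the same lookup.

    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
    assert target_node.uuid == self.target_node_uuid
    self.LogInfo("Moving %s to %s (%s)",
                 instance.name, target_node.name, target_node.uuid)
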
 
 
 class LUInstanceMultiAlloc(NoHooksLU):
@@ -1866,10 +1877,12 @@ class LUInstanceMultiAlloc(NoHooksLU):
     else:
       nodeslist = []
       for inst in self.op.instances:
-        inst.pnode = ExpandNodeName(self.cfg, inst.pnode)
+        (inst.pnode_uuid, inst.pnode) = \
+          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
-        nodeslist.append(inst.pnode)
+        nodeslist.append(inst.pnode_uuid)
         if inst.snode is not None:
-          inst.snode = ExpandNodeName(self.cfg, inst.snode)
+          (inst.snode_uuid, inst.snode) = \
+            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
-          nodeslist.append(inst.snode)
+          nodeslist.append(inst.snode_uuid)
 
       self.needed_locks[locking.LEVEL_NODE] = nodeslist
@@ -1887,7 +1900,8 @@ class LUInstanceMultiAlloc(NoHooksLU):
 
     if self.op.opportunistic_locking:
       # Only consider nodes for which a lock is held
-      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
+      node_whitelist = self.cfg.GetNodeNames(
+                         list(self.owned_locks(locking.LEVEL_NODE)))
     else:
       node_whitelist = None
 
@@ -1935,13 +1949,14 @@ class LUInstanceMultiAlloc(NoHooksLU):
     (allocatable, failed) = self.ia_result
 
     jobs = []
-    for (name, nodes) in allocatable:
+    for (name, node_names) in allocatable:
       op = op2inst.pop(name)
 
-      if len(nodes) > 1:
-        (op.pnode, op.snode) = nodes
-      else:
-        (op.pnode,) = nodes
+      (op.pnode_uuid, op.pnode) = \
+        ExpandNodeUuidAndName(self.cfg, None, node_names[0])
+      if len(node_names) > 1:
+        (op.snode_uuid, op.snode) = \
+          ExpandNodeUuidAndName(self.cfg, None, node_names[1])
 
       jobs.append([op])
 
@@ -1982,7 +1997,7 @@ def _PrepareContainerMods(mods, private_fn):
   return [(op, idx, params, fn()) for (op, idx, params) in mods]
 
 
-def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_specs):
+def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
   """Checks if nodes have enough physical CPUs
 
   This function checks if all given nodes have the needed number of
@@ -1992,8 +2007,8 @@ def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_specs):
 
   @type lu: C{LogicalUnit}
   @param lu: a logical unit from which we get configuration data
-  @type nodenames: C{list}
-  @param nodenames: the list of node names to check
+  @type node_uuids: C{list}
+  @param node_uuids: the list of node UUIDs to check
   @type requested: C{int}
   @param requested: the minimum acceptable number of physical CPUs
   @type hypervisor_specs: list of pairs (string, dict of strings)
@@ -2003,20 +2018,21 @@ def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_specs):
       or we cannot check the node
 
   """
-  nodeinfo = lu.rpc.call_node_info(nodenames, None, hypervisor_specs, None)
-  for node in nodenames:
-    info = nodeinfo[node]
-    info.Raise("Cannot get current information from node %s" % node,
+  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs, None)
+  for node_uuid in node_uuids:
+    info = nodeinfo[node_uuid]
+    node_name = lu.cfg.GetNodeName(node_uuid)
+    info.Raise("Cannot get current information from node %s" % node_name,
                prereq=True, ecode=errors.ECODE_ENVIRON)
     (_, _, (hv_info, )) = info.payload
     num_cpus = hv_info.get("cpu_total", None)
     if not isinstance(num_cpus, int):
       raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                  " on node %s, result was '%s'" %
-                                 (node, num_cpus), errors.ECODE_ENVIRON)
+                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
     if requested > num_cpus:
       raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
-                                 "required" % (node, num_cpus, requested),
+                                 "required" % (node_name, num_cpus, requested),
                                  errors.ECODE_NORES)
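
A hedged usage sketch of the check above (requested_cpus is a hypothetical
value; the hypervisor_specs pairs are built the same way hvspecs is built
further down in this file):

    hvspecs = [(instance.hypervisor, cluster.hvparams[instance.hypervisor])]
    _CheckNodesPhysicalCPUs(self, node_uuids, requested_cpus, hvspecs)
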
 
 
@@ -2336,7 +2352,8 @@ class LUInstanceSetParams(LogicalUnit):
                     self._VerifyNicModification)
 
     if self.op.pnode:
-      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
+      (self.op.pnode_uuid, self.op.pnode) = \
+        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
 
   def ExpandNames(self):
     self._ExpandAndLockInstance()
@@ -2359,8 +2376,10 @@ class LUInstanceSetParams(LogicalUnit):
     elif level == locking.LEVEL_NODE:
       self._LockInstancesNodes()
       if self.op.disk_template and self.op.remote_node:
-        self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
-        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
+        (self.op.remote_node_uuid, self.op.remote_node) = \
+          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
+                                self.op.remote_node)
+        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
     elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
       # Copy node locks
       self.needed_locks[locking.LEVEL_NODE_RES] = \
@@ -2409,7 +2428,7 @@ class LUInstanceSetParams(LogicalUnit):
     return (nl, nl)
 
   def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
-                              old_params, cluster, pnode):
+                              old_params, cluster, pnode_uuid):
 
     update_params_dict = dict([(key, params[key])
                                for key in constants.NICS_PARAMETERS
@@ -2428,7 +2447,7 @@ class LUInstanceSetParams(LogicalUnit):
       old_net_obj = self.cfg.GetNetwork(old_net_uuid)
 
     if new_net_uuid:
-      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode)
+      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
       if not netparams:
         raise errors.OpPrereqError("No netparams found for the network"
                                    " %s, probably not connected" %
@@ -2445,9 +2464,10 @@ class LUInstanceSetParams(LogicalUnit):
     new_mode = new_filled_params[constants.NIC_MODE]
     if new_mode == constants.NIC_MODE_BRIDGED:
       bridge = new_filled_params[constants.NIC_LINK]
-      msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
+      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
       if msg:
-        msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
+        msg = "Error checking bridges on node '%s': %s" % \
+                (self.cfg.GetNodeName(pnode_uuid), msg)
         if self.op.force:
           self.warn.append(msg)
         else:
@@ -2528,7 +2548,7 @@ class LUInstanceSetParams(LogicalUnit):
                                        errors.ECODE_NOTUNIQUE)
         # new network is None so check if new IP is a conflicting IP
         elif self.op.conflicts_check:
-          _CheckForConflictingIp(self, new_ip, pnode)
+          _CheckForConflictingIp(self, new_ip, pnode_uuid)
 
       # release old IP if old network is not None
       if old_ip and old_net_uuid:
@@ -2552,7 +2572,7 @@ class LUInstanceSetParams(LogicalUnit):
     """CheckPrereq checks related to a new disk template."""
     # Arguments are passed to avoid configuration lookups
     instance = self.instance
-    pnode = instance.primary_node
+    pnode_uuid = instance.primary_node
     cluster = self.cluster
     if instance.disk_template == self.op.disk_template:
       raise errors.OpPrereqError("Instance already has disk template %s" %
@@ -2567,21 +2587,21 @@ class LUInstanceSetParams(LogicalUnit):
     CheckInstanceState(self, instance, INSTANCE_DOWN,
                        msg="cannot change disk template")
     if self.op.disk_template in constants.DTS_INT_MIRROR:
-      if self.op.remote_node == pnode:
+      if self.op.remote_node_uuid == pnode_uuid:
         raise errors.OpPrereqError("Given new secondary node %s is the same"
                                    " as the primary node of the instance" %
                                    self.op.remote_node, errors.ECODE_STATE)
-      CheckNodeOnline(self, self.op.remote_node)
-      CheckNodeNotDrained(self, self.op.remote_node)
+      CheckNodeOnline(self, self.op.remote_node_uuid)
+      CheckNodeNotDrained(self, self.op.remote_node_uuid)
       # FIXME: here we assume that the old instance type is DT_PLAIN
       assert instance.disk_template == constants.DT_PLAIN
       disks = [{constants.IDISK_SIZE: d.size,
                 constants.IDISK_VG: d.logical_id[0]}
                for d in instance.disks]
       required = ComputeDiskSizePerVG(self.op.disk_template, disks)
-      CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
+      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)
 
-      snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
+      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
       snode_group = self.cfg.GetNodeGroup(snode_info.group)
       ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                               snode_group)
@@ -2617,7 +2637,7 @@ class LUInstanceSetParams(LogicalUnit):
     self.diskparams = self.cfg.GetInstanceDiskParams(instance)
 
     excl_stor = compat.any(
-      rpc.GetExclusiveStorageForNodeNames(self.cfg, instance.all_nodes).values()
+      rpc.GetExclusiveStorageForNodes(self.cfg, instance.all_nodes).values()
       )
 
     # Check disk modifications. This is done here and not in CheckArguments
@@ -2697,25 +2717,26 @@ class LUInstanceSetParams(LogicalUnit):
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
 
-    pnode = instance.primary_node
+    pnode_uuid = instance.primary_node
 
     self.warn = []
 
-    if (self.op.pnode is not None and self.op.pnode != pnode and
+    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
         not self.op.force):
       # verify that the instance is not up
       instance_info = self.rpc.call_instance_info(
-          pnode, instance.name, instance.hypervisor, instance.hvparams)
+          pnode_uuid, instance.name, instance.hypervisor, instance.hvparams)
       if instance_info.fail_msg:
         self.warn.append("Can't get instance runtime information: %s" %
                          instance_info.fail_msg)
       elif instance_info.payload:
-        raise errors.OpPrereqError("Instance is still running on %s" % pnode,
+        raise errors.OpPrereqError("Instance is still running on %s" %
+                                   self.cfg.GetNodeName(pnode_uuid),
                                    errors.ECODE_STATE)
 
-    assert pnode in self.owned_locks(locking.LEVEL_NODE)
-    nodelist = list(instance.all_nodes)
-    pnode_info = self.cfg.GetNodeInfo(pnode)
+    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
+    node_uuids = list(instance.all_nodes)
+    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)
 
     #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
     assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
@@ -2752,7 +2773,7 @@ class LUInstanceSetParams(LogicalUnit):
 
       # local check
       hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
-      CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
+      CheckHVParams(self, node_uuids, instance.hypervisor, hv_new)
       self.hv_proposed = self.hv_new = hv_new # the new actual values
       self.hv_inst = i_hvdict # the new dict (without defaults)
     else:
@@ -2807,7 +2828,7 @@ class LUInstanceSetParams(LogicalUnit):
     # osparams processing
     if self.op.osparams:
       i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
-      CheckOSParams(self, True, nodelist, instance_os, i_osdict)
+      CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
       self.os_inst = i_osdict # the new dict (without defaults)
     else:
       self.os_inst = {}
@@ -2815,26 +2836,27 @@ class LUInstanceSetParams(LogicalUnit):
     #TODO(dynmem): do the appropriate check involving MINMEM
     if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
         be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
-      mem_check_list = [pnode]
+      mem_check_list = [pnode_uuid]
       if be_new[constants.BE_AUTO_BALANCE]:
         # either we changed auto_balance to yes or it was from before
         mem_check_list.extend(instance.secondary_nodes)
       instance_info = self.rpc.call_instance_info(
-          pnode, instance.name, instance.hypervisor, instance.hvparams)
+          pnode_uuid, instance.name, instance.hypervisor, instance.hvparams)
       hvspecs = [(instance.hypervisor, cluster.hvparams[instance.hypervisor])]
       nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                          hvspecs, False)
-      pninfo = nodeinfo[pnode]
+      pninfo = nodeinfo[pnode_uuid]
       msg = pninfo.fail_msg
       if msg:
         # Assume the primary node is unreachable and go ahead
         self.warn.append("Can't get info from primary node %s: %s" %
-                         (pnode, msg))
+                         (self.cfg.GetNodeName(pnode_uuid), msg))
       else:
         (_, _, (pnhvinfo, )) = pninfo.payload
         if not isinstance(pnhvinfo.get("memory_free", None), int):
           self.warn.append("Node data from primary node %s doesn't contain"
-                           " free memory information" % pnode)
+                           " free memory information" %
+                           self.cfg.GetNodeName(pnode_uuid))
         elif instance_info.fail_msg:
           self.warn.append("Can't get instance runtime information: %s" %
                            instance_info.fail_msg)
@@ -2857,28 +2879,32 @@ class LUInstanceSetParams(LogicalUnit):
                                        miss_mem, errors.ECODE_NORES)
 
       if be_new[constants.BE_AUTO_BALANCE]:
-        for node, nres in nodeinfo.items():
-          if node not in instance.secondary_nodes:
+        for node_uuid, nres in nodeinfo.items():
+          if node_uuid not in instance.secondary_nodes:
             continue
-          nres.Raise("Can't get info from secondary node %s" % node,
-                     prereq=True, ecode=errors.ECODE_STATE)
+          nres.Raise("Can't get info from secondary node %s" %
+                     self.cfg.GetNodeName(node_uuid), prereq=True,
+                     ecode=errors.ECODE_STATE)
           (_, _, (nhvinfo, )) = nres.payload
           if not isinstance(nhvinfo.get("memory_free", None), int):
             raise errors.OpPrereqError("Secondary node %s didn't return free"
-                                       " memory information" % node,
+                                       " memory information" %
+                                       self.cfg.GetNodeName(node_uuid),
                                        errors.ECODE_STATE)
           #TODO(dynmem): do the appropriate check involving MINMEM
           elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
             raise errors.OpPrereqError("This change will prevent the instance"
                                        " from failover to its secondary node"
-                                       " %s, due to not enough memory" % node,
+                                       " %s, due to not enough memory" %
+                                       self.cfg.GetNodeName(node_uuid),
                                        errors.ECODE_STATE)
 
     if self.op.runtime_mem:
       remote_info = self.rpc.call_instance_info(
          instance.primary_node, instance.name, instance.hypervisor,
          instance.hvparams)
-      remote_info.Raise("Error checking node %s" % instance.primary_node)
+      remote_info.Raise("Error checking node %s" %
+                        self.cfg.GetNodeName(instance.primary_node))
       if not remote_info.payload: # not running already
         raise errors.OpPrereqError("Instance %s is not running" %
                                    instance.name, errors.ECODE_STATE)
@@ -2904,12 +2930,12 @@ class LUInstanceSetParams(LogicalUnit):
 
     def _PrepareNicCreate(_, params, private):
       self._PrepareNicModification(params, private, None, None,
-                                   {}, cluster, pnode)
+                                   {}, cluster, pnode_uuid)
       return (None, None)
 
     def _PrepareNicMod(_, nic, params, private):
       self._PrepareNicModification(params, private, nic.ip, nic.network,
-                                   nic.nicparams, cluster, pnode)
+                                   nic.nicparams, cluster, pnode_uuid)
       return None
 
     def _PrepareNicRemove(_, params, __):
@@ -2982,8 +3008,8 @@ class LUInstanceSetParams(LogicalUnit):
     """
     feedback_fn("Converting template to drbd")
     instance = self.instance
-    pnode = instance.primary_node
-    snode = self.op.remote_node
+    pnode_uuid = instance.primary_node
+    snode_uuid = self.op.remote_node_uuid
 
     assert instance.disk_template == constants.DT_PLAIN
 
@@ -2993,47 +3019,48 @@ class LUInstanceSetParams(LogicalUnit):
                   constants.IDISK_NAME: d.name}
                  for d in instance.disks]
     new_disks = GenerateDiskTemplate(self, self.op.disk_template,
-                                     instance.name, pnode, [snode],
+                                     instance.name, pnode_uuid, [snode_uuid],
                                      disk_info, None, None, 0, feedback_fn,
                                      self.diskparams)
     anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
                                         self.diskparams)
-    p_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
-    s_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, snode)
+    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
+    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
     info = GetInstanceInfoText(instance)
     feedback_fn("Creating additional volumes...")
     # first, create the missing data and meta devices
     for disk in anno_disks:
       # unfortunately this is... not too nice
-      CreateSingleBlockDev(self, pnode, instance, disk.children[1],
+      CreateSingleBlockDev(self, pnode_uuid, instance, disk.children[1],
                            info, True, p_excl_stor)
       for child in disk.children:
-        CreateSingleBlockDev(self, snode, instance, child, info, True,
+        CreateSingleBlockDev(self, snode_uuid, instance, child, info, True,
                              s_excl_stor)
     # at this stage, all new LVs have been created, we can rename the
     # old ones
     feedback_fn("Renaming original volumes...")
     rename_list = [(o, n.children[0].logical_id)
                    for (o, n) in zip(instance.disks, new_disks)]
-    result = self.rpc.call_blockdev_rename(pnode, rename_list)
+    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
     result.Raise("Failed to rename original LVs")
 
     feedback_fn("Initializing DRBD devices...")
     # all child devices are in place, we can now create the DRBD devices
     try:
       for disk in anno_disks:
-        for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
-          f_create = node == pnode
-          CreateSingleBlockDev(self, node, instance, disk, info, f_create,
+        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
+                                       (snode_uuid, s_excl_stor)]:
+          f_create = node_uuid == pnode_uuid
+          CreateSingleBlockDev(self, node_uuid, instance, disk, info, f_create,
                                excl_stor)
     except errors.GenericError, e:
       feedback_fn("Initializing of DRBD devices failed;"
                   " renaming back original volumes...")
       for disk in new_disks:
-        self.cfg.SetDiskID(disk, pnode)
+        self.cfg.SetDiskID(disk, pnode_uuid)
       rename_back_list = [(n.children[0], o.logical_id)
                           for (n, o) in zip(new_disks, instance.disks)]
-      result = self.rpc.call_blockdev_rename(pnode, rename_back_list)
+      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
       result.Raise("Failed to rename LVs back after error %s" % str(e))
       raise
 
@@ -3063,8 +3090,8 @@ class LUInstanceSetParams(LogicalUnit):
     assert len(instance.secondary_nodes) == 1
     assert instance.disk_template == constants.DT_DRBD8
 
-    pnode = instance.primary_node
-    snode = instance.secondary_nodes[0]
+    pnode_uuid = instance.primary_node
+    snode_uuid = instance.secondary_nodes[0]
     feedback_fn("Converting template to plain")
 
     old_disks = AnnotateDiskParams(instance, instance.disks, self.cfg)
@@ -3093,20 +3120,22 @@ class LUInstanceSetParams(LogicalUnit):
 
     feedback_fn("Removing volumes on the secondary node...")
     for disk in old_disks:
-      self.cfg.SetDiskID(disk, snode)
-      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
+      self.cfg.SetDiskID(disk, snode_uuid)
+      msg = self.rpc.call_blockdev_remove(snode_uuid, disk).fail_msg
       if msg:
         self.LogWarning("Could not remove block device %s on node %s,"
-                        " continuing anyway: %s", disk.iv_name, snode, msg)
+                        " continuing anyway: %s", disk.iv_name,
+                        self.cfg.GetNodeName(snode_uuid), msg)
 
     feedback_fn("Removing unneeded volumes on the primary node...")
     for idx, disk in enumerate(old_disks):
       meta = disk.children[1]
-      self.cfg.SetDiskID(meta, pnode)
-      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
+      self.cfg.SetDiskID(meta, pnode_uuid)
+      msg = self.rpc.call_blockdev_remove(pnode_uuid, meta).fail_msg
       if msg:
         self.LogWarning("Could not remove metadata for disk %d on node %s,"
-                        " continuing anyway: %s", idx, pnode, msg)
+                        " continuing anyway: %s", idx,
+                        self.cfg.GetNodeName(pnode_uuid), msg)
 
   def _CreateNewDisk(self, idx, params, _):
     """Creates a new disk.
@@ -3161,12 +3190,14 @@ class LUInstanceSetParams(LogicalUnit):
 
     """
     (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
-    for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
-      self.cfg.SetDiskID(disk, node)
-      msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
+    for node_uuid, disk in anno_disk.ComputeNodeTree(
+                             self.instance.primary_node):
+      self.cfg.SetDiskID(disk, node_uuid)
+      msg = self.rpc.call_blockdev_remove(node_uuid, disk).fail_msg
       if msg:
         self.LogWarning("Could not remove disk/%d on node '%s': %s,"
-                        " continuing anyway", idx, node, msg)
+                        " continuing anyway", idx,
+                        self.cfg.GetNodeName(node_uuid), msg)
 
     # if this is a DRBD disk, return its port to the pool
     if root.dev_type in constants.LDS_DRBD:
@@ -3240,8 +3271,8 @@ class LUInstanceSetParams(LogicalUnit):
     instance = self.instance
 
     # New primary node
-    if self.op.pnode:
-      instance.primary_node = self.op.pnode
+    if self.op.pnode_uuid:
+      instance.primary_node = self.op.pnode_uuid
 
     # runtime memory
     if self.op.runtime_mem:
@@ -3260,8 +3291,8 @@ class LUInstanceSetParams(LogicalUnit):
     if self.op.disk_template:
       if __debug__:
         check_nodes = set(instance.all_nodes)
-        if self.op.remote_node:
-          check_nodes.add(self.op.remote_node)
+        if self.op.remote_node_uuid:
+          check_nodes.add(self.op.remote_node_uuid)
         for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
           owned = self.owned_locks(level)
           assert not (check_nodes - owned), \
@@ -3392,9 +3423,9 @@ class LUInstanceChangeGroup(LogicalUnit):
         # Lock all nodes in all potential target groups
         lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                        self.cfg.GetInstanceNodeGroups(self.op.instance_name))
-        member_nodes = [node_name
+        member_nodes = [node_uuid
                         for group in lock_groups
-                        for node_name in self.cfg.GetNodeGroup(group).members]
+                        for node_uuid in self.cfg.GetNodeGroup(group).members]
         self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
       else:
         # Lock all nodes as all groups are potential targets
index 7e47eee..dbae8c0 100644 (file)
@@ -31,7 +31,7 @@ from ganeti.masterd import iallocator
 from ganeti import utils
 from ganeti.cmdlib.base import LogicalUnit, Tasklet
 from ganeti.cmdlib.common import ExpandInstanceName, \
-  CheckIAllocatorOrNode, ExpandNodeName
+  CheckIAllocatorOrNode, ExpandNodeUuidAndName
 from ganeti.cmdlib.instance_storage import CheckDiskConsistency, \
   ExpandCheckDisks, ShutdownInstanceDisks, AssembleInstanceDisks
 from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
@@ -48,7 +48,8 @@ def _ExpandNamesForMigration(lu):
 
   """
   if lu.op.target_node is not None:
-    lu.op.target_node = ExpandNodeName(lu.cfg, lu.op.target_node)
+    (lu.op.target_node_uuid, lu.op.target_node) = \
+      ExpandNodeUuidAndName(lu.cfg, lu.op.target_node_uuid, lu.op.target_node)
 
   lu.needed_locks[locking.LEVEL_NODE] = []
   lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
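
The helper swapped in here resolves a node name to a (uuid, name) pair and,
when the opcode already carries a UUID, checks that the name still refers to
the same node. A rough illustrative sketch follows; it is not the actual
lib/cmdlib/common.py implementation, and the function name and error codes are
assumptions:

    def _expand_node_uuid_and_name(cfg, expected_uuid, name):
      # Illustrative only: look the node up in the UUID-indexed config and
      # cross-check a UUID that a reloaded opcode may already contain.
      node = cfg.GetNodeInfoByName(name)
      if node is None:
        raise errors.OpPrereqError("Node '%s' is unknown" % name,
                                   errors.ECODE_NOENT)
      if expected_uuid is not None and node.uuid != expected_uuid:
        raise errors.OpPrereqError("Node '%s' no longer matches the UUID"
                                   " stored in the opcode" % name,
                                   errors.ECODE_STATE)
      return (node.uuid, node.name)
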
@@ -81,7 +82,7 @@ def _DeclareLocksForMigration(lu, level):
         lu.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
       else:
         lu.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
-                                               lu.op.target_node]
+                                               lu.op.target_node_uuid]
       del lu.recalculate_locks[locking.LEVEL_NODE]
     else:
       lu._LockInstancesNodes() # pylint: disable=W0212
@@ -133,18 +134,17 @@ class LUInstanceFailover(LogicalUnit):
 
     """
     instance = self._migrater.instance
-    source_node = instance.primary_node
-    target_node = self.op.target_node
+    source_node_uuid = instance.primary_node
     env = {
       "IGNORE_CONSISTENCY": self.op.ignore_consistency,
       "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
-      "OLD_PRIMARY": source_node,
-      "NEW_PRIMARY": target_node,
+      "OLD_PRIMARY": self.cfg.GetNodeName(source_node_uuid),
+      "NEW_PRIMARY": self.op.target_node,
       }
 
     if instance.disk_template in constants.DTS_INT_MIRROR:
-      env["OLD_SECONDARY"] = instance.secondary_nodes[0]
-      env["NEW_SECONDARY"] = source_node
+      env["OLD_SECONDARY"] = self.cfg.GetNodeName(instance.secondary_nodes[0])
+      env["NEW_SECONDARY"] = self.cfg.GetNodeName(source_node_uuid)
     else:
       env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = ""
 
@@ -195,20 +195,19 @@ class LUInstanceMigrate(LogicalUnit):
 
     """
     instance = self._migrater.instance
-    source_node = instance.primary_node
-    target_node = self.op.target_node
+    source_node_uuid = instance.primary_node
     env = BuildInstanceHookEnvByObject(self, instance)
     env.update({
       "MIGRATE_LIVE": self._migrater.live,
       "MIGRATE_CLEANUP": self.op.cleanup,
-      "OLD_PRIMARY": source_node,
-      "NEW_PRIMARY": target_node,
+      "OLD_PRIMARY": self.cfg.GetNodeName(source_node_uuid),
+      "NEW_PRIMARY": self.op.target_node,
       "ALLOW_RUNTIME_CHANGES": self.op.allow_runtime_changes,
       })
 
     if instance.disk_template in constants.DTS_INT_MIRROR:
-      env["OLD_SECONDARY"] = target_node
-      env["NEW_SECONDARY"] = source_node
+      env["OLD_SECONDARY"] = self.cfg.GetNodeName(instance.secondary_nodes[0])
+      env["NEW_SECONDARY"] = self.cfg.GetNodeName(source_node_uuid)
     else:
       env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = None
 
@@ -219,8 +218,8 @@ class LUInstanceMigrate(LogicalUnit):
 
     """
     instance = self._migrater.instance
-    snodes = list(instance.secondary_nodes)
-    nl = [self.cfg.GetMasterNode(), instance.primary_node] + snodes
+    snode_uuids = list(instance.secondary_nodes)
+    nl = [self.cfg.GetMasterNode(), instance.primary_node] + snode_uuids
     return (nl, nl)
 
 
@@ -234,8 +233,9 @@ class TLMigrateInstance(Tasklet):
   @ivar cleanup: Whether we clean up from a failed migration
   @type iallocator: string
   @ivar iallocator: The iallocator used to determine target_node
-  @type target_node: string
-  @ivar target_node: If given, the target_node to reallocate the instance to
+  @type target_node_uuid: string
+  @ivar target_node_uuid: If given, the target node UUID to reallocate the
+      instance to
   @type failover: boolean
   @ivar failover: Whether operation results in failover or migration
   @type fallback: boolean
@@ -309,12 +309,12 @@ class TLMigrateInstance(Tasklet):
         assert locking.NAL in self.lu.owned_locks(locking.LEVEL_NODE_ALLOC)
         self._RunAllocator()
       else:
-        # We set set self.target_node as it is required by
+        # We set self.target_node_uuid as it is required by
         # BuildHooksEnv
-        self.target_node = self.lu.op.target_node
+        self.target_node_uuid = self.lu.op.target_node_uuid
 
       # Check that the target node is correct in terms of instance policy
-      nodeinfo = self.cfg.GetNodeInfo(self.target_node)
+      nodeinfo = self.cfg.GetNodeInfo(self.target_node_uuid)
       group_info = self.cfg.GetNodeGroup(nodeinfo.group)
       ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                               group_info)
@@ -323,31 +323,32 @@ class TLMigrateInstance(Tasklet):
 
       # self.target_node_uuid is already populated, either directly or by the
       # iallocator run
-      target_node = self.target_node
-      if self.target_node == instance.primary_node:
-        raise errors.OpPrereqError("Cannot migrate instance %s"
-                                   " to its primary (%s)" %
-                                   (instance.name, instance.primary_node),
-                                   errors.ECODE_STATE)
+      target_node_uuid = self.target_node_uuid
+      if self.target_node_uuid == instance.primary_node:
+        raise errors.OpPrereqError(
+          "Cannot migrate instance %s to its primary (%s)" %
+          (instance.name, self.cfg.GetNodeName(instance.primary_node)),
+          errors.ECODE_STATE)
 
       if len(self.lu.tasklets) == 1:
         # It is safe to release locks only when we're the only tasklet
         # in the LU
         ReleaseLocks(self.lu, locking.LEVEL_NODE,
-                     keep=[instance.primary_node, self.target_node])
+                     keep=[instance.primary_node, self.target_node_uuid])
         ReleaseLocks(self.lu, locking.LEVEL_NODE_ALLOC)
 
     else:
       assert not self.lu.glm.is_owned(locking.LEVEL_NODE_ALLOC)
 
-      secondary_nodes = instance.secondary_nodes
-      if not secondary_nodes:
+      secondary_node_uuids = instance.secondary_nodes
+      if not secondary_node_uuids:
         raise errors.ConfigurationError("No secondary node but using"
                                         " %s disk template" %
                                         instance.disk_template)
-      target_node = secondary_nodes[0]
-      if self.lu.op.iallocator or (self.lu.op.target_node and
-                                   self.lu.op.target_node != target_node):
+      target_node_uuid = secondary_node_uuids[0]
+      if self.lu.op.iallocator or \
+        (self.lu.op.target_node_uuid and
+         self.lu.op.target_node_uuid != target_node_uuid):
         if self.failover:
           text = "failed over"
         else:
@@ -358,7 +359,7 @@ class TLMigrateInstance(Tasklet):
                                    " node can be passed)" %
                                    (instance.disk_template, text),
                                    errors.ECODE_INVAL)
-      nodeinfo = self.cfg.GetNodeInfo(target_node)
+      nodeinfo = self.cfg.GetNodeInfo(target_node_uuid)
       group_info = self.cfg.GetNodeGroup(nodeinfo.group)
       ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                               group_info)
@@ -371,7 +372,7 @@ class TLMigrateInstance(Tasklet):
     if (not self.cleanup and
          (not self.failover or instance.admin_state == constants.ADMINST_UP)):
       self.tgt_free_mem = CheckNodeFreeMemory(
-          self.lu, target_node, "migrating instance %s" % instance.name,
+          self.lu, target_node_uuid, "migrating instance %s" % instance.name,
           i_be[constants.BE_MINMEM], instance.hypervisor,
           self.cfg.GetClusterInfo().hvparams[instance.hypervisor])
     else:
@@ -386,10 +387,10 @@ class TLMigrateInstance(Tasklet):
       self.failover = True
 
     # check bridge existence
-    CheckInstanceBridgesExist(self.lu, instance, node=target_node)
+    CheckInstanceBridgesExist(self.lu, instance, node_uuid=target_node_uuid)
 
     if not self.cleanup:
-      CheckNodeNotDrained(self.lu, target_node)
+      CheckNodeNotDrained(self.lu, target_node_uuid)
       if not self.failover:
         result = self.rpc.call_instance_migratable(instance.primary_node,
                                                    instance)
@@ -431,7 +432,7 @@ class TLMigrateInstance(Tasklet):
           instance.primary_node, instance.name, instance.hypervisor,
           cluster.hvparams[instance.hypervisor])
       remote_info.Raise("Error checking instance on node %s" %
-                        instance.primary_node)
+                        self.cfg.GetNodeName(instance.primary_node))
       instance_running = bool(remote_info.payload)
       if instance_running:
         self.current_mem = int(remote_info.payload["memory"])
@@ -443,8 +444,9 @@ class TLMigrateInstance(Tasklet):
     assert locking.NAL in self.lu.owned_locks(locking.LEVEL_NODE_ALLOC)
 
     # FIXME: add a self.ignore_ipolicy option
-    req = iallocator.IAReqRelocate(name=self.instance_name,
-                                   relocate_from=[self.instance.primary_node])
+    req = iallocator.IAReqRelocate(
+          name=self.instance_name,
+          relocate_from_node_uuids=[self.instance.primary_node])
     ial = iallocator.IAllocator(self.cfg, self.rpc, req)
 
     ial.Run(self.lu.op.iallocator)
@@ -454,7 +456,7 @@ class TLMigrateInstance(Tasklet):
                                  " iallocator '%s': %s" %
                                  (self.lu.op.iallocator, ial.info),
                                  errors.ECODE_NORES)
-    self.target_node = ial.result[0]
+    self.target_node_uuid = self.cfg.GetNodeInfoByName(ial.result[0]).uuid
     self.lu.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                     self.instance_name, self.lu.op.iallocator,
                     utils.CommaJoin(ial.result))
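
The iallocator answer at ial.result[0] is still a node name, so it has to be
mapped back onto the UUID-indexed configuration, as the assignment above does.
A minimal sketch with an invented name, assuming cfg is the ConfigWriter:

    chosen_name = ial.result[0]                  # e.g. "node2.example.com"
    chosen_uuid = cfg.GetNodeInfoByName(chosen_name).uuid
    assert cfg.GetNodeName(chosen_uuid) == chosen_name
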
@@ -469,13 +471,14 @@ class TLMigrateInstance(Tasklet):
     all_done = False
     while not all_done:
       all_done = True
-      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
+      result = self.rpc.call_drbd_wait_sync(self.all_node_uuids,
                                             self.nodes_ip,
                                             (self.instance.disks,
                                              self.instance))
       min_percent = 100
-      for node, nres in result.items():
-        nres.Raise("Cannot resync disks on node %s" % node)
+      for node_uuid, nres in result.items():
+        nres.Raise("Cannot resync disks on node %s" %
+                   self.cfg.GetNodeName(node_uuid))
         node_done, node_percent = nres.payload
         all_done = all_done and node_done
         if node_percent is not None:
@@ -485,28 +488,32 @@ class TLMigrateInstance(Tasklet):
           self.feedback_fn("   - progress: %.1f%%" % min_percent)
         time.sleep(2)
 
-  def _EnsureSecondary(self, node):
+  def _EnsureSecondary(self, node_uuid):
     """Demote a node to secondary.
 
     """
-    self.feedback_fn("* switching node %s to secondary mode" % node)
+    self.feedback_fn("* switching node %s to secondary mode" %
+                     self.cfg.GetNodeName(node_uuid))
 
     for dev in self.instance.disks:
-      self.cfg.SetDiskID(dev, node)
+      self.cfg.SetDiskID(dev, node_uuid)
 
-    result = self.rpc.call_blockdev_close(node, self.instance.name,
+    result = self.rpc.call_blockdev_close(node_uuid, self.instance.name,
                                           self.instance.disks)
-    result.Raise("Cannot change disk to secondary on node %s" % node)
+    result.Raise("Cannot change disk to secondary on node %s" %
+                 self.cfg.GetNodeName(node_uuid))
 
   def _GoStandalone(self):
     """Disconnect from the network.
 
     """
     self.feedback_fn("* changing into standalone mode")
-    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
+    result = self.rpc.call_drbd_disconnect_net(self.all_node_uuids,
+                                               self.nodes_ip,
                                                self.instance.disks)
-    for node, nres in result.items():
-      nres.Raise("Cannot disconnect disks node %s" % node)
+    for node_uuid, nres in result.items():
+      nres.Raise("Cannot disconnect disks node %s" %
+                 self.cfg.GetNodeName(node_uuid))
 
   def _GoReconnect(self, multimaster):
     """Reconnect to the network.
@@ -517,11 +524,12 @@ class TLMigrateInstance(Tasklet):
     else:
       msg = "single-master"
     self.feedback_fn("* changing disks into %s mode" % msg)
-    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
+    result = self.rpc.call_drbd_attach_net(self.all_node_uuids, self.nodes_ip,
                                            (self.instance.disks, self.instance),
                                            self.instance.name, multimaster)
-    for node, nres in result.items():
-      nres.Raise("Cannot change disks config on node %s" % node)
+    for node_uuid, nres in result.items():
+      nres.Raise("Cannot change disks config on node %s" %
+                 self.cfg.GetNodeName(node_uuid))
 
   def _ExecCleanup(self):
     """Try to cleanup after a failed migration.
@@ -537,21 +545,22 @@ class TLMigrateInstance(Tasklet):
 
     """
     instance = self.instance
-    target_node = self.target_node
-    source_node = self.source_node
+    target_node_uuid = self.target_node_uuid
+    source_node_uuid = self.source_node_uuid
 
     # check running on only one node
     self.feedback_fn("* checking where the instance actually runs"
                      " (if this hangs, the hypervisor might be in"
                      " a bad state)")
     cluster_hvparams = self.cfg.GetClusterInfo().hvparams
-    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor],
+    ins_l = self.rpc.call_instance_list(self.all_node_uuids,
+                                        [instance.hypervisor],
                                         cluster_hvparams)
-    for node, result in ins_l.items():
-      result.Raise("Can't contact node %s" % node)
+    for node_uuid, result in ins_l.items():
+      result.Raise("Can't contact node %s" % node_uuid)
 
-    runningon_source = instance.name in ins_l[source_node].payload
-    runningon_target = instance.name in ins_l[target_node].payload
+    runningon_source = instance.name in ins_l[source_node_uuid].payload
+    runningon_target = instance.name in ins_l[target_node_uuid].payload
 
     if runningon_source and runningon_target:
       raise errors.OpExecError("Instance seems to be running on two nodes,"
@@ -568,17 +577,19 @@ class TLMigrateInstance(Tasklet):
     if runningon_target:
       # the migration has actually succeeded, we need to update the config
       self.feedback_fn("* instance running on secondary node (%s),"
-                       " updating config" % target_node)
-      instance.primary_node = target_node
+                       " updating config" %
+                       self.cfg.GetNodeName(target_node_uuid))
+      instance.primary_node = target_node_uuid
       self.cfg.Update(instance, self.feedback_fn)
-      demoted_node = source_node
+      demoted_node_uuid = source_node_uuid
     else:
       self.feedback_fn("* instance confirmed to be running on its"
-                       " primary node (%s)" % source_node)
-      demoted_node = target_node
+                       " primary node (%s)" %
+                       self.cfg.GetNodeName(source_node_uuid))
+      demoted_node_uuid = target_node_uuid
 
     if instance.disk_template in constants.DTS_INT_MIRROR:
-      self._EnsureSecondary(demoted_node)
+      self._EnsureSecondary(demoted_node_uuid)
       try:
         self._WaitUntilSync()
       except errors.OpExecError:
@@ -595,12 +606,11 @@ class TLMigrateInstance(Tasklet):
     """Try to revert the disk status after a failed migration.
 
     """
-    target_node = self.target_node
     if self.instance.disk_template in constants.DTS_EXT_MIRROR:
       return
 
     try:
-      self._EnsureSecondary(target_node)
+      self._EnsureSecondary(self.target_node_uuid)
       self._GoStandalone()
       self._GoReconnect(False)
       self._WaitUntilSync()
@@ -614,27 +624,23 @@ class TLMigrateInstance(Tasklet):
 
     """
     instance = self.instance
-    target_node = self.target_node
-    source_node = self.source_node
     migration_info = self.migration_info
 
-    abort_result = self.rpc.call_instance_finalize_migration_dst(target_node,
-                                                                 instance,
-                                                                 migration_info,
-                                                                 False)
+    abort_result = self.rpc.call_instance_finalize_migration_dst(
+                     self.target_node_uuid, instance, migration_info, False)
     abort_msg = abort_result.fail_msg
     if abort_msg:
       logging.error("Aborting migration failed on target node %s: %s",
-                    target_node, abort_msg)
+                    self.cfg.GetNodeName(self.target_node_uuid), abort_msg)
       # Don't raise an exception here, as we still have to try to revert the
       # disk status, even if this step failed.
 
     abort_result = self.rpc.call_instance_finalize_migration_src(
-      source_node, instance, False, self.live)
+      self.source_node_uuid, instance, False, self.live)
     abort_msg = abort_result.fail_msg
     if abort_msg:
       logging.error("Aborting migration failed on source node %s: %s",
-                    source_node, abort_msg)
+                    self.cfg.GetNodeName(self.source_node_uuid), abort_msg)
 
   def _ExecMigration(self):
     """Migrate an instance.
@@ -649,19 +655,19 @@ class TLMigrateInstance(Tasklet):
 
     """
     instance = self.instance
-    target_node = self.target_node
-    source_node = self.source_node
+    target_node_uuid = self.target_node_uuid
+    source_node_uuid = self.source_node_uuid
 
     # Check for hypervisor version mismatch and warn the user.
     hvspecs = [(instance.hypervisor,
                 self.cfg.GetClusterInfo().hvparams[instance.hypervisor])]
-    nodeinfo = self.rpc.call_node_info([source_node, target_node],
+    nodeinfo = self.rpc.call_node_info([source_node_uuid, target_node_uuid],
                                        None, hvspecs, False)
     for ninfo in nodeinfo.values():
       ninfo.Raise("Unable to retrieve node information from node '%s'" %
                   ninfo.node)
-    (_, _, (src_info, )) = nodeinfo[source_node].payload
-    (_, _, (dst_info, )) = nodeinfo[target_node].payload
+    (_, _, (src_info, )) = nodeinfo[source_node_uuid].payload
+    (_, _, (dst_info, )) = nodeinfo[target_node_uuid].payload
 
     if ((constants.HV_NODEINFO_KEY_VERSION in src_info) and
         (constants.HV_NODEINFO_KEY_VERSION in dst_info)):
@@ -674,7 +680,8 @@ class TLMigrateInstance(Tasklet):
 
     self.feedback_fn("* checking disk consistency between source and target")
     for (idx, dev) in enumerate(instance.disks):
-      if not CheckDiskConsistency(self.lu, instance, dev, target_node, False):
+      if not CheckDiskConsistency(self.lu, instance, dev, target_node_uuid,
+                                  False):
         raise errors.OpExecError("Disk %s is degraded or not fully"
                                  " synchronized on target node,"
                                  " aborting migration" % idx)
@@ -684,7 +691,8 @@ class TLMigrateInstance(Tasklet):
         raise errors.OpExecError("Memory ballooning not allowed and not enough"
                                  " free memory to fit instance %s on target"
                                  " node %s (have %dMB, need %dMB)" %
-                                 (instance.name, target_node,
+                                 (instance.name,
+                                  self.cfg.GetNodeName(target_node_uuid),
                                   self.tgt_free_mem, self.current_mem))
       self.feedback_fn("* setting instance memory to %s" % self.tgt_free_mem)
       rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
@@ -693,11 +701,11 @@ class TLMigrateInstance(Tasklet):
       rpcres.Raise("Cannot modify instance runtime memory")
 
     # First get the migration information from the remote node
-    result = self.rpc.call_migration_info(source_node, instance)
+    result = self.rpc.call_migration_info(source_node_uuid, instance)
     msg = result.fail_msg
     if msg:
       log_err = ("Failed fetching source migration information from %s: %s" %
-                 (source_node, msg))
+                 (self.cfg.GetNodeName(source_node_uuid), msg))
       logging.error(log_err)
       raise errors.OpExecError(log_err)
 
@@ -705,16 +713,17 @@ class TLMigrateInstance(Tasklet):
 
     if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
       # Then switch the disks to master/master mode
-      self._EnsureSecondary(target_node)
+      self._EnsureSecondary(target_node_uuid)
       self._GoStandalone()
       self._GoReconnect(True)
       self._WaitUntilSync()
 
-    self.feedback_fn("* preparing %s to accept the instance" % target_node)
-    result = self.rpc.call_accept_instance(target_node,
+    self.feedback_fn("* preparing %s to accept the instance" %
+                     self.cfg.GetNodeName(target_node_uuid))
+    result = self.rpc.call_accept_instance(target_node_uuid,
                                            instance,
                                            migration_info,
-                                           self.nodes_ip[target_node])
+                                           self.nodes_ip[target_node_uuid])
 
     msg = result.fail_msg
     if msg:
@@ -726,11 +735,12 @@ class TLMigrateInstance(Tasklet):
       raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
                                (instance.name, msg))
 
-    self.feedback_fn("* migrating instance to %s" % target_node)
+    self.feedback_fn("* migrating instance to %s" %
+                     self.cfg.GetNodeName(target_node_uuid))
     cluster = self.cfg.GetClusterInfo()
     result = self.rpc.call_instance_migrate(
-        source_node, cluster.cluster_name, instance, self.nodes_ip[target_node],
-        self.live)
+        source_node_uuid, cluster.cluster_name, instance,
+        self.nodes_ip[target_node_uuid], self.live)
     msg = result.fail_msg
     if msg:
       logging.error("Instance migration failed, trying to revert"
@@ -744,7 +754,7 @@ class TLMigrateInstance(Tasklet):
     self.feedback_fn("* starting memory transfer")
     last_feedback = time.time()
     while True:
-      result = self.rpc.call_instance_get_migration_status(source_node,
+      result = self.rpc.call_instance_get_migration_status(source_node_uuid,
                                                            instance)
       msg = result.fail_msg
       ms = result.payload   # MigrationStatus instance
@@ -772,7 +782,7 @@ class TLMigrateInstance(Tasklet):
 
       time.sleep(self._MIGRATION_POLL_INTERVAL)
 
-    result = self.rpc.call_instance_finalize_migration_src(source_node,
+    result = self.rpc.call_instance_finalize_migration_src(source_node_uuid,
                                                            instance,
                                                            True,
                                                            self.live)
@@ -783,12 +793,12 @@ class TLMigrateInstance(Tasklet):
       raise errors.OpExecError("Could not finalize instance migration: %s" %
                                msg)
 
-    instance.primary_node = target_node
+    instance.primary_node = target_node_uuid
 
     # distribute new instance config to the other nodes
     self.cfg.Update(instance, self.feedback_fn)
 
-    result = self.rpc.call_instance_finalize_migration_dst(target_node,
+    result = self.rpc.call_instance_finalize_migration_dst(target_node_uuid,
                                                            instance,
                                                            migration_info,
                                                            True)
@@ -800,7 +810,7 @@ class TLMigrateInstance(Tasklet):
                                msg)
 
     if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
-      self._EnsureSecondary(source_node)
+      self._EnsureSecondary(source_node_uuid)
       self._WaitUntilSync()
       self._GoStandalone()
       self._GoReconnect(False)
@@ -810,16 +820,19 @@ class TLMigrateInstance(Tasklet):
     # successful migration, unmap the device from the source node.
     if self.instance.disk_template in (constants.DT_RBD, constants.DT_EXT):
       disks = ExpandCheckDisks(instance, instance.disks)
-      self.feedback_fn("* unmapping instance's disks from %s" % source_node)
+      self.feedback_fn("* unmapping instance's disks from %s" %
+                       self.cfg.GetNodeName(source_node_uuid))
       for disk in disks:
-        result = self.rpc.call_blockdev_shutdown(source_node, (disk, instance))
+        result = self.rpc.call_blockdev_shutdown(source_node_uuid,
+                                                 (disk, instance))
         msg = result.fail_msg
         if msg:
           logging.error("Migration was successful, but couldn't unmap the"
                         " block device %s on source node %s: %s",
-                        disk.iv_name, source_node, msg)
+                        disk.iv_name, self.cfg.GetNodeName(source_node_uuid),
+                        msg)
           logging.error("You need to unmap the device %s manually on %s",
-                        disk.iv_name, source_node)
+                        disk.iv_name, self.cfg.GetNodeName(source_node_uuid))
 
     self.feedback_fn("* done")
 
@@ -833,19 +846,20 @@ class TLMigrateInstance(Tasklet):
     instance = self.instance
     primary_node = self.cfg.GetNodeInfo(instance.primary_node)
 
-    source_node = instance.primary_node
-    target_node = self.target_node
+    source_node_uuid = instance.primary_node
+    target_node_uuid = self.target_node_uuid
 
     if instance.disks_active:
       self.feedback_fn("* checking disk consistency between source and target")
       for (idx, dev) in enumerate(instance.disks):
         # for drbd, these are drbd over lvm
-        if not CheckDiskConsistency(self.lu, instance, dev, target_node,
+        if not CheckDiskConsistency(self.lu, instance, dev, target_node_uuid,
                                     False):
           if primary_node.offline:
             self.feedback_fn("Node %s is offline, ignoring degraded disk %s on"
                              " target node %s" %
-                             (primary_node.name, idx, target_node))
+                             (primary_node.name, idx,
+                              self.cfg.GetNodeName(target_node_uuid)))
           elif not self.ignore_consistency:
             raise errors.OpExecError("Disk %s is degraded on target node,"
                                      " aborting failover" % idx)
@@ -855,9 +869,9 @@ class TLMigrateInstance(Tasklet):
 
     self.feedback_fn("* shutting down instance on source node")
     logging.info("Shutting down instance %s on node %s",
-                 instance.name, source_node)
+                 instance.name, self.cfg.GetNodeName(source_node_uuid))
 
-    result = self.rpc.call_instance_shutdown(source_node, instance,
+    result = self.rpc.call_instance_shutdown(source_node_uuid, instance,
                                              self.shutdown_timeout,
                                              self.lu.op.reason)
     msg = result.fail_msg
@@ -866,26 +880,29 @@ class TLMigrateInstance(Tasklet):
         self.lu.LogWarning("Could not shutdown instance %s on node %s,"
                            " proceeding anyway; please make sure node"
                            " %s is down; error details: %s",
-                           instance.name, source_node, source_node, msg)
+                           instance.name,
+                           self.cfg.GetNodeName(source_node_uuid),
+                           self.cfg.GetNodeName(source_node_uuid), msg)
       else:
         raise errors.OpExecError("Could not shutdown instance %s on"
                                  " node %s: %s" %
-                                 (instance.name, source_node, msg))
+                                 (instance.name,
+                                  self.cfg.GetNodeName(source_node_uuid), msg))
 
     self.feedback_fn("* deactivating the instance's disks on source node")
     if not ShutdownInstanceDisks(self.lu, instance, ignore_primary=True):
       raise errors.OpExecError("Can't shut down the instance's disks")
 
-    instance.primary_node = target_node
+    instance.primary_node = target_node_uuid
     # distribute new instance config to the other nodes
     self.cfg.Update(instance, self.feedback_fn)
 
     # Only start the instance if it's marked as up
     if instance.admin_state == constants.ADMINST_UP:
       self.feedback_fn("* activating the instance's disks on target node %s" %
-                       target_node)
+                       self.cfg.GetNodeName(target_node_uuid))
       logging.info("Starting instance %s on node %s",
-                   instance.name, target_node)
+                   instance.name, self.cfg.GetNodeName(target_node_uuid))
 
       disks_ok, _ = AssembleInstanceDisks(self.lu, instance,
                                           ignore_secondaries=True)
@@ -894,31 +911,33 @@ class TLMigrateInstance(Tasklet):
         raise errors.OpExecError("Can't activate the instance's disks")
 
       self.feedback_fn("* starting the instance on the target node %s" %
-                       target_node)
-      result = self.rpc.call_instance_start(target_node, (instance, None, None),
-                                            False, self.lu.op.reason)
+                       self.cfg.GetNodeName(target_node_uuid))
+      result = self.rpc.call_instance_start(target_node_uuid,
+                                            (instance, None, None), False,
+                                            self.lu.op.reason)
       msg = result.fail_msg
       if msg:
         ShutdownInstanceDisks(self.lu, instance)
         raise errors.OpExecError("Could not start instance %s on node %s: %s" %
-                                 (instance.name, target_node, msg))
+                                 (instance.name,
+                                  self.cfg.GetNodeName(target_node_uuid), msg))
 
   def Exec(self, feedback_fn):
     """Perform the migration.
 
     """
     self.feedback_fn = feedback_fn
-    self.source_node = self.instance.primary_node
+    self.source_node_uuid = self.instance.primary_node
 
     # FIXME: if we implement migrate-to-any in DRBD, this needs fixing
     if self.instance.disk_template in constants.DTS_INT_MIRROR:
-      self.target_node = self.instance.secondary_nodes[0]
+      self.target_node_uuid = self.instance.secondary_nodes[0]
       # Otherwise self.target_node_uuid has been populated either
       # directly, or through an iallocator.
 
-    self.all_nodes = [self.source_node, self.target_node]
-    self.nodes_ip = dict((name, node.secondary_ip) for (name, node)
-                         in self.cfg.GetMultiNodeInfo(self.all_nodes))
+    self.all_node_uuids = [self.source_node_uuid, self.target_node_uuid]
+    self.nodes_ip = dict((uuid, node.secondary_ip) for (uuid, node)
+                         in self.cfg.GetMultiNodeInfo(self.all_node_uuids))
 
     if self.failover:
       feedback_fn("Failover instance %s" % self.instance.name)
index 480ba32..789abbe 100644 (file)
@@ -36,8 +36,8 @@ from ganeti import objects
 from ganeti import utils
 from ganeti.cmdlib.base import LogicalUnit, NoHooksLU
 from ganeti.cmdlib.common import INSTANCE_ONLINE, INSTANCE_DOWN, \
-  CheckHVParams, CheckInstanceState, CheckNodeOnline, ExpandNodeName, \
-  GetUpdatedParams, CheckOSParams, ShareAll
+  CheckHVParams, CheckInstanceState, CheckNodeOnline, GetUpdatedParams, \
+  CheckOSParams, ShareAll
 from ganeti.cmdlib.instance_storage import StartInstanceDisks, \
   ShutdownInstanceDisks
 from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
@@ -130,7 +130,8 @@ class LUInstanceStartup(LogicalUnit):
       remote_info = self.rpc.call_instance_info(
           instance.primary_node, instance.name, instance.hypervisor,
           cluster.hvparams[instance.hypervisor])
-      remote_info.Raise("Error checking node %s" % instance.primary_node,
+      remote_info.Raise("Error checking node %s" %
+                        self.cfg.GetNodeName(instance.primary_node),
                         prereq=True, ecode=errors.ECODE_ENVIRON)
       if not remote_info.payload: # not running already
         CheckNodeFreeMemory(
@@ -153,12 +154,10 @@ class LUInstanceStartup(LogicalUnit):
       assert self.op.ignore_offline_nodes
       self.LogInfo("Primary node offline, marked instance as started")
     else:
-      node_current = instance.primary_node
-
       StartInstanceDisks(self, instance, force)
 
       result = \
-        self.rpc.call_instance_start(node_current,
+        self.rpc.call_instance_start(instance.primary_node,
                                      (instance, self.op.hvparams,
                                       self.op.beparams),
                                      self.op.startup_paused, reason)
@@ -224,7 +223,6 @@ class LUInstanceShutdown(LogicalUnit):
 
     """
     instance = self.instance
-    node_current = instance.primary_node
     timeout = self.op.timeout
     reason = self.op.reason
 
@@ -237,8 +235,8 @@ class LUInstanceShutdown(LogicalUnit):
       assert self.op.ignore_offline_nodes
       self.LogInfo("Primary node offline, marked instance as stopped")
     else:
-      result = self.rpc.call_instance_shutdown(node_current, instance, timeout,
-                                               reason)
+      result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
+                                               timeout, reason)
       msg = result.fail_msg
       if msg:
         self.LogWarning("Could not shutdown instance: %s", msg)
@@ -292,17 +290,17 @@ class LUInstanceReinstall(LogicalUnit):
 
     if self.op.os_type is not None:
       # OS verification
-      pnode = ExpandNodeName(self.cfg, instance.primary_node)
-      CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
+      CheckNodeHasOS(self, instance.primary_node, self.op.os_type,
+                     self.op.force_variant)
       instance_os = self.op.os_type
     else:
       instance_os = instance.os
 
-    nodelist = list(instance.all_nodes)
+    node_uuids = list(instance.all_nodes)
 
     if self.op.osparams:
       i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
-      CheckOSParams(self, True, nodelist, instance_os, i_osdict)
+      CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
       self.os_inst = i_osdict # the new dict (without defaults)
     else:
       self.os_inst = None
@@ -329,7 +327,7 @@ class LUInstanceReinstall(LogicalUnit):
                                              (inst, self.os_inst), True,
                                              self.op.debug_level)
       result.Raise("Could not install OS for instance %s on node %s" %
-                   (inst.name, inst.primary_node))
+                   (inst.name, self.cfg.GetNodeName(inst.primary_node)))
     finally:
       ShutdownInstanceDisks(self, inst)
 
@@ -396,22 +394,23 @@ class LUInstanceReboot(LogicalUnit):
     remote_info = self.rpc.call_instance_info(
         instance.primary_node, instance.name, instance.hypervisor,
         cluster.hvparams[instance.hypervisor])
-    remote_info.Raise("Error checking node %s" % instance.primary_node)
+    remote_info.Raise("Error checking node %s" %
+                      self.cfg.GetNodeName(instance.primary_node))
     instance_running = bool(remote_info.payload)
 
-    node_current = instance.primary_node
+    current_node_uuid = instance.primary_node
 
     if instance_running and reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                                             constants.INSTANCE_REBOOT_HARD]:
       for disk in instance.disks:
-        self.cfg.SetDiskID(disk, node_current)
-      result = self.rpc.call_instance_reboot(node_current, instance,
+        self.cfg.SetDiskID(disk, current_node_uuid)
+      result = self.rpc.call_instance_reboot(current_node_uuid, instance,
                                              reboot_type,
                                              self.op.shutdown_timeout, reason)
       result.Raise("Could not reboot instance")
     else:
       if instance_running:
-        result = self.rpc.call_instance_shutdown(node_current, instance,
+        result = self.rpc.call_instance_shutdown(current_node_uuid, instance,
                                                  self.op.shutdown_timeout,
                                                  reason)
         result.Raise("Could not shutdown instance for full reboot")
@@ -420,7 +419,7 @@ class LUInstanceReboot(LogicalUnit):
         self.LogInfo("Instance %s was already stopped, starting now",
                      instance.name)
       StartInstanceDisks(self, instance, ignore_secondaries)
-      result = self.rpc.call_instance_start(node_current,
+      result = self.rpc.call_instance_start(current_node_uuid,
                                             (instance, None, None), False,
                                             reason)
       msg = result.fail_msg
@@ -432,11 +431,12 @@ class LUInstanceReboot(LogicalUnit):
     self.cfg.MarkInstanceUp(instance.name)
 
 
-def GetInstanceConsole(cluster, instance):
+def GetInstanceConsole(cluster, instance, primary_node):
   """Returns console information for an instance.
 
   @type cluster: L{objects.Cluster}
   @type instance: L{objects.Instance}
+  @type primary_node: L{objects.Node}
   @rtype: dict
 
   """
@@ -445,7 +445,7 @@ def GetInstanceConsole(cluster, instance):
   # instance and then saving the defaults in the instance itself.
   hvparams = cluster.FillHV(instance)
   beparams = cluster.FillBE(instance)
-  console = hyper.GetInstanceConsole(instance, hvparams, beparams)
+  console = hyper.GetInstanceConsole(instance, primary_node, hvparams, beparams)
 
   assert console.instance == instance.name
   assert console.Validate()
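
Call sites now resolve the primary node once and hand the Node object in,
because instance.primary_node holds a UUID and the hypervisor layer needs the
node itself. A hedged usage sketch mirroring the call sites in this patch,
assuming cfg is the ConfigWriter:

    pnode = cfg.GetNodeInfo(instance.primary_node)    # UUID -> objects.Node
    console = GetInstanceConsole(cfg.GetClusterInfo(), instance, pnode)
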
@@ -483,13 +483,14 @@ class LUInstanceConsole(NoHooksLU):
 
     """
     instance = self.instance
-    node = instance.primary_node
+    node_uuid = instance.primary_node
 
     cluster_hvparams = self.cfg.GetClusterInfo().hvparams
-    node_insts = self.rpc.call_instance_list([node],
+    node_insts = self.rpc.call_instance_list([node_uuid],
                                              [instance.hypervisor],
-                                             cluster_hvparams)[node]
-    node_insts.Raise("Can't get node information from %s" % node)
+                                             cluster_hvparams)[node_uuid]
+    node_insts.Raise("Can't get node information from %s" %
+                     self.cfg.GetNodeName(node_uuid))
 
     if instance.name not in node_insts.payload:
       if instance.admin_state == constants.ADMINST_UP:
@@ -501,6 +502,8 @@ class LUInstanceConsole(NoHooksLU):
       raise errors.OpExecError("Instance %s is not running (state %s)" %
                                (instance.name, state))
 
-    logging.debug("Connecting to console of %s on %s", instance.name, node)
+    logging.debug("Connecting to console of %s on %s", instance.name,
+                  self.cfg.GetNodeName(node_uuid))
 
-    return GetInstanceConsole(self.cfg.GetClusterInfo(), instance)
+    return GetInstanceConsole(self.cfg.GetClusterInfo(), instance,
+                              self.cfg.GetNodeInfo(instance.primary_node))
index 792e777..6ac9f6f 100644 (file)
@@ -104,31 +104,31 @@ class InstanceQuery(QueryBase):
 
     instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)
 
-    instance_list = [all_info[name] for name in instance_names]
-    nodes = frozenset(itertools.chain(*(inst.all_nodes
-                                        for inst in instance_list)))
+    instance_list = [all_info[inst_name] for inst_name in instance_names]
+    node_uuids = frozenset(itertools.chain(*(inst.all_nodes
+                                             for inst in instance_list)))
     hv_list = list(set([inst.hypervisor for inst in instance_list]))
-    bad_nodes = []
-    offline_nodes = []
+    bad_node_uuids = []
+    offline_node_uuids = []
     wrongnode_inst = set()
 
     # Gather data as requested
     if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
       live_data = {}
-      node_data = lu.rpc.call_all_instances_info(nodes, hv_list,
+      node_data = lu.rpc.call_all_instances_info(node_uuids, hv_list,
                                                  cluster.hvparams)
-      for name in nodes:
-        result = node_data[name]
+      for node_uuid in node_uuids:
+        result = node_data[node_uuid]
         if result.offline:
           # offline nodes will be in both lists
           assert result.fail_msg
-          offline_nodes.append(name)
+          offline_node_uuids.append(node_uuid)
         if result.fail_msg:
-          bad_nodes.append(name)
+          bad_node_uuids.append(node_uuid)
         elif result.payload:
           for inst in result.payload:
             if inst in all_info:
-              if all_info[inst].primary_node == name:
+              if all_info[inst].primary_node == node_uuid:
                 live_data.update(result.payload)
               else:
                 wrongnode_inst.add(inst)
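
The loop above relies on call_all_instances_info returning a dictionary keyed
by the very node UUIDs it was given. A tiny hedged sketch of that shape, with
invented keys; lu, hv_list and cluster are the objects already in scope here:

    node_uuids = frozenset(["uuid-node1", "uuid-node2"])
    node_data = lu.rpc.call_all_instances_info(node_uuids, hv_list,
                                               cluster.hvparams)
    for node_uuid in node_uuids:
      result = node_data[node_uuid]       # same UUID keys as the request
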
@@ -136,7 +136,7 @@ class InstanceQuery(QueryBase):
               # orphan instance; we don't list it here as we don't
               # handle this case yet in the output of instance listing
               logging.warning("Orphan instance '%s' found on node %s",
-                              inst, name)
+                              inst, lu.cfg.GetNodeName(node_uuid))
               # else no instance is alive
     else:
       live_data = {}
@@ -156,7 +156,9 @@ class InstanceQuery(QueryBase):
       for inst in instance_list:
         if inst.name in live_data:
           # Instance is running
-          consinfo[inst.name] = GetInstanceConsole(cluster, inst)
+          consinfo[inst.name] = \
+            GetInstanceConsole(cluster, inst,
+                               lu.cfg.GetNodeInfo(inst.primary_node))
         else:
           consinfo[inst.name] = None
       assert set(consinfo.keys()) == set(instance_names)
@@ -164,9 +166,7 @@ class InstanceQuery(QueryBase):
       consinfo = None
 
     if query.IQ_NODES in self.requested_data:
-      node_names = set(itertools.chain(*map(operator.attrgetter("all_nodes"),
-                                            instance_list)))
-      nodes = dict(lu.cfg.GetMultiNodeInfo(node_names))
+      nodes = dict(lu.cfg.GetMultiNodeInfo(node_uuids))
       groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
                     for uuid in set(map(operator.attrgetter("group"),
                                         nodes.values())))
@@ -182,9 +182,9 @@ class InstanceQuery(QueryBase):
       networks = None
 
     return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
-                                   disk_usage, offline_nodes, bad_nodes,
-                                   live_data, wrongnode_inst, consinfo,
-                                   nodes, groups, networks)
+                                   disk_usage, offline_node_uuids,
+                                   bad_node_uuids, live_data, wrongnode_inst,
+                                   consinfo, nodes, groups, networks)
 
 
 class LUInstanceQuery(NoHooksLU):
@@ -273,7 +273,7 @@ class LUInstanceQueryData(NoHooksLU):
     """
     owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
     owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
-    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
+    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
     owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))
 
     if self.wanted_names is None:
@@ -283,24 +283,24 @@ class LUInstanceQueryData(NoHooksLU):
     instances = dict(self.cfg.GetMultiInstanceInfo(self.wanted_names))
 
     if self.op.use_locking:
-      CheckInstancesNodeGroups(self.cfg, instances, owned_groups, owned_nodes,
-                               None)
+      CheckInstancesNodeGroups(self.cfg, instances, owned_groups,
+                               owned_node_uuids, None)
     else:
       assert not (owned_instances or owned_groups or
-                  owned_nodes or owned_networks)
+                  owned_node_uuids or owned_networks)
 
     self.wanted_instances = instances.values()
 
-  def _ComputeBlockdevStatus(self, node, instance, dev):
+  def _ComputeBlockdevStatus(self, node_uuid, instance, dev):
     """Returns the status of a block device
 
     """
-    if self.op.static or not node:
+    if self.op.static or not node_uuid:
       return None
 
-    self.cfg.SetDiskID(dev, node)
+    self.cfg.SetDiskID(dev, node_uuid)
 
-    result = self.rpc.call_blockdev_find(node, dev)
+    result = self.rpc.call_blockdev_find(node_uuid, dev)
     if result.offline:
       return None
 
@@ -314,34 +314,46 @@ class LUInstanceQueryData(NoHooksLU):
             status.sync_percent, status.estimated_time,
             status.is_degraded, status.ldisk_status)
 
-  def _ComputeDiskStatus(self, instance, snode, dev):
+  def _ComputeDiskStatus(self, instance, node_uuid2name_fn, dev):
     """Compute block device status.
 
     """
     (anno_dev,) = AnnotateDiskParams(instance, [dev], self.cfg)
 
-    return self._ComputeDiskStatusInner(instance, snode, anno_dev)
+    return self._ComputeDiskStatusInner(instance, None, node_uuid2name_fn,
+                                        anno_dev)
 
-  def _ComputeDiskStatusInner(self, instance, snode, dev):
+  def _ComputeDiskStatusInner(self, instance, snode_uuid, node_uuid2name_fn,
+                              dev):
     """Compute block device status.
 
     @attention: The device has to be annotated already.
 
     """
+    drbd_info = None
     if dev.dev_type in constants.LDS_DRBD:
       # we change the snode then (otherwise we use the one passed in)
       if dev.logical_id[0] == instance.primary_node:
-        snode = dev.logical_id[1]
+        snode_uuid = dev.logical_id[1]
       else:
-        snode = dev.logical_id[0]
+        snode_uuid = dev.logical_id[0]
+      drbd_info = {
+        "primary_node": node_uuid2name_fn(instance.primary_node),
+        "primary_minor": dev.logical_id[3],
+        "secondary_node": node_uuid2name_fn(snode_uuid),
+        "secondary_minor": dev.logical_id[4],
+        "port": dev.logical_id[2],
+        "secret": dev.logical_id[5],
+      }
 
     dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                               instance, dev)
-    dev_sstatus = self._ComputeBlockdevStatus(snode, instance, dev)
+    dev_sstatus = self._ComputeBlockdevStatus(snode_uuid, instance, dev)
 
     if dev.children:
       dev_children = map(compat.partial(self._ComputeDiskStatusInner,
-                                        instance, snode),
+                                        instance, snode_uuid,
+                                        node_uuid2name_fn),
                          dev.children)
     else:
       dev_children = []
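
The drbd_info block above publishes the DRBD logical_id by node name instead
of UUID. Assuming the (node_a, node_b, port, minor_a, minor_b, secret) layout
indexed above, and that the instance's primary happens to be the first node in
the tuple, an invented example would come out roughly as:

    logical_id = ("uuid-primary", "uuid-secondary", 11000, 0, 1, "s3cr3t")
    drbd_info = {
      "primary_node": "node1.example.com",    # node_uuid2name_fn(logical_id[0])
      "primary_minor": 0,                     # logical_id[3]
      "secondary_node": "node2.example.com",  # node_uuid2name_fn(logical_id[1])
      "secondary_minor": 1,                   # logical_id[4]
      "port": 11000,                          # logical_id[2]
      "secret": "s3cr3t",                     # logical_id[5]
    }
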
@@ -350,6 +362,7 @@ class LUInstanceQueryData(NoHooksLU):
       "iv_name": dev.iv_name,
       "dev_type": dev.dev_type,
       "logical_id": dev.logical_id,
+      "drbd_info": drbd_info,
       "physical_id": dev.physical_id,
       "pstatus": dev_pstatus,
       "sstatus": dev_sstatus,
@@ -367,13 +380,12 @@ class LUInstanceQueryData(NoHooksLU):
 
     cluster = self.cfg.GetClusterInfo()
 
-    node_names = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
-    nodes = dict(self.cfg.GetMultiNodeInfo(node_names))
+    node_uuids = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
+    nodes = dict(self.cfg.GetMultiNodeInfo(node_uuids))
 
     groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
                                                  for node in nodes.values()))
 
-    group2name_fn = lambda uuid: groups[uuid].name
     for instance in self.wanted_instances:
       pnode = nodes[instance.primary_node]
 
@@ -387,7 +399,7 @@ class LUInstanceQueryData(NoHooksLU):
         remote_info = self.rpc.call_instance_info(
             instance.primary_node, instance.name, instance.hypervisor,
             cluster.hvparams[instance.hypervisor])
-        remote_info.Raise("Error checking node %s" % instance.primary_node)
+        remote_info.Raise("Error checking node %s" % pnode.name)
         remote_info = remote_info.payload
         if remote_info and "state" in remote_info:
           remote_state = "up"
@@ -397,20 +409,24 @@ class LUInstanceQueryData(NoHooksLU):
           else:
             remote_state = instance.admin_state
 
-      disks = map(compat.partial(self._ComputeDiskStatus, instance, None),
+      group2name_fn = lambda uuid: groups[uuid].name
+      node_uuid2name_fn = lambda uuid: nodes[uuid].name
+
+      disks = map(compat.partial(self._ComputeDiskStatus, instance,
+                                 node_uuid2name_fn),
                   instance.disks)
 
-      snodes_group_uuids = [nodes[snode_name].group
-                            for snode_name in instance.secondary_nodes]
+      snodes_group_uuids = [nodes[snode_uuid].group
+                            for snode_uuid in instance.secondary_nodes]
 
       result[instance.name] = {
         "name": instance.name,
         "config_state": instance.admin_state,
         "run_state": remote_state,
-        "pnode": instance.primary_node,
+        "pnode": pnode.name,
         "pnode_group_uuid": pnode.group,
         "pnode_group_name": group2name_fn(pnode.group),
-        "snodes": instance.secondary_nodes,
+        "snodes": map(node_uuid2name_fn, instance.secondary_nodes),
         "snodes_group_uuids": snodes_group_uuids,
         "snodes_group_names": map(group2name_fn, snodes_group_uuids),
         "os": instance.os,
index d6b0414..4c86d2e 100644 (file)
@@ -38,9 +38,9 @@ from ganeti import opcodes
 from ganeti import rpc
 from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, Tasklet
 from ganeti.cmdlib.common import INSTANCE_DOWN, INSTANCE_NOT_RUNNING, \
-  AnnotateDiskParams, CheckIAllocatorOrNode, ExpandNodeName, \
+  AnnotateDiskParams, CheckIAllocatorOrNode, ExpandNodeUuidAndName, \
   CheckNodeOnline, CheckInstanceNodeGroups, CheckInstanceState, \
-  IsExclusiveStorageEnabledNode, FindFaultyInstanceDisks
+  IsExclusiveStorageEnabledNode, FindFaultyInstanceDisks, GetWantedNodes
 from ganeti.cmdlib.instance_utils import GetInstanceInfoText, \
   CopyLockList, ReleaseLocks, CheckNodeVmCapable, \
   BuildInstanceHookEnvByObject, CheckNodeNotDrained, CheckTargetNodeIPolicy
@@ -65,7 +65,7 @@ _DISK_TEMPLATE_DEVICE_TYPE = {
   }
 
 
-def CreateSingleBlockDev(lu, node, instance, device, info, force_open,
+def CreateSingleBlockDev(lu, node_uuid, instance, device, info, force_open,
                          excl_stor):
   """Create a single block device on a given node.
 
@@ -73,7 +73,7 @@ def CreateSingleBlockDev(lu, node, instance, device, info, force_open,
   created in advance.
 
   @param lu: the lu on whose behalf we execute
-  @param node: the node on which to create the device
+  @param node_uuid: the node on which to create the device
   @type instance: L{objects.Instance}
   @param instance: the instance which owns the device
   @type device: L{objects.Disk}
@@ -89,17 +89,19 @@ def CreateSingleBlockDev(lu, node, instance, device, info, force_open,
   @param excl_stor: Whether exclusive_storage is active for the node
 
   """
-  lu.cfg.SetDiskID(device, node)
-  result = lu.rpc.call_blockdev_create(node, device, device.size,
+  lu.cfg.SetDiskID(device, node_uuid)
+  result = lu.rpc.call_blockdev_create(node_uuid, device, device.size,
                                        instance.name, force_open, info,
                                        excl_stor)
   result.Raise("Can't create block device %s on"
-               " node %s for instance %s" % (device, node, instance.name))
+               " node %s for instance %s" % (device,
+                                             lu.cfg.GetNodeName(node_uuid),
+                                             instance.name))
   if device.physical_id is None:
     device.physical_id = result.payload
 
 
-def _CreateBlockDevInner(lu, node, instance, device, force_create,
+def _CreateBlockDevInner(lu, node_uuid, instance, device, force_create,
                          info, force_open, excl_stor):
   """Create a tree of block devices on a given node.
 
@@ -111,7 +113,7 @@ def _CreateBlockDevInner(lu, node, instance, device, force_create,
   @attention: The device has to be annotated already.
 
   @param lu: the lu on whose behalf we execute
-  @param node: the node on which to create the device
+  @param node_uuid: the node on which to create the device
   @type instance: L{objects.Instance}
   @param instance: the instance which owns the device
   @type device: L{objects.Disk}
@@ -139,18 +141,18 @@ def _CreateBlockDevInner(lu, node, instance, device, force_create,
 
     if device.children:
       for child in device.children:
-        devs = _CreateBlockDevInner(lu, node, instance, child, force_create,
-                                    info, force_open, excl_stor)
+        devs = _CreateBlockDevInner(lu, node_uuid, instance, child,
+                                    force_create, info, force_open, excl_stor)
         created_devices.extend(devs)
 
     if not force_create:
       return created_devices
 
-    CreateSingleBlockDev(lu, node, instance, device, info, force_open,
+    CreateSingleBlockDev(lu, node_uuid, instance, device, info, force_open,
                          excl_stor)
     # The device has been completely created, so there is no point in keeping
     # its subdevices in the list. We just add the device itself instead.
-    created_devices = [(node, device)]
+    created_devices = [(node_uuid, device)]
     return created_devices
 
   except errors.DeviceCreationError, e:
@@ -160,26 +162,26 @@ def _CreateBlockDevInner(lu, node, instance, device, force_create,
     raise errors.DeviceCreationError(str(e), created_devices)
 
 
-def IsExclusiveStorageEnabledNodeName(cfg, nodename):
+def IsExclusiveStorageEnabledNodeUuid(cfg, node_uuid):
   """Whether exclusive_storage is in effect for the given node.
 
   @type cfg: L{config.ConfigWriter}
   @param cfg: The cluster configuration
-  @type nodename: string
-  @param nodename: The node
+  @type node_uuid: string
+  @param node_uuid: The node UUID
   @rtype: bool
   @return: The effective value of exclusive_storage
   @raise errors.OpPrereqError: if no node exists with the given name
 
   """
-  ni = cfg.GetNodeInfo(nodename)
+  ni = cfg.GetNodeInfo(node_uuid)
   if ni is None:
-    raise errors.OpPrereqError("Invalid node name %s" % nodename,
+    raise errors.OpPrereqError("Invalid node UUID %s" % node_uuid,
                                errors.ECODE_NOENT)
   return IsExclusiveStorageEnabledNode(cfg, ni)
 
 
-def _CreateBlockDev(lu, node, instance, device, force_create, info,
+def _CreateBlockDev(lu, node_uuid, instance, device, force_create, info,
                     force_open):
   """Wrapper around L{_CreateBlockDevInner}.
 
@@ -187,8 +189,8 @@ def _CreateBlockDev(lu, node, instance, device, force_create, info,
 
   """
   (disk,) = AnnotateDiskParams(instance, [device], lu.cfg)
-  excl_stor = IsExclusiveStorageEnabledNodeName(lu.cfg, node)
-  return _CreateBlockDevInner(lu, node, instance, disk, force_create, info,
+  excl_stor = IsExclusiveStorageEnabledNodeUuid(lu.cfg, node_uuid)
+  return _CreateBlockDevInner(lu, node_uuid, instance, disk, force_create, info,
                               force_open, excl_stor)
 
 
@@ -203,14 +205,14 @@ def _UndoCreateDisks(lu, disks_created):
   @param disks_created: the result returned by L{CreateDisks}
 
   """
-  for (node, disk) in disks_created:
-    lu.cfg.SetDiskID(disk, node)
-    result = lu.rpc.call_blockdev_remove(node, disk)
+  for (node_uuid, disk) in disks_created:
+    lu.cfg.SetDiskID(disk, node_uuid)
+    result = lu.rpc.call_blockdev_remove(node_uuid, disk)
     result.Warn("Failed to remove newly-created disk %s on node %s" %
-                (disk, node), logging.warning)
+                (disk, lu.cfg.GetNodeName(node_uuid)), logging.warning)
 
 
-def CreateDisks(lu, instance, to_skip=None, target_node=None, disks=None):
+def CreateDisks(lu, instance, to_skip=None, target_node_uuid=None, disks=None):
   """Create all disks for an instance.
 
   This abstracts away some work from AddInstance.
@@ -221,8 +223,8 @@ def CreateDisks(lu, instance, to_skip=None, target_node=None, disks=None):
   @param instance: the instance whose disks we should create
   @type to_skip: list
   @param to_skip: list of indices to skip
-  @type target_node: string
-  @param target_node: if passed, overrides the target node for creation
+  @type target_node_uuid: string
+  @param target_node_uuid: if passed, overrides the target node for creation
   @type disks: list of {objects.Disk}
   @param disks: the disks to create; if not specified, all the disks of the
       instance are created
@@ -232,33 +234,35 @@ def CreateDisks(lu, instance, to_skip=None, target_node=None, disks=None):
 
   """
   info = GetInstanceInfoText(instance)
-  if target_node is None:
-    pnode = instance.primary_node
-    all_nodes = instance.all_nodes
+  if target_node_uuid is None:
+    pnode_uuid = instance.primary_node
+    all_node_uuids = instance.all_nodes
   else:
-    pnode = target_node
-    all_nodes = [pnode]
+    pnode_uuid = target_node_uuid
+    all_node_uuids = [pnode_uuid]
 
   if disks is None:
     disks = instance.disks
 
   if instance.disk_template in constants.DTS_FILEBASED:
     file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
-    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
+    result = lu.rpc.call_file_storage_dir_create(pnode_uuid, file_storage_dir)
 
     result.Raise("Failed to create directory '%s' on"
-                 " node %s" % (file_storage_dir, pnode))
+                 " node %s" % (file_storage_dir,
+                               lu.cfg.GetNodeName(pnode_uuid)))
 
   disks_created = []
   for idx, device in enumerate(disks):
     if to_skip and idx in to_skip:
       continue
     logging.info("Creating disk %s for instance '%s'", idx, instance.name)
-    for node in all_nodes:
-      f_create = node == pnode
+    for node_uuid in all_node_uuids:
+      f_create = node_uuid == pnode_uuid
       try:
-        _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
-        disks_created.append((node, device))
+        _CreateBlockDev(lu, node_uuid, instance, device, f_create, info,
+                        f_create)
+        disks_created.append((node_uuid, device))
       except errors.DeviceCreationError, e:
         logging.warning("Creating disk %s for instance '%s' failed",
                         idx, instance.name)
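
CreateDisks and _UndoCreateDisks now pass node UUIDs through the whole create/rollback cycle: each created device is remembered as a (node_uuid, disk) pair, and names are resolved only for log messages. A simplified sketch of that bookkeeping, with stub callables standing in for the RPC layer:

import logging

def create_disks(all_node_uuids, pnode_uuid, disks, create_fn):
  # Illustrative: remember where every device was created so a failure
  # can be rolled back per node UUID, as CreateDisks does above.
  disks_created = []
  for disk in disks:
    for node_uuid in all_node_uuids:
      force_create = (node_uuid == pnode_uuid)
      create_fn(node_uuid, disk, force_create)
      disks_created.append((node_uuid, disk))
  return disks_created

def undo_create_disks(disks_created, remove_fn, uuid2name_fn):
  # Illustrative counterpart of _UndoCreateDisks: UUIDs are turned into
  # names only when producing human-readable messages.
  for (node_uuid, disk) in disks_created:
    logging.warning("Removing newly-created disk %s from node %s",
                    disk, uuid2name_fn(node_uuid))
    remove_fn(node_uuid, disk)
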
@@ -375,7 +379,7 @@ def CheckRADOSFreeSpace():
   pass
 
 
-def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
+def _GenerateDRBD8Branch(lu, primary_uuid, secondary_uuid, size, vgnames, names,
                          iv_name, p_minor, s_minor):
   """Generate a drbd8 device complete with its children.
 
@@ -394,7 +398,7 @@ def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
                           params={})
   dev_meta.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
   drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
-                          logical_id=(primary, secondary, port,
+                          logical_id=(primary_uuid, secondary_uuid, port,
                                       p_minor, s_minor,
                                       shared_secret),
                           children=[dev_data, dev_meta],
@@ -404,7 +408,7 @@ def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
 
 
 def GenerateDiskTemplate(
-  lu, template_name, instance_name, primary_node, secondary_nodes,
+  lu, template_name, instance_name, primary_node_uuid, secondary_node_uuids,
   disk_info, file_storage_dir, file_driver, base_index,
   feedback_fn, full_disk_params, _req_file_storage=opcodes.RequireFileStorage,
   _req_shr_file_storage=opcodes.RequireSharedFileStorage):
@@ -418,11 +422,11 @@ def GenerateDiskTemplate(
   if template_name == constants.DT_DISKLESS:
     pass
   elif template_name == constants.DT_DRBD8:
-    if len(secondary_nodes) != 1:
+    if len(secondary_node_uuids) != 1:
       raise errors.ProgrammerError("Wrong template configuration")
-    remote_node = secondary_nodes[0]
+    remote_node_uuid = secondary_node_uuids[0]
     minors = lu.cfg.AllocateDRBDMinor(
-      [primary_node, remote_node] * len(disk_info), instance_name)
+      [primary_node_uuid, remote_node_uuid] * len(disk_info), instance_name)
 
     (drbd_params, _, _) = objects.Disk.ComputeLDParams(template_name,
                                                        full_disk_params)
@@ -437,7 +441,7 @@ def GenerateDiskTemplate(
       disk_index = idx + base_index
       data_vg = disk.get(constants.IDISK_VG, vgname)
       meta_vg = disk.get(constants.IDISK_METAVG, drbd_default_metavg)
-      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
+      disk_dev = _GenerateDRBD8Branch(lu, primary_node_uuid, remote_node_uuid,
                                       disk[constants.IDISK_SIZE],
                                       [data_vg, meta_vg],
                                       names[idx * 2:idx * 2 + 2],
@@ -447,7 +451,7 @@ def GenerateDiskTemplate(
       disk_dev.name = disk.get(constants.IDISK_NAME, None)
       disks.append(disk_dev)
   else:
-    if secondary_nodes:
+    if secondary_node_uuids:
       raise errors.ProgrammerError("Wrong template configuration")
 
     if template_name == constants.DT_FILE:
@@ -619,10 +623,10 @@ class LUInstanceRecreateDisks(LogicalUnit):
                                  " %s" % (self.op.iallocator, ial.info),
                                  errors.ECODE_NORES)
 
-    self.op.nodes = ial.result
+    (self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, ial.result)
     self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                  self.op.instance_name, self.op.iallocator,
-                 utils.CommaJoin(ial.result))
+                 utils.CommaJoin(self.op.nodes))
 
   def CheckArguments(self):
     if self.op.disks and ht.TNonNegativeInt(self.op.disks[0]):
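
GetWantedNodes (and ExpandNodeUuidAndName for single nodes) return both representations at once, so the LU can lock and operate by UUID while keeping its output name-based. A dict-backed sketch of that dual return value; resolve_nodes and the sample data are invented for illustration:

def resolve_nodes(nodes_by_name, wanted_names):
  # Illustrative: turn user-supplied names into parallel lists of UUIDs
  # and canonical names, similar to what GetWantedNodes provides above.
  uuids = []
  names = []
  for name in wanted_names:
    node = nodes_by_name[name]   # the real code also expands short names
    uuids.append(node["uuid"])
    names.append(node["name"])
  return (uuids, names)

sample = {"node1": {"uuid": "8f3c", "name": "node1.example.com"}}
assert resolve_nodes(sample, ["node1"]) == (["8f3c"], ["node1.example.com"])
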
@@ -654,8 +658,8 @@ class LUInstanceRecreateDisks(LogicalUnit):
     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
 
     if self.op.nodes:
-      self.op.nodes = [ExpandNodeName(self.cfg, n) for n in self.op.nodes]
-      self.needed_locks[locking.LEVEL_NODE] = list(self.op.nodes)
+      (self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, self.op.nodes)
+      self.needed_locks[locking.LEVEL_NODE] = list(self.op.node_uuids)
     else:
       self.needed_locks[locking.LEVEL_NODE] = []
       if self.op.iallocator:
@@ -725,18 +729,18 @@ class LUInstanceRecreateDisks(LogicalUnit):
     instance = self.cfg.GetInstanceInfo(self.op.instance_name)
     assert instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
-    if self.op.nodes:
-      if len(self.op.nodes) != len(instance.all_nodes):
+    if self.op.node_uuids:
+      if len(self.op.node_uuids) != len(instance.all_nodes):
         raise errors.OpPrereqError("Instance %s currently has %d nodes, but"
                                    " %d replacement nodes were specified" %
                                    (instance.name, len(instance.all_nodes),
-                                    len(self.op.nodes)),
+                                    len(self.op.node_uuids)),
                                    errors.ECODE_INVAL)
       assert instance.disk_template != constants.DT_DRBD8 or \
-             len(self.op.nodes) == 2
+             len(self.op.node_uuids) == 2
       assert instance.disk_template != constants.DT_PLAIN or \
-             len(self.op.nodes) == 1
-      primary_node = self.op.nodes[0]
+             len(self.op.node_uuids) == 1
+      primary_node = self.op.node_uuids[0]
     else:
       primary_node = instance.primary_node
     if not self.op.iallocator:
@@ -757,7 +761,7 @@ class LUInstanceRecreateDisks(LogicalUnit):
     # if we replace nodes *and* the old primary is offline, we don't
     # check the instance state
     old_pnode = self.cfg.GetNodeInfo(instance.primary_node)
-    if not ((self.op.iallocator or self.op.nodes) and old_pnode.offline):
+    if not ((self.op.iallocator or self.op.node_uuids) and old_pnode.offline):
       CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                          msg="cannot recreate disks")
 
@@ -771,7 +775,7 @@ class LUInstanceRecreateDisks(LogicalUnit):
       raise errors.OpPrereqError("Invalid disk index '%s'" % maxidx,
                                  errors.ECODE_INVAL)
 
-    if ((self.op.nodes or self.op.iallocator) and
+    if ((self.op.node_uuids or self.op.iallocator) and
          sorted(self.disks.keys()) != range(len(instance.disks))):
       raise errors.OpPrereqError("Can't recreate disks partially and"
                                  " change the nodes at the same time",
@@ -782,18 +786,18 @@ class LUInstanceRecreateDisks(LogicalUnit):
     if self.op.iallocator:
       self._RunAllocator()
       # Release unneeded node and node resource locks
-      ReleaseLocks(self, locking.LEVEL_NODE, keep=self.op.nodes)
-      ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=self.op.nodes)
+      ReleaseLocks(self, locking.LEVEL_NODE, keep=self.op.node_uuids)
+      ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=self.op.node_uuids)
       ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
 
     assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
 
-    if self.op.nodes:
-      nodes = self.op.nodes
+    if self.op.node_uuids:
+      node_uuids = self.op.node_uuids
     else:
-      nodes = instance.all_nodes
+      node_uuids = instance.all_nodes
     excl_stor = compat.any(
-      rpc.GetExclusiveStorageForNodeNames(self.cfg, nodes).values()
+      rpc.GetExclusiveStorageForNodes(self.cfg, node_uuids).values()
       )
     for new_params in self.disks.values():
       CheckSpindlesExclusiveStorage(new_params, excl_stor, False)
@@ -819,14 +823,15 @@ class LUInstanceRecreateDisks(LogicalUnit):
         continue
 
       # update secondaries for disks, if needed
-      if self.op.nodes and disk.dev_type == constants.LD_DRBD8:
+      if self.op.node_uuids and disk.dev_type == constants.LD_DRBD8:
         # need to update the nodes and minors
-        assert len(self.op.nodes) == 2
+        assert len(self.op.node_uuids) == 2
         assert len(disk.logical_id) == 6 # otherwise disk internals
                                          # have changed
         (_, _, old_port, _, _, old_secret) = disk.logical_id
-        new_minors = self.cfg.AllocateDRBDMinor(self.op.nodes, instance.name)
-        new_id = (self.op.nodes[0], self.op.nodes[1], old_port,
+        new_minors = self.cfg.AllocateDRBDMinor(self.op.node_uuids,
+                                                instance.name)
+        new_id = (self.op.node_uuids[0], self.op.node_uuids[1], old_port,
                   new_minors[0], new_minors[1], old_secret)
         assert len(disk.logical_id) == len(new_id)
       else:
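
When disks are recreated on new nodes, the DRBD logical_id keeps its port and secret but gets the new node UUIDs and freshly allocated minors. A small sketch of that tuple surgery; allocate_minors_fn stands in for cfg.AllocateDRBDMinor:

def rebuild_drbd_logical_id(old_logical_id, new_node_uuids,
                            allocate_minors_fn, instance_name):
  # Illustrative: preserve port and secret, swap in the two new node UUIDs
  # and the two newly allocated minors, as done for LD_DRBD8 disks above.
  (_, _, old_port, _, _, old_secret) = old_logical_id
  new_minors = allocate_minors_fn(new_node_uuids, instance_name)
  return (new_node_uuids[0], new_node_uuids[1], old_port,
          new_minors[0], new_minors[1], old_secret)
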
@@ -847,12 +852,12 @@ class LUInstanceRecreateDisks(LogicalUnit):
                     spindles=changes.get(constants.IDISK_SPINDLES, None))
 
     # change primary node, if needed
-    if self.op.nodes:
-      instance.primary_node = self.op.nodes[0]
+    if self.op.node_uuids:
+      instance.primary_node = self.op.node_uuids[0]
       self.LogWarning("Changing the instance's nodes, you will have to"
                       " remove any disks left on the older nodes manually")
 
-    if self.op.nodes:
+    if self.op.node_uuids:
       self.cfg.Update(instance, feedback_fn)
 
     # All touched nodes must be locked
@@ -868,7 +873,7 @@ class LUInstanceRecreateDisks(LogicalUnit):
       WipeOrCleanupDisks(self, instance, disks=wipedisks, cleanup=new_disks)
 
 
-def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
+def _CheckNodesFreeDiskOnVG(lu, node_uuids, vg, requested):
   """Checks if nodes have enough free disk space in the specified VG.
 
   This function checks if all given nodes have the needed amount of
@@ -878,8 +883,8 @@ def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
 
   @type lu: C{LogicalUnit}
   @param lu: a logical unit from which we get configuration data
-  @type nodenames: C{list}
-  @param nodenames: the list of node names to check
+  @type node_uuids: C{list}
+  @param node_uuids: the list of node UUIDs to check
   @type vg: C{str}
   @param vg: the volume group to check
   @type requested: C{int}
@@ -888,31 +893,33 @@ def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
       or we cannot check the node
 
   """
-  es_flags = rpc.GetExclusiveStorageForNodeNames(lu.cfg, nodenames)
+  es_flags = rpc.GetExclusiveStorageForNodes(lu.cfg, node_uuids)
   # FIXME: This maps everything to storage type 'lvm-vg' to maintain
   # the current functionality. Refactor to make it more flexible.
   hvname = lu.cfg.GetHypervisorType()
   hvparams = lu.cfg.GetClusterInfo().hvparams
-  nodeinfo = lu.rpc.call_node_info(nodenames, [(constants.ST_LVM_VG, vg)],
+  nodeinfo = lu.rpc.call_node_info(node_uuids, [(constants.ST_LVM_VG, vg)],
                                    [(hvname, hvparams[hvname])], es_flags)
-  for node in nodenames:
+  for node in node_uuids:
+    node_name = lu.cfg.GetNodeName(node)
+
     info = nodeinfo[node]
-    info.Raise("Cannot get current information from node %s" % node,
+    info.Raise("Cannot get current information from node %s" % node_name,
                prereq=True, ecode=errors.ECODE_ENVIRON)
     (_, (vg_info, ), _) = info.payload
     vg_free = vg_info.get("vg_free", None)
     if not isinstance(vg_free, int):
       raise errors.OpPrereqError("Can't compute free disk space on node"
                                  " %s for vg %s, result was '%s'" %
-                                 (node, vg, vg_free), errors.ECODE_ENVIRON)
+                                 (node_name, vg, vg_free), errors.ECODE_ENVIRON)
     if requested > vg_free:
       raise errors.OpPrereqError("Not enough disk space on target node %s"
                                  " vg %s: required %d MiB, available %d MiB" %
-                                 (node, vg, requested, vg_free),
+                                 (node_name, vg, requested, vg_free),
                                  errors.ECODE_NORES)
 
 
-def CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
+def CheckNodesFreeDiskPerVG(lu, node_uuids, req_sizes):
   """Checks if nodes have enough free disk space in all the VGs.
 
   This function checks if all given nodes have the needed amount of
@@ -922,8 +929,8 @@ def CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
 
   @type lu: C{LogicalUnit}
   @param lu: a logical unit from which we get configuration data
-  @type nodenames: C{list}
-  @param nodenames: the list of node names to check
+  @type node_uuids: C{list}
+  @param node_uuids: the list of node UUIDs to check
   @type req_sizes: C{dict}
   @param req_sizes: the hash of vg and corresponding amount of disk in
       MiB to check for
@@ -932,7 +939,7 @@ def CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
 
   """
   for vg, req_size in req_sizes.items():
-    _CheckNodesFreeDiskOnVG(lu, nodenames, vg, req_size)
+    _CheckNodesFreeDiskOnVG(lu, node_uuids, vg, req_size)
 
 
 def _DiskSizeInBytesToMebibytes(lu, size):
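
The free-space helpers take the node UUIDs plus a hash mapping each volume group to the space it needs in MiB, and check every group on every node. A toy version of that loop; free_mib_fn replaces the node_info RPC and the sizes are made up:

def check_nodes_free_disk_per_vg(node_uuids, req_sizes, free_mib_fn):
  # Illustrative: req_sizes maps VG name -> required MiB, mirroring the
  # req_sizes dictionary consumed by CheckNodesFreeDiskPerVG above.
  for (vg, req_mib) in req_sizes.items():
    for node_uuid in node_uuids:
      free_mib = free_mib_fn(node_uuid, vg)
      if req_mib > free_mib:
        raise ValueError("Not enough space in vg %s on node %s:"
                         " required %d MiB, available %d MiB" %
                         (vg, node_uuid, req_mib, free_mib))

check_nodes_free_disk_per_vg(["uuid-1"], {"xenvg": 1024},
                             lambda _uuid, _vg: 4096)
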
@@ -977,22 +984,23 @@ def WipeDisks(lu, instance, disks=None):
     start offset
 
   """
-  node = instance.primary_node
+  node_uuid = instance.primary_node
+  node_name = lu.cfg.GetNodeName(node_uuid)
 
   if disks is None:
     disks = [(idx, disk, 0)
              for (idx, disk) in enumerate(instance.disks)]
 
   for (_, device, _) in disks:
-    lu.cfg.SetDiskID(device, node)
+    lu.cfg.SetDiskID(device, node_uuid)
 
   logging.info("Pausing synchronization of disks of instance '%s'",
                instance.name)
-  result = lu.rpc.call_blockdev_pause_resume_sync(node,
+  result = lu.rpc.call_blockdev_pause_resume_sync(node_uuid,
                                                   (map(compat.snd, disks),
                                                    instance),
                                                   True)
-  result.Raise("Failed to pause disk synchronization on node '%s'" % node)
+  result.Raise("Failed to pause disk synchronization on node '%s'" % node_name)
 
   for idx, success in enumerate(result.payload):
     if not success:
@@ -1021,7 +1029,8 @@ def WipeDisks(lu, instance, disks=None):
       lu.LogInfo("* Wiping disk %s%s", idx, info_text)
 
       logging.info("Wiping disk %d for instance %s on node %s using"
-                   " chunk size %s", idx, instance.name, node, wipe_chunk_size)
+                   " chunk size %s", idx, instance.name, node_name,
+                   wipe_chunk_size)
 
       while offset < size:
         wipe_size = min(wipe_chunk_size, size - offset)
@@ -1029,8 +1038,8 @@ def WipeDisks(lu, instance, disks=None):
         logging.debug("Wiping disk %d, offset %s, chunk %s",
                       idx, offset, wipe_size)
 
-        result = lu.rpc.call_blockdev_wipe(node, (device, instance), offset,
-                                           wipe_size)
+        result = lu.rpc.call_blockdev_wipe(node_uuid, (device, instance),
+                                           offset, wipe_size)
         result.Raise("Could not wipe disk %d at offset %d for size %d" %
                      (idx, offset, wipe_size))
 
@@ -1045,14 +1054,14 @@ def WipeDisks(lu, instance, disks=None):
     logging.info("Resuming synchronization of disks for instance '%s'",
                  instance.name)
 
-    result = lu.rpc.call_blockdev_pause_resume_sync(node,
+    result = lu.rpc.call_blockdev_pause_resume_sync(node_uuid,
                                                     (map(compat.snd, disks),
                                                      instance),
                                                     False)
 
     if result.fail_msg:
       lu.LogWarning("Failed to resume disk synchronization on node '%s': %s",
-                    node, result.fail_msg)
+                    node_name, result.fail_msg)
     else:
       for idx, success in enumerate(result.payload):
         if not success:
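
WipeDisks pauses disk synchronization, wipes each disk in bounded chunks starting from the recorded offset, and resumes synchronization afterwards. A stripped-down sketch of the chunk loop only; wipe_fn replaces call_blockdev_wipe and the chunk size is arbitrary:

def wipe_in_chunks(size_mib, start_offset, wipe_fn, chunk_mib=1024):
  # Illustrative: advance through the device in chunks no larger than
  # chunk_mib, like the offset/wipe_size loop in WipeDisks above.
  offset = start_offset
  while offset < size_mib:
    wipe_size = min(chunk_mib, size_mib - offset)
    wipe_fn(offset, wipe_size)
    offset += wipe_size

wipe_in_chunks(2500, 0, lambda off, length: None)
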
@@ -1113,10 +1122,11 @@ def WaitForSync(lu, instance, disks=None, oneshot=False):
   if not oneshot:
     lu.LogInfo("Waiting for instance %s to sync disks", instance.name)
 
-  node = instance.primary_node
+  node_uuid = instance.primary_node
+  node_name = lu.cfg.GetNodeName(node_uuid)
 
   for dev in disks:
-    lu.cfg.SetDiskID(dev, node)
+    lu.cfg.SetDiskID(dev, node_uuid)
 
   # TODO: Convert to utils.Retry
 
@@ -1126,14 +1136,14 @@ def WaitForSync(lu, instance, disks=None, oneshot=False):
     max_time = 0
     done = True
     cumul_degraded = False
-    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, (disks, instance))
+    rstats = lu.rpc.call_blockdev_getmirrorstatus(node_uuid, (disks, instance))
     msg = rstats.fail_msg
     if msg:
-      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
+      lu.LogWarning("Can't get any data from node %s: %s", node_name, msg)
       retries += 1
       if retries >= 10:
         raise errors.RemoteError("Can't contact node %s for mirror data,"
-                                 " aborting." % node)
+                                 " aborting." % node_name)
       time.sleep(6)
       continue
     rstats = rstats.payload
@@ -1141,7 +1151,7 @@ def WaitForSync(lu, instance, disks=None, oneshot=False):
     for i, mstat in enumerate(rstats):
       if mstat is None:
         lu.LogWarning("Can't compute data for node %s/%s",
-                      node, disks[i].iv_name)
+                      node_name, disks[i].iv_name)
         continue
 
       cumul_degraded = (cumul_degraded or
@@ -1191,15 +1201,15 @@ def ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
   disks = ExpandCheckDisks(instance, disks)
 
   for disk in disks:
-    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
-      lu.cfg.SetDiskID(top_disk, node)
-      result = lu.rpc.call_blockdev_shutdown(node, (top_disk, instance))
+    for node_uuid, top_disk in disk.ComputeNodeTree(instance.primary_node):
+      lu.cfg.SetDiskID(top_disk, node_uuid)
+      result = lu.rpc.call_blockdev_shutdown(node_uuid, (top_disk, instance))
       msg = result.fail_msg
       if msg:
         lu.LogWarning("Could not shutdown block device %s on node %s: %s",
-                      disk.iv_name, node, msg)
-        if ((node == instance.primary_node and not ignore_primary) or
-            (node != instance.primary_node and not result.offline)):
+                      disk.iv_name, lu.cfg.GetNodeName(node_uuid), msg)
+        if ((node_uuid == instance.primary_node and not ignore_primary) or
+            (node_uuid != instance.primary_node and not result.offline)):
           all_result = False
   return all_result
 
@@ -1259,20 +1269,21 @@ def AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
 
   # 1st pass, assemble on all nodes in secondary mode
   for idx, inst_disk in enumerate(disks):
-    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
+    for node_uuid, node_disk in inst_disk.ComputeNodeTree(
+                                  instance.primary_node):
       if ignore_size:
         node_disk = node_disk.Copy()
         node_disk.UnsetSize()
-      lu.cfg.SetDiskID(node_disk, node)
-      result = lu.rpc.call_blockdev_assemble(node, (node_disk, instance), iname,
-                                             False, idx)
+      lu.cfg.SetDiskID(node_disk, node_uuid)
+      result = lu.rpc.call_blockdev_assemble(node_uuid, (node_disk, instance),
+                                             iname, False, idx)
       msg = result.fail_msg
       if msg:
-        is_offline_secondary = (node in instance.secondary_nodes and
+        is_offline_secondary = (node_uuid in instance.secondary_nodes and
                                 result.offline)
         lu.LogWarning("Could not prepare block device %s on node %s"
                       " (is_primary=False, pass=1): %s",
-                      inst_disk.iv_name, node, msg)
+                      inst_disk.iv_name, lu.cfg.GetNodeName(node_uuid), msg)
         if not (ignore_secondaries or is_offline_secondary):
           disks_ok = False
 
@@ -1282,25 +1293,27 @@ def AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
   for idx, inst_disk in enumerate(disks):
     dev_path = None
 
-    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
-      if node != instance.primary_node:
+    for node_uuid, node_disk in inst_disk.ComputeNodeTree(
+                                  instance.primary_node):
+      if node_uuid != instance.primary_node:
         continue
       if ignore_size:
         node_disk = node_disk.Copy()
         node_disk.UnsetSize()
-      lu.cfg.SetDiskID(node_disk, node)
-      result = lu.rpc.call_blockdev_assemble(node, (node_disk, instance), iname,
-                                             True, idx)
+      lu.cfg.SetDiskID(node_disk, node_uuid)
+      result = lu.rpc.call_blockdev_assemble(node_uuid, (node_disk, instance),
+                                             iname, True, idx)
       msg = result.fail_msg
       if msg:
         lu.LogWarning("Could not prepare block device %s on node %s"
                       " (is_primary=True, pass=2): %s",
-                      inst_disk.iv_name, node, msg)
+                      inst_disk.iv_name, lu.cfg.GetNodeName(node_uuid), msg)
         disks_ok = False
       else:
         dev_path = result.payload
 
-    device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))
+    device_info.append((lu.cfg.GetNodeName(instance.primary_node),
+                        inst_disk.iv_name, dev_path))
 
   # leave the disks configured for the primary node
   # this is a workaround that would be fixed better by
@@ -1382,9 +1395,9 @@ class LUInstanceGrowDisk(LogicalUnit):
     instance = self.cfg.GetInstanceInfo(self.op.instance_name)
     assert instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
-    nodenames = list(instance.all_nodes)
-    for node in nodenames:
-      CheckNodeOnline(self, node)
+    node_uuids = list(instance.all_nodes)
+    for node_uuid in node_uuids:
+      CheckNodeOnline(self, node_uuid)
 
     self.instance = instance
 
@@ -1411,14 +1424,14 @@ class LUInstanceGrowDisk(LogicalUnit):
                                    utils.FormatUnit(self.delta, "h"),
                                    errors.ECODE_INVAL)
 
-    self._CheckDiskSpace(nodenames, self.disk.ComputeGrowth(self.delta))
+    self._CheckDiskSpace(node_uuids, self.disk.ComputeGrowth(self.delta))
 
-  def _CheckDiskSpace(self, nodenames, req_vgspace):
+  def _CheckDiskSpace(self, node_uuids, req_vgspace):
     template = self.instance.disk_template
     if template not in (constants.DTS_NO_FREE_SPACE_CHECK):
       # TODO: check the free disk space for file, when that feature will be
       # supported
-      nodes = map(self.cfg.GetNodeInfo, nodenames)
+      nodes = map(self.cfg.GetNodeInfo, node_uuids)
       es_nodes = filter(lambda n: IsExclusiveStorageEnabledNode(self.cfg, n),
                         nodes)
       if es_nodes:
@@ -1426,7 +1439,7 @@ class LUInstanceGrowDisk(LogicalUnit):
         # at free space; for now, let's simply abort the operation.
         raise errors.OpPrereqError("Cannot grow disks when exclusive_storage"
                                    " is enabled", errors.ECODE_STATE)
-      CheckNodesFreeDiskPerVG(self, nodenames, req_vgspace)
+      CheckNodesFreeDiskPerVG(self, node_uuids, req_vgspace)
 
   def Exec(self, feedback_fn):
     """Execute disk grow.
@@ -1451,11 +1464,12 @@ class LUInstanceGrowDisk(LogicalUnit):
                  utils.FormatUnit(self.target, "h")))
 
     # First run all grow ops in dry-run mode
-    for node in instance.all_nodes:
-      self.cfg.SetDiskID(disk, node)
-      result = self.rpc.call_blockdev_grow(node, (disk, instance), self.delta,
-                                           True, True)
-      result.Raise("Dry-run grow request failed to node %s" % node)
+    for node_uuid in instance.all_nodes:
+      self.cfg.SetDiskID(disk, node_uuid)
+      result = self.rpc.call_blockdev_grow(node_uuid, (disk, instance),
+                                           self.delta, True, True)
+      result.Raise("Dry-run grow request failed to node %s" %
+                   self.cfg.GetNodeName(node_uuid))
 
     if wipe_disks:
       # Get disk size from primary node for wiping
@@ -1481,18 +1495,20 @@ class LUInstanceGrowDisk(LogicalUnit):
 
     # We know that (as far as we can test) operations across different
     # nodes will succeed, time to run it for real on the backing storage
-    for node in instance.all_nodes:
-      self.cfg.SetDiskID(disk, node)
-      result = self.rpc.call_blockdev_grow(node, (disk, instance), self.delta,
-                                           False, True)
-      result.Raise("Grow request failed to node %s" % node)
+    for node_uuid in instance.all_nodes:
+      self.cfg.SetDiskID(disk, node_uuid)
+      result = self.rpc.call_blockdev_grow(node_uuid, (disk, instance),
+                                           self.delta, False, True)
+      result.Raise("Grow request failed to node %s" %
+                   self.cfg.GetNodeName(node_uuid))
 
     # And now execute it for logical storage, on the primary node
-    node = instance.primary_node
-    self.cfg.SetDiskID(disk, node)
-    result = self.rpc.call_blockdev_grow(node, (disk, instance), self.delta,
-                                         False, False)
-    result.Raise("Grow request failed to node %s" % node)
+    node_uuid = instance.primary_node
+    self.cfg.SetDiskID(disk, node_uuid)
+    result = self.rpc.call_blockdev_grow(node_uuid, (disk, instance),
+                                         self.delta, False, False)
+    result.Raise("Grow request failed to node %s" %
+                 self.cfg.GetNodeName(node_uuid))
 
     disk.RecordGrow(self.delta)
     self.cfg.Update(instance, feedback_fn)
@@ -1567,13 +1583,15 @@ class LUInstanceReplaceDisks(LogicalUnit):
       "Conflicting options"
 
     if self.op.remote_node is not None:
-      self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
+      (self.op.remote_node_uuid, self.op.remote_node) = \
+        ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
+                              self.op.remote_node)
 
       # Warning: do not remove the locking of the new secondary here
       # unless DRBD8Dev.AddChildren is changed to work in parallel;
       # currently it doesn't since parallel invocations of
       # FindUnusedMinor will conflict
-      self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node]
+      self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node_uuid]
       self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
     else:
       self.needed_locks[locking.LEVEL_NODE] = []
@@ -1587,7 +1605,7 @@ class LUInstanceReplaceDisks(LogicalUnit):
     self.needed_locks[locking.LEVEL_NODE_RES] = []
 
     self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
-                                   self.op.iallocator, self.op.remote_node,
+                                   self.op.iallocator, self.op.remote_node_uuid,
                                    self.op.disks, self.op.early_release,
                                    self.op.ignore_ipolicy)
 
@@ -1595,7 +1613,7 @@ class LUInstanceReplaceDisks(LogicalUnit):
 
   def DeclareLocks(self, level):
     if level == locking.LEVEL_NODEGROUP:
-      assert self.op.remote_node is None
+      assert self.op.remote_node_uuid is None
       assert self.op.iallocator is not None
       assert not self.needed_locks[locking.LEVEL_NODEGROUP]
 
@@ -1607,15 +1625,15 @@ class LUInstanceReplaceDisks(LogicalUnit):
 
     elif level == locking.LEVEL_NODE:
       if self.op.iallocator is not None:
-        assert self.op.remote_node is None
+        assert self.op.remote_node_uuid is None
         assert not self.needed_locks[locking.LEVEL_NODE]
         assert locking.NAL in self.owned_locks(locking.LEVEL_NODE_ALLOC)
 
         # Lock member nodes of all locked groups
         self.needed_locks[locking.LEVEL_NODE] = \
-          [node_name
+          [node_uuid
            for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
-           for node_name in self.cfg.GetNodeGroup(group_uuid).members]
+           for node_uuid in self.cfg.GetNodeGroup(group_uuid).members]
       else:
         assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
 
@@ -1636,7 +1654,7 @@ class LUInstanceReplaceDisks(LogicalUnit):
     env = {
       "MODE": self.op.mode,
       "NEW_SECONDARY": self.op.remote_node,
-      "OLD_SECONDARY": instance.secondary_nodes[0],
+      "OLD_SECONDARY": self.cfg.GetNodeName(instance.secondary_nodes[0]),
       }
     env.update(BuildInstanceHookEnvByObject(self, instance))
     return env
@@ -1650,8 +1668,8 @@ class LUInstanceReplaceDisks(LogicalUnit):
       self.cfg.GetMasterNode(),
       instance.primary_node,
       ]
-    if self.op.remote_node is not None:
-      nl.append(self.op.remote_node)
+    if self.op.remote_node_uuid is not None:
+      nl.append(self.op.remote_node_uuid)
     return nl, nl
 
   def CheckPrereq(self):
@@ -1749,7 +1767,7 @@ class LUInstanceDeactivateDisks(NoHooksLU):
       _SafeShutdownInstanceDisks(self, instance)
 
 
-def _CheckDiskConsistencyInner(lu, instance, dev, node, on_primary,
+def _CheckDiskConsistencyInner(lu, instance, dev, node_uuid, on_primary,
                                ldisk=False):
   """Check that mirrors are not degraded.
 
@@ -1760,18 +1778,19 @@ def _CheckDiskConsistencyInner(lu, instance, dev, node, on_primary,
   the device(s)) to the ldisk (representing the local storage status).
 
   """
-  lu.cfg.SetDiskID(dev, node)
+  lu.cfg.SetDiskID(dev, node_uuid)
 
   result = True
 
   if on_primary or dev.AssembleOnSecondary():
-    rstats = lu.rpc.call_blockdev_find(node, dev)
+    rstats = lu.rpc.call_blockdev_find(node_uuid, dev)
     msg = rstats.fail_msg
     if msg:
-      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
+      lu.LogWarning("Can't find disk on node %s: %s",
+                    lu.cfg.GetNodeName(node_uuid), msg)
       result = False
     elif not rstats.payload:
-      lu.LogWarning("Can't find disk on node %s", node)
+      lu.LogWarning("Can't find disk on node %s", lu.cfg.GetNodeName(node_uuid))
       result = False
     else:
       if ldisk:
@@ -1781,33 +1800,33 @@ def _CheckDiskConsistencyInner(lu, instance, dev, node, on_primary,
 
   if dev.children:
     for child in dev.children:
-      result = result and _CheckDiskConsistencyInner(lu, instance, child, node,
-                                                     on_primary)
+      result = result and _CheckDiskConsistencyInner(lu, instance, child,
+                                                     node_uuid, on_primary)
 
   return result
 
 
-def CheckDiskConsistency(lu, instance, dev, node, on_primary, ldisk=False):
+def CheckDiskConsistency(lu, instance, dev, node_uuid, on_primary, ldisk=False):
   """Wrapper around L{_CheckDiskConsistencyInner}.
 
   """
   (disk,) = AnnotateDiskParams(instance, [dev], lu.cfg)
-  return _CheckDiskConsistencyInner(lu, instance, disk, node, on_primary,
+  return _CheckDiskConsistencyInner(lu, instance, disk, node_uuid, on_primary,
                                     ldisk=ldisk)
 
 
-def _BlockdevFind(lu, node, dev, instance):
+def _BlockdevFind(lu, node_uuid, dev, instance):
   """Wrapper around call_blockdev_find to annotate diskparams.
 
   @param lu: A reference to the lu object
-  @param node: The node to call out
+  @param node_uuid: The node to call out
   @param dev: The device to find
   @param instance: The instance object the device belongs to
   @returns The result of the rpc call
 
   """
   (disk,) = AnnotateDiskParams(instance, [dev], lu.cfg)
-  return lu.rpc.call_blockdev_find(node, disk)
+  return lu.rpc.call_blockdev_find(node_uuid, disk)
 
 
 def _GenerateUniqueNames(lu, exts):
@@ -1829,7 +1848,7 @@ class TLReplaceDisks(Tasklet):
   Note: Locking is not within the scope of this class.
 
   """
-  def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
+  def __init__(self, lu, instance_name, mode, iallocator_name, remote_node_uuid,
                disks, early_release, ignore_ipolicy):
     """Initializes this class.
 
@@ -1840,26 +1859,28 @@ class TLReplaceDisks(Tasklet):
     self.instance_name = instance_name
     self.mode = mode
     self.iallocator_name = iallocator_name
-    self.remote_node = remote_node
+    self.remote_node_uuid = remote_node_uuid
     self.disks = disks
     self.early_release = early_release
     self.ignore_ipolicy = ignore_ipolicy
 
     # Runtime data
     self.instance = None
-    self.new_node = None
-    self.target_node = None
-    self.other_node = None
+    self.new_node_uuid = None
+    self.target_node_uuid = None
+    self.other_node_uuid = None
     self.remote_node_info = None
     self.node_secondary_ip = None
 
   @staticmethod
-  def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
+  def _RunAllocator(lu, iallocator_name, instance_name,
+                    relocate_from_node_uuids):
     """Compute a new secondary node using an IAllocator.
 
     """
-    req = iallocator.IAReqRelocate(name=instance_name,
-                                   relocate_from=list(relocate_from))
+    req = iallocator.IAReqRelocate(
+          name=instance_name,
+          relocate_from_node_uuids=list(relocate_from_node_uuids))
     ial = iallocator.IAllocator(lu.cfg, lu.rpc, req)
 
     ial.Run(iallocator_name)
@@ -1870,18 +1891,23 @@ class TLReplaceDisks(Tasklet):
                                  errors.ECODE_NORES)
 
     remote_node_name = ial.result[0]
+    remote_node = lu.cfg.GetNodeInfoByName(remote_node_name)
+
+    if remote_node is None:
+      raise errors.OpPrereqError("Node %s not found in configuration" %
+                                 remote_node_name, errors.ECODE_NOENT)
 
     lu.LogInfo("Selected new secondary for instance '%s': %s",
                instance_name, remote_node_name)
 
-    return remote_node_name
+    return remote_node.uuid
 
-  def _FindFaultyDisks(self, node_name):
+  def _FindFaultyDisks(self, node_uuid):
     """Wrapper for L{FindFaultyInstanceDisks}.
 
     """
     return FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
-                                   node_name, True)
+                                   node_uuid, True)
 
   def _CheckDisksActivated(self, instance):
     """Checks if the instance disks are activated.
@@ -1890,14 +1916,15 @@ class TLReplaceDisks(Tasklet):
     @return: True if they are activated, False otherwise
 
     """
-    nodes = instance.all_nodes
+    node_uuids = instance.all_nodes
 
     for idx, dev in enumerate(instance.disks):
-      for node in nodes:
-        self.lu.LogInfo("Checking disk/%d on %s", idx, node)
-        self.cfg.SetDiskID(dev, node)
+      for node_uuid in node_uuids:
+        self.lu.LogInfo("Checking disk/%d on %s", idx,
+                        self.cfg.GetNodeName(node_uuid))
+        self.cfg.SetDiskID(dev, node_uuid)
 
-        result = _BlockdevFind(self, node, dev, instance)
+        result = _BlockdevFind(self, node_uuid, dev, instance)
 
         if result.offline:
           continue
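
Because the IAllocator protocol stays name-based, _RunAllocator above translates the returned node name back into a UUID via GetNodeInfoByName before using it internally, rejecting names that are not in the configuration. A dict-backed sketch of that translation; the exception type differs from the real errors.OpPrereqError:

def allocator_result_to_uuid(nodes_by_name, remote_node_name):
  # Illustrative: name -> UUID translation of an iallocator result, in the
  # spirit of the GetNodeInfoByName lookup in _RunAllocator above.
  node = nodes_by_name.get(remote_node_name)
  if node is None:
    # The real code raises errors.OpPrereqError(..., errors.ECODE_NOENT).
    raise KeyError("Node %s not found in configuration" % remote_node_name)
  return node["uuid"]

assert allocator_result_to_uuid({"n2.example.com": {"uuid": "77aa"}},
                                "n2.example.com") == "77aa"
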
@@ -1927,29 +1954,30 @@ class TLReplaceDisks(Tasklet):
                                  errors.ECODE_FAULT)
 
     instance = self.instance
-    secondary_node = instance.secondary_nodes[0]
+    secondary_node_uuid = instance.secondary_nodes[0]
 
     if self.iallocator_name is None:
-      remote_node = self.remote_node
+      remote_node_uuid = self.remote_node_uuid
     else:
-      remote_node = self._RunAllocator(self.lu, self.iallocator_name,
-                                       instance.name, instance.secondary_nodes)
+      remote_node_uuid = self._RunAllocator(self.lu, self.iallocator_name,
+                                            instance.name,
+                                            instance.secondary_nodes)
 
-    if remote_node is None:
+    if remote_node_uuid is None:
       self.remote_node_info = None
     else:
-      assert remote_node in self.lu.owned_locks(locking.LEVEL_NODE), \
-             "Remote node '%s' is not locked" % remote_node
+      assert remote_node_uuid in self.lu.owned_locks(locking.LEVEL_NODE), \
+             "Remote node '%s' is not locked" % remote_node_uuid
 
-      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
+      self.remote_node_info = self.cfg.GetNodeInfo(remote_node_uuid)
       assert self.remote_node_info is not None, \
-        "Cannot retrieve locked node %s" % remote_node
+        "Cannot retrieve locked node %s" % remote_node_uuid
 
-    if remote_node == self.instance.primary_node:
+    if remote_node_uuid == self.instance.primary_node:
       raise errors.OpPrereqError("The specified node is the primary node of"
                                  " the instance", errors.ECODE_INVAL)
 
-    if remote_node == secondary_node:
+    if remote_node_uuid == secondary_node_uuid:
       raise errors.OpPrereqError("The specified node is already the"
                                  " secondary node of the instance",
                                  errors.ECODE_INVAL)
@@ -1965,7 +1993,7 @@ class TLReplaceDisks(Tasklet):
                                    " first" % self.instance_name,
                                    errors.ECODE_STATE)
       faulty_primary = self._FindFaultyDisks(instance.primary_node)
-      faulty_secondary = self._FindFaultyDisks(secondary_node)
+      faulty_secondary = self._FindFaultyDisks(secondary_node_uuid)
 
       if faulty_primary and faulty_secondary:
         raise errors.OpPrereqError("Instance %s has faulty disks on more than"
@@ -1975,14 +2003,14 @@ class TLReplaceDisks(Tasklet):
 
       if faulty_primary:
         self.disks = faulty_primary
-        self.target_node = instance.primary_node
-        self.other_node = secondary_node
-        check_nodes = [self.target_node, self.other_node]
+        self.target_node_uuid = instance.primary_node
+        self.other_node_uuid = secondary_node_uuid
+        check_nodes = [self.target_node_uuid, self.other_node_uuid]
       elif faulty_secondary:
         self.disks = faulty_secondary
-        self.target_node = secondary_node
-        self.other_node = instance.primary_node
-        check_nodes = [self.target_node, self.other_node]
+        self.target_node_uuid = secondary_node_uuid
+        self.other_node_uuid = instance.primary_node
+        check_nodes = [self.target_node_uuid, self.other_node_uuid]
       else:
         self.disks = []
         check_nodes = []
@@ -1990,31 +2018,31 @@ class TLReplaceDisks(Tasklet):
     else:
       # Non-automatic modes
       if self.mode == constants.REPLACE_DISK_PRI:
-        self.target_node = instance.primary_node
-        self.other_node = secondary_node
-        check_nodes = [self.target_node, self.other_node]
+        self.target_node_uuid = instance.primary_node
+        self.other_node_uuid = secondary_node_uuid
+        check_nodes = [self.target_node_uuid, self.other_node_uuid]
 
       elif self.mode == constants.REPLACE_DISK_SEC:
-        self.target_node = secondary_node
-        self.other_node = instance.primary_node
-        check_nodes = [self.target_node, self.other_node]
+        self.target_node_uuid = secondary_node_uuid
+        self.other_node_uuid = instance.primary_node
+        check_nodes = [self.target_node_uuid, self.other_node_uuid]
 
       elif self.mode == constants.REPLACE_DISK_CHG:
-        self.new_node = remote_node
-        self.other_node = instance.primary_node
-        self.target_node = secondary_node
-        check_nodes = [self.new_node, self.other_node]
+        self.new_node_uuid = remote_node_uuid
+        self.other_node_uuid = instance.primary_node
+        self.target_node_uuid = secondary_node_uuid
+        check_nodes = [self.new_node_uuid, self.other_node_uuid]
 
-        CheckNodeNotDrained(self.lu, remote_node)
-        CheckNodeVmCapable(self.lu, remote_node)
+        CheckNodeNotDrained(self.lu, remote_node_uuid)
+        CheckNodeVmCapable(self.lu, remote_node_uuid)
 
-        old_node_info = self.cfg.GetNodeInfo(secondary_node)
+        old_node_info = self.cfg.GetNodeInfo(secondary_node_uuid)
         assert old_node_info is not None
         if old_node_info.offline and not self.early_release:
           # doesn't make sense to delay the release
           self.early_release = True
           self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
-                          " early-release mode", secondary_node)
+                          " early-release mode", secondary_node_uuid)
 
       else:
         raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
@@ -2035,13 +2063,13 @@ class TLReplaceDisks(Tasklet):
       CheckTargetNodeIPolicy(self, ipolicy, instance, self.remote_node_info,
                              self.cfg, ignore=self.ignore_ipolicy)
 
-    for node in check_nodes:
-      CheckNodeOnline(self.lu, node)
+    for node_uuid in check_nodes:
+      CheckNodeOnline(self.lu, node_uuid)
 
-    touched_nodes = frozenset(node_name for node_name in [self.new_node,
-                                                          self.other_node,
-                                                          self.target_node]
-                              if node_name is not None)
+    touched_nodes = frozenset(node_uuid for node_uuid in [self.new_node_uuid,
+                                                          self.other_node_uuid,
+                                                          self.target_node_uuid]
+                              if node_uuid is not None)
 
     # Release unneeded node and node resource locks
     ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)
@@ -2056,7 +2084,7 @@ class TLReplaceDisks(Tasklet):
       instance.FindDisk(disk_idx)
 
     # Get secondary node IP addresses
-    self.node_secondary_ip = dict((name, node.secondary_ip) for (name, node)
+    self.node_secondary_ip = dict((uuid, node.secondary_ip) for (uuid, node)
                                   in self.cfg.GetMultiNodeInfo(touched_nodes))
 
   def Exec(self, feedback_fn):
@@ -2089,9 +2117,11 @@ class TLReplaceDisks(Tasklet):
 
     feedback_fn("Replacing disk(s) %s for instance '%s'" %
                 (utils.CommaJoin(self.disks), self.instance.name))
-    feedback_fn("Current primary node: %s" % self.instance.primary_node)
+    feedback_fn("Current primary node: %s" %
+                self.cfg.GetNodeName(self.instance.primary_node))
     feedback_fn("Current secondary node: %s" %
-                utils.CommaJoin(self.instance.secondary_nodes))
+                utils.CommaJoin(self.cfg.GetNodeNames(
+                                  self.instance.secondary_nodes)))
 
     activate_disks = not self.instance.disks_active
 
@@ -2101,7 +2131,7 @@ class TLReplaceDisks(Tasklet):
 
     try:
       # Should we replace the secondary node?
-      if self.new_node is not None:
+      if self.new_node_uuid is not None:
         fn = self._ExecDrbd8Secondary
       else:
         fn = self._ExecDrbd8DiskOnly
@@ -2126,57 +2156,59 @@ class TLReplaceDisks(Tasklet):
 
     return result
 
-  def _CheckVolumeGroup(self, nodes):
+  def _CheckVolumeGroup(self, node_uuids):
     self.lu.LogInfo("Checking volume groups")
 
     vgname = self.cfg.GetVGName()
 
     # Make sure volume group exists on all involved nodes
-    results = self.rpc.call_vg_list(nodes)
+    results = self.rpc.call_vg_list(node_uuids)
     if not results:
       raise errors.OpExecError("Can't list volume groups on the nodes")
 
-    for node in nodes:
-      res = results[node]
-      res.Raise("Error checking node %s" % node)
+    for node_uuid in node_uuids:
+      res = results[node_uuid]
+      res.Raise("Error checking node %s" % self.cfg.GetNodeName(node_uuid))
       if vgname not in res.payload:
         raise errors.OpExecError("Volume group '%s' not found on node %s" %
-                                 (vgname, node))
+                                 (vgname, self.cfg.GetNodeName(node_uuid)))
 
-  def _CheckDisksExistence(self, nodes):
+  def _CheckDisksExistence(self, node_uuids):
     # Check disk existence
     for idx, dev in enumerate(self.instance.disks):
       if idx not in self.disks:
         continue
 
-      for node in nodes:
-        self.lu.LogInfo("Checking disk/%d on %s", idx, node)
-        self.cfg.SetDiskID(dev, node)
+      for node_uuid in node_uuids:
+        self.lu.LogInfo("Checking disk/%d on %s", idx,
+                        self.cfg.GetNodeName(node_uuid))
+        self.cfg.SetDiskID(dev, node_uuid)
 
-        result = _BlockdevFind(self, node, dev, self.instance)
+        result = _BlockdevFind(self, node_uuid, dev, self.instance)
 
         msg = result.fail_msg
         if msg or not result.payload:
           if not msg:
             msg = "disk not found"
           raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
-                                   (idx, node, msg))
+                                   (idx, self.cfg.GetNodeName(node_uuid), msg))
 
-  def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
+  def _CheckDisksConsistency(self, node_uuid, on_primary, ldisk):
     for idx, dev in enumerate(self.instance.disks):
       if idx not in self.disks:
         continue
 
       self.lu.LogInfo("Checking disk/%d consistency on node %s" %
-                      (idx, node_name))
+                      (idx, self.cfg.GetNodeName(node_uuid)))
 
-      if not CheckDiskConsistency(self.lu, self.instance, dev, node_name,
+      if not CheckDiskConsistency(self.lu, self.instance, dev, node_uuid,
                                   on_primary, ldisk=ldisk):
         raise errors.OpExecError("Node %s has degraded storage, unsafe to"
                                  " replace disks for instance %s" %
-                                 (node_name, self.instance.name))
+                                 (self.cfg.GetNodeName(node_uuid),
+                                  self.instance.name))
 
-  def _CreateNewStorage(self, node_name):
+  def _CreateNewStorage(self, node_uuid):
     """Create new storage on the primary or secondary node.
 
     This is only used for same-node replaces, not for changing the
@@ -2190,9 +2222,10 @@ class TLReplaceDisks(Tasklet):
       if idx not in self.disks:
         continue
 
-      self.lu.LogInfo("Adding storage on %s for disk/%d", node_name, idx)
+      self.lu.LogInfo("Adding storage on %s for disk/%d",
+                      self.cfg.GetNodeName(node_uuid), idx)
 
-      self.cfg.SetDiskID(dev, node_name)
+      self.cfg.SetDiskID(dev, node_uuid)
 
       lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
       names = _GenerateUniqueNames(self.lu, lv_names)
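
To make the result-addressing convention in the checks above concrete: RPC results come back keyed by the same identifiers used in the call (node UUIDs here), and only the error messages resolve a UUID to a name. Below is a toy sketch with stand-in result objects; everything except the overall shape is made up.

  class ToyResult(object):
      def __init__(self, payload, fail_msg=None):
          self.payload = payload
          self.fail_msg = fail_msg

  def toy_call_vg_list(node_uuids):
      # results are keyed by the identifiers the caller used (UUIDs)
      return dict((u, ToyResult(["xenvg"])) for u in node_uuids)

  def check_volume_group(get_node_name, node_uuids, vgname):
      results = toy_call_vg_list(node_uuids)
      for node_uuid in node_uuids:
          res = results[node_uuid]
          if res.fail_msg:
              raise RuntimeError("Error checking node %s: %s"
                                 % (get_node_name(node_uuid), res.fail_msg))
          if vgname not in res.payload:
              raise RuntimeError("Volume group '%s' not found on node %s"
                                 % (vgname, get_node_name(node_uuid)))

  names = {"uuid-1": "node1.example.com"}
  check_volume_group(names.get, ["uuid-1"], "xenvg")   # passes silently
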
@@ -2211,21 +2244,21 @@ class TLReplaceDisks(Tasklet):
       new_lvs = [lv_data, lv_meta]
       old_lvs = [child.Copy() for child in dev.children]
       iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
-      excl_stor = IsExclusiveStorageEnabledNodeName(self.lu.cfg, node_name)
+      excl_stor = IsExclusiveStorageEnabledNodeUuid(self.lu.cfg, node_uuid)
 
       # we pass force_create=True to force the LVM creation
       for new_lv in new_lvs:
-        _CreateBlockDevInner(self.lu, node_name, self.instance, new_lv, True,
+        _CreateBlockDevInner(self.lu, node_uuid, self.instance, new_lv, True,
                              GetInstanceInfoText(self.instance), False,
                              excl_stor)
 
     return iv_names
 
-  def _CheckDevices(self, node_name, iv_names):
+  def _CheckDevices(self, node_uuid, iv_names):
     for name, (dev, _, _) in iv_names.iteritems():
-      self.cfg.SetDiskID(dev, node_name)
+      self.cfg.SetDiskID(dev, node_uuid)
 
-      result = _BlockdevFind(self, node_name, dev, self.instance)
+      result = _BlockdevFind(self, node_uuid, dev, self.instance)
 
       msg = result.fail_msg
       if msg or not result.payload:
@@ -2237,14 +2270,14 @@ class TLReplaceDisks(Tasklet):
       if result.payload.is_degraded:
         raise errors.OpExecError("DRBD device %s is degraded!" % name)
 
-  def _RemoveOldStorage(self, node_name, iv_names):
+  def _RemoveOldStorage(self, node_uuid, iv_names):
     for name, (_, old_lvs, _) in iv_names.iteritems():
       self.lu.LogInfo("Remove logical volumes for %s", name)
 
       for lv in old_lvs:
-        self.cfg.SetDiskID(lv, node_name)
+        self.cfg.SetDiskID(lv, node_uuid)
 
-        msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
+        msg = self.rpc.call_blockdev_remove(node_uuid, lv).fail_msg
         if msg:
           self.lu.LogWarning("Can't remove old LV: %s", msg,
                              hint="remove unused LVs manually")
@@ -2275,28 +2308,29 @@ class TLReplaceDisks(Tasklet):
 
     # Step: check device activation
     self.lu.LogStep(1, steps_total, "Check device existence")
-    self._CheckDisksExistence([self.other_node, self.target_node])
-    self._CheckVolumeGroup([self.target_node, self.other_node])
+    self._CheckDisksExistence([self.other_node_uuid, self.target_node_uuid])
+    self._CheckVolumeGroup([self.target_node_uuid, self.other_node_uuid])
 
     # Step: check other node consistency
     self.lu.LogStep(2, steps_total, "Check peer consistency")
-    self._CheckDisksConsistency(self.other_node,
-                                self.other_node == self.instance.primary_node,
-                                False)
+    self._CheckDisksConsistency(
+      self.other_node_uuid, self.other_node_uuid == self.instance.primary_node,
+      False)
 
     # Step: create new storage
     self.lu.LogStep(3, steps_total, "Allocate new storage")
-    iv_names = self._CreateNewStorage(self.target_node)
+    iv_names = self._CreateNewStorage(self.target_node_uuid)
 
     # Step: for each lv, detach+rename*2+attach
     self.lu.LogStep(4, steps_total, "Changing drbd configuration")
     for dev, old_lvs, new_lvs in iv_names.itervalues():
       self.lu.LogInfo("Detaching %s drbd from local storage", dev.iv_name)
 
-      result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
+      result = self.rpc.call_blockdev_removechildren(self.target_node_uuid, dev,
                                                      old_lvs)
       result.Raise("Can't detach drbd from local storage on node"
-                   " %s for device %s" % (self.target_node, dev.iv_name))
+                   " %s for device %s" %
+                   (self.cfg.GetNodeName(self.target_node_uuid), dev.iv_name))
       #dev.children = []
       #cfg.Update(instance)
 
@@ -2314,44 +2348,47 @@ class TLReplaceDisks(Tasklet):
       # Build the rename list based on what LVs exist on the node
       rename_old_to_new = []
       for to_ren in old_lvs:
-        result = self.rpc.call_blockdev_find(self.target_node, to_ren)
+        result = self.rpc.call_blockdev_find(self.target_node_uuid, to_ren)
         if not result.fail_msg and result.payload:
           # device exists
           rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
 
       self.lu.LogInfo("Renaming the old LVs on the target node")
-      result = self.rpc.call_blockdev_rename(self.target_node,
+      result = self.rpc.call_blockdev_rename(self.target_node_uuid,
                                              rename_old_to_new)
-      result.Raise("Can't rename old LVs on node %s" % self.target_node)
+      result.Raise("Can't rename old LVs on node %s" %
+                   self.cfg.GetNodeName(self.target_node_uuid))
 
       # Now we rename the new LVs to the old LVs
       self.lu.LogInfo("Renaming the new LVs on the target node")
       rename_new_to_old = [(new, old.physical_id)
                            for old, new in zip(old_lvs, new_lvs)]
-      result = self.rpc.call_blockdev_rename(self.target_node,
+      result = self.rpc.call_blockdev_rename(self.target_node_uuid,
                                              rename_new_to_old)
-      result.Raise("Can't rename new LVs on node %s" % self.target_node)
+      result.Raise("Can't rename new LVs on node %s" %
+                   self.cfg.GetNodeName(self.target_node_uuid))
 
       # Intermediate steps of in memory modifications
       for old, new in zip(old_lvs, new_lvs):
         new.logical_id = old.logical_id
-        self.cfg.SetDiskID(new, self.target_node)
+        self.cfg.SetDiskID(new, self.target_node_uuid)
 
       # We need to modify old_lvs so that removal later removes the
       # right LVs, not the newly added ones; note that old_lvs is a
       # copy here
       for disk in old_lvs:
         disk.logical_id = ren_fn(disk, temp_suffix)
-        self.cfg.SetDiskID(disk, self.target_node)
+        self.cfg.SetDiskID(disk, self.target_node_uuid)
 
       # Now that the new lvs have the old name, we can add them to the device
-      self.lu.LogInfo("Adding new mirror component on %s", self.target_node)
-      result = self.rpc.call_blockdev_addchildren(self.target_node,
+      self.lu.LogInfo("Adding new mirror component on %s",
+                      self.cfg.GetNodeName(self.target_node_uuid))
+      result = self.rpc.call_blockdev_addchildren(self.target_node_uuid,
                                                   (dev, self.instance), new_lvs)
       msg = result.fail_msg
       if msg:
         for new_lv in new_lvs:
-          msg2 = self.rpc.call_blockdev_remove(self.target_node,
+          msg2 = self.rpc.call_blockdev_remove(self.target_node_uuid,
                                                new_lv).fail_msg
           if msg2:
             self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
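
The detach/rename/attach dance above pairs old and new LVs positionally. A simplified sketch follows, using plain (vg, name) tuples instead of real disk objects and a made-up temp_name helper in place of the rename function used above; it only shows how the two rename lists are built and why old_lvs must afterwards point at the renamed volumes.

  def temp_name(logical_id, suffix="_replaced"):
      vg, name = logical_id
      return (vg, name + suffix)

  old_lvs = [("xenvg", ".disk0_data"), ("xenvg", ".disk0_meta")]
  new_lvs = [("xenvg", ".disk0_data_new"), ("xenvg", ".disk0_meta_new")]

  # first the old LVs move out of the way under a temporary name ...
  rename_old_to_new = [(old, temp_name(old)) for old in old_lvs]
  # ... then the new LVs take over the old names
  rename_new_to_old = [(new, old) for (old, new) in zip(old_lvs, new_lvs)]

  # the "old" list must now reference the renamed volumes, so the final
  # cleanup step removes those and not the freshly attached ones
  old_lvs = [temp_name(old) for old in old_lvs]
  print(rename_old_to_new)
  print(rename_new_to_old)
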
@@ -2363,7 +2400,7 @@ class TLReplaceDisks(Tasklet):
 
     if self.early_release:
       self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
-      self._RemoveOldStorage(self.target_node, iv_names)
+      self._RemoveOldStorage(self.target_node_uuid, iv_names)
       # TODO: Check if releasing locks early still makes sense
       ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
     else:
@@ -2389,7 +2426,7 @@ class TLReplaceDisks(Tasklet):
     # Step: remove old storage
     if not self.early_release:
       self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
-      self._RemoveOldStorage(self.target_node, iv_names)
+      self._RemoveOldStorage(self.target_node_uuid, iv_names)
 
   def _ExecDrbd8Secondary(self, feedback_fn):
     """Replace the secondary node for DRBD 8.
@@ -2426,13 +2463,14 @@ class TLReplaceDisks(Tasklet):
     # Step: create new storage
     self.lu.LogStep(3, steps_total, "Allocate new storage")
     disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
-    excl_stor = IsExclusiveStorageEnabledNodeName(self.lu.cfg, self.new_node)
+    excl_stor = IsExclusiveStorageEnabledNodeUuid(self.lu.cfg,
+                                                  self.new_node_uuid)
     for idx, dev in enumerate(disks):
       self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
-                      (self.new_node, idx))
+                      (self.cfg.GetNodeName(self.new_node_uuid), idx))
       # we pass force_create=True to force LVM creation
       for new_lv in dev.children:
-        _CreateBlockDevInner(self.lu, self.new_node, self.instance, new_lv,
+        _CreateBlockDevInner(self.lu, self.new_node_uuid, self.instance, new_lv,
                              True, GetInstanceInfoText(self.instance), False,
                              excl_stor)
 
@@ -2440,15 +2478,15 @@ class TLReplaceDisks(Tasklet):
     # after this, we must manually remove the drbd minors on both the
     # error and the success paths
     self.lu.LogStep(4, steps_total, "Changing drbd configuration")
-    minors = self.cfg.AllocateDRBDMinor([self.new_node
-                                         for dev in self.instance.disks],
+    minors = self.cfg.AllocateDRBDMinor([self.new_node_uuid
+                                         for _ in self.instance.disks],
                                         self.instance.name)
     logging.debug("Allocated minors %r", minors)
 
     iv_names = {}
     for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
       self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
-                      (self.new_node, idx))
+                      (self.cfg.GetNodeName(self.new_node_uuid), idx))
       # create new devices on new_node; note that we create two IDs:
       # one without port, so the drbd will be activated without
       # networking information on the new node at this stage, and one
@@ -2460,9 +2498,9 @@ class TLReplaceDisks(Tasklet):
         assert self.instance.primary_node == o_node2, "Three-node instance?"
         p_minor = o_minor2
 
-      new_alone_id = (self.instance.primary_node, self.new_node, None,
+      new_alone_id = (self.instance.primary_node, self.new_node_uuid, None,
                       p_minor, new_minor, o_secret)
-      new_net_id = (self.instance.primary_node, self.new_node, o_port,
+      new_net_id = (self.instance.primary_node, self.new_node_uuid, o_port,
                     p_minor, new_minor, o_secret)
 
       iv_names[idx] = (dev, dev.children, new_net_id)
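
A small sketch of the minor allocation above: one DRBD minor is requested per instance disk, all for the new secondary's UUID, and the result is paired back with the disks by position. The allocator below is a toy stand-in, not the real ConfigWriter method.

  def toy_allocate_drbd_minors(node_uuids):
      # sequential per-node counters; the real allocator also persists state
      counters = {}
      minors = []
      for uuid in node_uuids:
          minors.append(counters.get(uuid, 0))
          counters[uuid] = counters.get(uuid, 0) + 1
      return minors

  instance_disks = ["disk0", "disk1"]
  new_node_uuid = "uuid-new-secondary"

  minors = toy_allocate_drbd_minors([new_node_uuid for _ in instance_disks])
  for idx, (disk, new_minor) in enumerate(zip(instance_disks, minors)):
      print("disk/%d (%s): minor %d on %s" % (idx, disk, new_minor,
                                              new_node_uuid))
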
@@ -2476,7 +2514,7 @@ class TLReplaceDisks(Tasklet):
       (anno_new_drbd,) = AnnotateDiskParams(self.instance, [new_drbd],
                                             self.cfg)
       try:
-        CreateSingleBlockDev(self.lu, self.new_node, self.instance,
+        CreateSingleBlockDev(self.lu, self.new_node_uuid, self.instance,
                              anno_new_drbd,
                              GetInstanceInfoText(self.instance), False,
                              excl_stor)
@@ -2487,8 +2525,8 @@ class TLReplaceDisks(Tasklet):
     # We have new devices, shutdown the drbd on the old secondary
     for idx, dev in enumerate(self.instance.disks):
       self.lu.LogInfo("Shutting down drbd for disk/%d on old node", idx)
-      self.cfg.SetDiskID(dev, self.target_node)
-      msg = self.rpc.call_blockdev_shutdown(self.target_node,
+      self.cfg.SetDiskID(dev, self.target_node_uuid)
+      msg = self.rpc.call_blockdev_shutdown(self.target_node_uuid,
                                             (dev, self.instance)).fail_msg
       if msg:
         self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
@@ -2523,7 +2561,7 @@ class TLReplaceDisks(Tasklet):
     self.lu.LogInfo("Attaching primary drbds to new secondary"
                     " (standalone => connected)")
     result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
-                                            self.new_node],
+                                            self.new_node_uuid],
                                            self.node_secondary_ip,
                                            (self.instance.disks, self.instance),
                                            self.instance.name,
@@ -2532,7 +2570,7 @@ class TLReplaceDisks(Tasklet):
       msg = to_result.fail_msg
       if msg:
         self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
-                           to_node, msg,
+                           self.cfg.GetNodeName(to_node), msg,
                            hint=("please do a gnt-instance info to see the"
                                  " status of disks"))
 
@@ -2540,7 +2578,7 @@ class TLReplaceDisks(Tasklet):
 
     if self.early_release:
       self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
-      self._RemoveOldStorage(self.target_node, iv_names)
+      self._RemoveOldStorage(self.target_node_uuid, iv_names)
       # TODO: Check if releasing locks early still makes sense
       ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
     else:
@@ -2563,4 +2601,4 @@ class TLReplaceDisks(Tasklet):
     # Step: remove old storage
     if not self.early_release:
       self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
-      self._RemoveOldStorage(self.target_node, iv_names)
+      self._RemoveOldStorage(self.target_node_uuid, iv_names)
index c1a1f25..eefb14f 100644 (file)
@@ -35,19 +35,19 @@ from ganeti.cmdlib.common import AnnotateDiskParams, \
   ComputeIPolicyInstanceViolation
 
 
-def BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
-                         minmem, maxmem, vcpus, nics, disk_template, disks,
-                         bep, hvp, hypervisor_name, tags):
+def BuildInstanceHookEnv(name, primary_node_name, secondary_node_names, os_type,
+                         status, minmem, maxmem, vcpus, nics, disk_template,
+                         disks, bep, hvp, hypervisor_name, tags):
   """Builds instance related env variables for hooks
 
   This builds the hook environment from individual variables.
 
   @type name: string
   @param name: the name of the instance
-  @type primary_node: string
-  @param primary_node: the name of the instance's primary node
-  @type secondary_nodes: list
-  @param secondary_nodes: list of secondary nodes as strings
+  @type primary_node_name: string
+  @param primary_node_name: the name of the instance's primary node
+  @type secondary_node_names: list
+  @param secondary_node_names: list of secondary nodes as strings
   @type os_type: string
   @param os_type: the name of the instance's OS
   @type status: string
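
Since hooks are an external interface, callers now resolve node UUIDs back to names before building this environment (BuildInstanceHookEnvByObject below does that via the config). A toy sketch of the resulting variables; the UUIDs and host names are made up.

  uuid_to_name = {"uuid-1": "node1.example.com", "uuid-2": "node2.example.com"}
  primary_node_uuid = "uuid-1"            # what the configuration stores
  secondary_node_uuids = ["uuid-2"]

  env = {
      # hooks keep seeing names, not UUIDs
      "INSTANCE_PRIMARY": uuid_to_name[primary_node_uuid],
      "INSTANCE_SECONDARIES": " ".join(uuid_to_name[u]
                                       for u in secondary_node_uuids),
  }
  assert env["INSTANCE_PRIMARY"] == "node1.example.com"
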
@@ -80,8 +80,8 @@ def BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
   env = {
     "OP_TARGET": name,
     "INSTANCE_NAME": name,
-    "INSTANCE_PRIMARY": primary_node,
-    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
+    "INSTANCE_PRIMARY": primary_node_name,
+    "INSTANCE_SECONDARIES": " ".join(secondary_node_names),
     "INSTANCE_OS_TYPE": os_type,
     "INSTANCE_STATUS": status,
     "INSTANCE_MINMEM": minmem,
@@ -160,8 +160,8 @@ def BuildInstanceHookEnvByObject(lu, instance, override=None):
   hvp = cluster.FillHV(instance)
   args = {
     "name": instance.name,
-    "primary_node": instance.primary_node,
-    "secondary_nodes": instance.secondary_nodes,
+    "primary_node_name": lu.cfg.GetNodeName(instance.primary_node),
+    "secondary_node_names": lu.cfg.GetNodeNames(instance.secondary_nodes),
     "os_type": instance.os,
     "status": instance.admin_state,
     "maxmem": bep[constants.BE_MAXMEM],
@@ -189,29 +189,29 @@ def GetClusterDomainSecret():
                                strict=True)
 
 
-def CheckNodeNotDrained(lu, node):
+def CheckNodeNotDrained(lu, node_uuid):
   """Ensure that a given node is not drained.
 
   @param lu: the LU on behalf of which we make the check
-  @param node: the node to check
+  @param node_uuid: the node to check
   @raise errors.OpPrereqError: if the node is drained
 
   """
-  if lu.cfg.GetNodeInfo(node).drained:
-    raise errors.OpPrereqError("Can't use drained node %s" % node,
+  if lu.cfg.GetNodeInfo(node_uuid).drained:
+    raise errors.OpPrereqError("Can't use drained node %s" % node_uuid,
                                errors.ECODE_STATE)
 
 
-def CheckNodeVmCapable(lu, node):
+def CheckNodeVmCapable(lu, node_uuid):
   """Ensure that a given node is vm capable.
 
   @param lu: the LU on behalf of which we make the check
-  @param node: the node to check
+  @param node_uuid: the node to check
   @raise errors.OpPrereqError: if the node is not vm capable
 
   """
-  if not lu.cfg.GetNodeInfo(node).vm_capable:
-    raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node,
+  if not lu.cfg.GetNodeInfo(node_uuid).vm_capable:
+    raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node_uuid,
                                errors.ECODE_STATE)
 
 
@@ -237,7 +237,7 @@ def RemoveInstance(lu, feedback_fn, instance, ignore_failures):
   lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name
 
 
-def RemoveDisks(lu, instance, target_node=None, ignore_failures=False):
+def RemoveDisks(lu, instance, target_node_uuid=None, ignore_failures=False):
   """Remove all disks for an instance.
 
   This abstracts away some work from `AddInstance()` and
@@ -248,8 +248,9 @@ def RemoveDisks(lu, instance, target_node=None, ignore_failures=False):
   @param lu: the logical unit on whose behalf we execute
   @type instance: L{objects.Instance}
   @param instance: the instance whose disks we should remove
-  @type target_node: string
-  @param target_node: used to override the node on which to remove the disks
+  @type target_node_uuid: string
+  @param target_node_uuid: used to override the node on which to remove the
+          disks
   @rtype: boolean
   @return: the success of the removal
 
@@ -260,17 +261,18 @@ def RemoveDisks(lu, instance, target_node=None, ignore_failures=False):
   ports_to_release = set()
   anno_disks = AnnotateDiskParams(instance, instance.disks, lu.cfg)
   for (idx, device) in enumerate(anno_disks):
-    if target_node:
-      edata = [(target_node, device)]
+    if target_node_uuid:
+      edata = [(target_node_uuid, device)]
     else:
       edata = device.ComputeNodeTree(instance.primary_node)
-    for node, disk in edata:
-      lu.cfg.SetDiskID(disk, node)
-      result = lu.rpc.call_blockdev_remove(node, disk)
+    for node_uuid, disk in edata:
+      lu.cfg.SetDiskID(disk, node_uuid)
+      result = lu.rpc.call_blockdev_remove(node_uuid, disk)
       if result.fail_msg:
         lu.LogWarning("Could not remove disk %s on node %s,"
-                      " continuing anyway: %s", idx, node, result.fail_msg)
-        if not (result.offline and node != instance.primary_node):
+                      " continuing anyway: %s", idx,
+                      lu.cfg.GetNodeName(node_uuid), result.fail_msg)
+        if not (result.offline and node_uuid != instance.primary_node):
           all_result = False
 
     # if this is a DRBD disk, return its port to the pool
@@ -283,14 +285,14 @@ def RemoveDisks(lu, instance, target_node=None, ignore_failures=False):
 
   if instance.disk_template in constants.DTS_FILEBASED:
     file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
-    if target_node:
-      tgt = target_node
+    if target_node_uuid:
+      tgt = target_node_uuid
     else:
       tgt = instance.primary_node
     result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
     if result.fail_msg:
       lu.LogWarning("Could not remove directory '%s' on node %s: %s",
-                    file_storage_dir, instance.primary_node, result.fail_msg)
+                    file_storage_dir, lu.cfg.GetNodeName(tgt), result.fail_msg)
       all_result = False
 
   return all_result
@@ -450,7 +452,7 @@ def GetInstanceInfoText(instance):
   return "originstname+%s" % instance.name
 
 
-def CheckNodeFreeMemory(lu, node, reason, requested, hvname, hvparams):
+def CheckNodeFreeMemory(lu, node_uuid, reason, requested, hvname, hvparams):
   """Checks if a node has enough free memory.
 
   This function checks if a given node has the needed amount of free
@@ -460,8 +462,8 @@ def CheckNodeFreeMemory(lu, node, reason, requested, hvname, hvparams):
 
   @type lu: C{LogicalUnit}
   @param lu: a logical unit from which we get configuration data
-  @type node: C{str}
-  @param node: the node to check
+  @type node_uuid: C{str}
+  @param node_uuid: the node to check
   @type reason: C{str}
   @param reason: string to use in the error message
   @type requested: C{int}
@@ -476,60 +478,63 @@ def CheckNodeFreeMemory(lu, node, reason, requested, hvname, hvparams):
       we cannot check the node
 
   """
-  nodeinfo = lu.rpc.call_node_info([node], None, [(hvname, hvparams)], False)
-  nodeinfo[node].Raise("Can't get data from node %s" % node,
-                       prereq=True, ecode=errors.ECODE_ENVIRON)
-  (_, _, (hv_info, )) = nodeinfo[node].payload
+  node_name = lu.cfg.GetNodeName(node_uuid)
+  nodeinfo = lu.rpc.call_node_info([node_uuid], None, [(hvname, hvparams)],
+                                   False)
+  nodeinfo[node_uuid].Raise("Can't get data from node %s" % node_name,
+                            prereq=True, ecode=errors.ECODE_ENVIRON)
+  (_, _, (hv_info, )) = nodeinfo[node_uuid].payload
 
   free_mem = hv_info.get("memory_free", None)
   if not isinstance(free_mem, int):
     raise errors.OpPrereqError("Can't compute free memory on node %s, result"
-                               " was '%s'" % (node, free_mem),
+                               " was '%s'" % (node_name, free_mem),
                                errors.ECODE_ENVIRON)
   if requested > free_mem:
     raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                                " needed %s MiB, available %s MiB" %
-                               (node, reason, requested, free_mem),
+                               (node_name, reason, requested, free_mem),
                                errors.ECODE_NORES)
   return free_mem
 
 
-def CheckInstanceBridgesExist(lu, instance, node=None):
+def CheckInstanceBridgesExist(lu, instance, node_uuid=None):
   """Check that the bridges needed by an instance exist.
 
   """
-  if node is None:
-    node = instance.primary_node
-  CheckNicsBridgesExist(lu, instance.nics, node)
+  if node_uuid is None:
+    node_uuid = instance.primary_node
+  CheckNicsBridgesExist(lu, instance.nics, node_uuid)
 
 
-def CheckNicsBridgesExist(lu, target_nics, target_node):
+def CheckNicsBridgesExist(lu, nics, node_uuid):
   """Check that the bridges needed by a list of nics exist.
 
   """
   cluster = lu.cfg.GetClusterInfo()
-  paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in target_nics]
+  paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in nics]
   brlist = [params[constants.NIC_LINK] for params in paramslist
             if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
   if brlist:
-    result = lu.rpc.call_bridges_exist(target_node, brlist)
+    result = lu.rpc.call_bridges_exist(node_uuid, brlist)
     result.Raise("Error checking bridges on destination node '%s'" %
-                 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)
+                 lu.cfg.GetNodeName(node_uuid), prereq=True,
+                 ecode=errors.ECODE_ENVIRON)
 
 
-def CheckNodeHasOS(lu, node, os_name, force_variant):
+def CheckNodeHasOS(lu, node_uuid, os_name, force_variant):
   """Ensure that a node supports a given OS.
 
   @param lu: the LU on behalf of which we make the check
-  @param node: the node to check
+  @param node_uuid: the node to check
   @param os_name: the OS to query about
   @param force_variant: whether to ignore variant errors
   @raise errors.OpPrereqError: if the node is not supporting the OS
 
   """
-  result = lu.rpc.call_os_get(node, os_name)
+  result = lu.rpc.call_os_get(node_uuid, os_name)
   result.Raise("OS '%s' not in supported OS list for node %s" %
-               (os_name, node),
+               (os_name, lu.cfg.GetNodeName(node_uuid)),
                prereq=True, ecode=errors.ECODE_INVAL)
   if not force_variant:
     _CheckOSVariant(result.payload, os_name)
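
A rough standalone illustration of CheckNodeFreeMemory above: the node name is resolved once up front for messages, the per-node data stays keyed by UUID, and the payload is validated before the comparison. The data and the plain-dict "RPC" below are made up.

  def check_node_free_memory(uuid_to_name, node_info_by_uuid, node_uuid,
                             reason, requested):
      # toy mirror of CheckNodeFreeMemory; the RPC layer is replaced by a
      # plain dict keyed by node UUID
      node_name = uuid_to_name[node_uuid]
      hv_info = node_info_by_uuid[node_uuid]
      free_mem = hv_info.get("memory_free", None)
      if not isinstance(free_mem, int):
          raise ValueError("Can't compute free memory on node %s, result was"
                           " '%s'" % (node_name, free_mem))
      if requested > free_mem:
          raise ValueError("Not enough memory on node %s for %s: needed %s MiB,"
                           " available %s MiB"
                           % (node_name, reason, requested, free_mem))
      return free_mem

  print(check_node_free_memory({"uuid-1": "node1.example.com"},
                               {"uuid-1": {"memory_free": 4096}},
                               "uuid-1", "instance failover", 1024))
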
index 893b3f2..9770f1f 100644 (file)
@@ -47,13 +47,14 @@ class LUOobCommand(NoHooksLU):
 
     """
     if self.op.node_names:
-      self.op.node_names = GetWantedNodes(self, self.op.node_names)
-      lock_names = self.op.node_names
+      (self.op.node_uuids, self.op.node_names) = \
+        GetWantedNodes(self, self.op.node_names)
+      lock_node_uuids = self.op.node_uuids
     else:
-      lock_names = locking.ALL_SET
+      lock_node_uuids = locking.ALL_SET
 
     self.needed_locks = {
-      locking.LEVEL_NODE: lock_names,
+      locking.LEVEL_NODE: lock_node_uuids,
       }
 
     self.share_locks[locking.LEVEL_NODE_ALLOC] = 1
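
The hunk above relies on GetWantedNodes now returning a (uuids, names) pair. Below is a toy version of that contract, assuming a plain name-to-UUID map; the point is that callers unpack both lists and take locks on the UUIDs.

  def toy_get_wanted_nodes(name_to_uuid, node_names):
      # toy version of the (uuids, names) contract used above
      uuids = [name_to_uuid[name] for name in node_names]
      return (uuids, list(node_names))

  name_to_uuid = {"node1.example.com": "uuid-1", "node2.example.com": "uuid-2"}
  (node_uuids, node_names) = toy_get_wanted_nodes(name_to_uuid,
                                                  ["node1.example.com"])
  needed_locks = {"node": node_uuids}     # locks are taken on UUIDs
  assert (node_uuids, node_names) == (["uuid-1"], ["node1.example.com"])
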
@@ -73,54 +74,54 @@ class LUOobCommand(NoHooksLU):
 
     """
     self.nodes = []
-    self.master_node = self.cfg.GetMasterNode()
+    self.master_node_uuid = self.cfg.GetMasterNode()
+    master_node_obj = self.cfg.GetNodeInfo(self.master_node_uuid)
 
     assert self.op.power_delay >= 0.0
 
-    if self.op.node_names:
+    if self.op.node_uuids:
       if (self.op.command in self._SKIP_MASTER and
-          self.master_node in self.op.node_names):
-        master_node_obj = self.cfg.GetNodeInfo(self.master_node)
+          master_node_obj.uuid in self.op.node_uuids):
         master_oob_handler = SupportsOob(self.cfg, master_node_obj)
 
         if master_oob_handler:
           additional_text = ("run '%s %s %s' if you want to operate on the"
                              " master regardless") % (master_oob_handler,
                                                       self.op.command,
-                                                      self.master_node)
+                                                      master_node_obj.name)
         else:
           additional_text = "it does not support out-of-band operations"
 
         raise errors.OpPrereqError(("Operating on the master node %s is not"
                                     " allowed for %s; %s") %
-                                   (self.master_node, self.op.command,
+                                   (master_node_obj.name, self.op.command,
                                     additional_text), errors.ECODE_INVAL)
     else:
-      self.op.node_names = self.cfg.GetNodeList()
+      self.op.node_uuids = self.cfg.GetNodeList()
       if self.op.command in self._SKIP_MASTER:
-        self.op.node_names.remove(self.master_node)
+        self.op.node_uuids.remove(master_node_obj.uuid)
 
     if self.op.command in self._SKIP_MASTER:
-      assert self.master_node not in self.op.node_names
+      assert master_node_obj.uuid not in self.op.node_uuids
 
-    for (node_name, node) in self.cfg.GetMultiNodeInfo(self.op.node_names):
+    for node_uuid in self.op.node_uuids:
+      node = self.cfg.GetNodeInfo(node_uuid)
       if node is None:
-        raise errors.OpPrereqError("Node %s not found" % node_name,
+        raise errors.OpPrereqError("Node %s not found" % node_uuid,
                                    errors.ECODE_NOENT)
-      else:
-        self.nodes.append(node)
+
+      self.nodes.append(node)
 
       if (not self.op.ignore_status and
           (self.op.command == constants.OOB_POWER_OFF and not node.offline)):
         raise errors.OpPrereqError(("Cannot power off node %s because it is"
-                                    " not marked offline") % node_name,
+                                    " not marked offline") % node.name,
                                    errors.ECODE_STATE)
 
   def Exec(self, feedback_fn):
     """Execute OOB and return result if we expect any.
 
     """
-    master_node = self.master_node
     ret = []
 
     for idx, node in enumerate(utils.NiceSort(self.nodes,
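
A compressed sketch of the master-node handling above: membership tests use UUIDs, the default target list is all nodes minus the master for commands that must skip it, and error messages use the resolved name. The helper and all identifiers below are illustrative only.

  def toy_oob_targets(op_node_uuids, all_node_uuids, master_uuid,
                      master_name, skip_master):
      if not op_node_uuids:
          # no explicit targets: operate on every node, minus the master if
          # the command must not touch it
          op_node_uuids = list(all_node_uuids)
          if skip_master:
              op_node_uuids.remove(master_uuid)
      elif skip_master and master_uuid in op_node_uuids:
          # compared by UUID, reported by name
          raise ValueError("Operating on the master node %s is not allowed"
                           % master_name)
      return op_node_uuids

  print(toy_oob_targets([], ["uuid-1", "uuid-m"], "uuid-m",
                        "master.example.com", True))
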
@@ -136,7 +137,7 @@ class LUOobCommand(NoHooksLU):
 
       logging.info("Executing out-of-band command '%s' using '%s' on %s",
                    self.op.command, oob_program, node.name)
-      result = self.rpc.call_run_oob(master_node, oob_program,
+      result = self.rpc.call_run_oob(self.master_node_uuid, oob_program,
                                      self.op.command, node.name,
                                      self.op.timeout)
 
@@ -234,7 +235,7 @@ class ExtStorageQuery(QueryBase):
 
     # The following variables interact with _QueryBase._GetNames
     if self.names:
-      self.wanted = self.names
+      self.wanted = [lu.cfg.GetNodeInfoByName(name).uuid for name in self.names]
     else:
       self.wanted = locking.ALL_SET
 
@@ -247,16 +248,16 @@ class ExtStorageQuery(QueryBase):
   def _DiagnoseByProvider(rlist):
     """Remaps a per-node return list into a per-provider per-node dictionary
 
-    @param rlist: a map with node names as keys and ExtStorage objects as values
+    @param rlist: a map with node uuids as keys and ExtStorage objects as values
 
     @rtype: dict
     @return: a dictionary with extstorage providers as keys and as
-        value another map, with nodes as keys and tuples of
+        value another map, with node uuids as keys and tuples of
         (path, status, diagnose, parameters) as values, eg::
 
-          {"provider1": {"node1": [(/usr/lib/..., True, "", [])]
-                         "node2": [(/srv/..., False, "missing file")]
-                         "node3": [(/srv/..., True, "", [])]
+          {"provider1": {"node_uuid1": [(/usr/lib/..., True, "", [])]
+                         "node_uuid2": [(/srv/..., False, "missing file")]
+                         "node_uuid3": [(/srv/..., True, "", [])]
           }
 
     """
@@ -264,9 +265,9 @@ class ExtStorageQuery(QueryBase):
     # we build here the list of nodes that didn't fail the RPC (at RPC
     # level), so that nodes with a non-responding node daemon don't
     # make all OSes invalid
-    good_nodes = [node_name for node_name in rlist
-                  if not rlist[node_name].fail_msg]
-    for node_name, nr in rlist.items():
+    good_nodes = [node_uuid for node_uuid in rlist
+                  if not rlist[node_uuid].fail_msg]
+    for node_uuid, nr in rlist.items():
       if nr.fail_msg or not nr.payload:
         continue
       for (name, path, status, diagnose, params) in nr.payload:
@@ -274,11 +275,11 @@ class ExtStorageQuery(QueryBase):
           # build a list of nodes for this os containing empty lists
           # for each node in node_list
           all_es[name] = {}
-          for nname in good_nodes:
-            all_es[name][nname] = []
+          for nuuid in good_nodes:
+            all_es[name][nuuid] = []
         # convert params from [name, help] to (name, help)
         params = [tuple(v) for v in params]
-        all_es[name][node_name].append((path, status, diagnose, params))
+        all_es[name][node_uuid].append((path, status, diagnose, params))
     return all_es
 
   def _GetQueryData(self, lu):
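
A self-contained rendition of the remapping documented above, with toy result objects: per-node-UUID RPC results become a per-provider, per-node-UUID dictionary, and nodes whose call failed at the RPC level are left out entirely.

  class ToyResult(object):
      def __init__(self, payload, fail_msg=None):
          self.payload = payload    # list of (name, path, status, diag, params)
          self.fail_msg = fail_msg

  def diagnose_by_provider(rlist):
      all_es = {}
      good_nodes = [uuid for uuid in rlist if not rlist[uuid].fail_msg]
      for node_uuid, nr in rlist.items():
          if nr.fail_msg or not nr.payload:
              continue
          for (name, path, status, diag, params) in nr.payload:
              if name not in all_es:
                  all_es[name] = dict((uuid, []) for uuid in good_nodes)
              all_es[name][node_uuid].append((path, status, diag,
                                              [tuple(v) for v in params]))
      return all_es

  rlist = {"uuid-1": ToyResult([("prov1", "/usr/lib/prov1", True, "", [])]),
           "uuid-2": ToyResult(None, fail_msg="node down")}
  print(diagnose_by_provider(rlist))
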
@@ -291,7 +292,7 @@ class ExtStorageQuery(QueryBase):
                            if level != locking.LEVEL_CLUSTER) or
                 self.do_locking or self.use_locking)
 
-    valid_nodes = [node.name
+    valid_nodes = [node.uuid
                    for node in lu.cfg.GetAllNodesInfo().values()
                    if not node.offline and node.vm_capable]
     pol = self._DiagnoseByProvider(lu.rpc.call_extstorage_diagnose(valid_nodes))
@@ -382,10 +383,10 @@ class LURestrictedCommand(NoHooksLU):
 
   def ExpandNames(self):
     if self.op.nodes:
-      self.op.nodes = GetWantedNodes(self, self.op.nodes)
+      (self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, self.op.nodes)
 
     self.needed_locks = {
-      locking.LEVEL_NODE: self.op.nodes,
+      locking.LEVEL_NODE: self.op.node_uuids,
       }
     self.share_locks = {
       locking.LEVEL_NODE: not self.op.use_locking,
@@ -403,17 +404,19 @@ class LURestrictedCommand(NoHooksLU):
     owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
 
     # Check if correct locks are held
-    assert set(self.op.nodes).issubset(owned_nodes)
+    assert set(self.op.node_uuids).issubset(owned_nodes)
 
-    rpcres = self.rpc.call_restricted_command(self.op.nodes, self.op.command)
+    rpcres = self.rpc.call_restricted_command(self.op.node_uuids,
+                                              self.op.command)
 
     result = []
 
-    for node_name in self.op.nodes:
-      nres = rpcres[node_name]
+    for node_uuid in self.op.node_uuids:
+      nres = rpcres[node_uuid]
       if nres.fail_msg:
         msg = ("Command '%s' on node '%s' failed: %s" %
-               (self.op.command, node_name, nres.fail_msg))
+               (self.op.command, self.cfg.GetNodeName(node_uuid),
+                nres.fail_msg))
         result.append((False, msg))
       else:
         result.append((True, nres.payload))
index 2f2dcd7..9befa61 100644 (file)
@@ -608,8 +608,8 @@ class LUNetworkConnect(LogicalUnit):
     return ret
 
   def BuildHooksNodes(self):
-    nodes = self.cfg.GetNodeGroup(self.group_uuid).members
-    return (nodes, nodes)
+    node_uuids = self.cfg.GetNodeGroup(self.group_uuid).members
+    return (node_uuids, node_uuids)
 
   def CheckPrereq(self):
     owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
index ac5cc3b..d766aa3 100644 (file)
@@ -41,7 +41,7 @@ from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, QueryBase, \
 from ganeti.cmdlib.common import CheckParamsNotGlobal, \
   MergeAndVerifyHvState, MergeAndVerifyDiskState, \
   IsExclusiveStorageEnabledNode, CheckNodePVs, \
-  RedistributeAncillaryFiles, ExpandNodeName, ShareAll, SupportsOob, \
+  RedistributeAncillaryFiles, ExpandNodeUuidAndName, ShareAll, SupportsOob, \
   CheckInstanceState, INSTANCE_DOWN, GetUpdatedParams, \
   AdjustCandidatePool, CheckIAllocatorOrNode, LoadNodeEvacResult, \
   GetWantedNodes, MapInstanceDisksToNodes, RunPostHook, \
@@ -64,18 +64,21 @@ def _CheckNodeHasSecondaryIP(lu, node, secondary_ip, prereq):
 
   @type lu: L{LogicalUnit}
   @param lu: the LU on behalf of which we make the check
-  @type node: string
+  @type node: L{objects.Node}
   @param node: the node to check
   @type secondary_ip: string
   @param secondary_ip: the ip to check
   @type prereq: boolean
   @param prereq: whether to throw a prerequisite or an execute error
-  @raise errors.OpPrereqError: if the node doesn't have the ip, and prereq=True
+  @raise errors.OpPrereqError: if the node doesn't have the ip,
+  and prereq=True
   @raise errors.OpExecError: if the node doesn't have the ip, and prereq=False
 
   """
-  result = lu.rpc.call_node_has_ip_address(node, secondary_ip)
-  result.Raise("Failure checking secondary ip on node %s" % node,
+  # this can be called with a new node, which has no UUID yet, so perform the
+  # RPC call using its name
+  result = lu.rpc.call_node_has_ip_address(node.name, secondary_ip)
+  result.Raise("Failure checking secondary ip on node %s" % node.name,
                prereq=prereq, ecode=errors.ECODE_ENVIRON)
   if not result.payload:
     msg = ("Node claims it doesn't have the secondary ip you gave (%s),"
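
As the comment in this hunk notes, the helper can be handed a node that is being added and therefore has no UUID yet, which is why this one RPC is addressed by name. A tiny illustration, with dicts standing in for objects.Node:

  existing = {"uuid": "uuid-1", "name": "node1.example.com"}
  being_added = {"uuid": None, "name": "node9.example.com"}

  def secondary_ip_check_target(node):
      # may run against a node not yet in the configuration (no UUID), so
      # the call is addressed by name
      return node["name"]

  def regular_rpc_target(node):
      # elsewhere, configured nodes are addressed by their UUID
      return node["uuid"] if node["uuid"] else node["name"]

  assert secondary_ip_check_target(being_added) == "node9.example.com"
  assert regular_rpc_target(existing) == "uuid-1"
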
@@ -101,7 +104,7 @@ class LUNodeAdd(LogicalUnit):
                                          family=self.primary_ip_family)
     self.op.node_name = self.hostname.name
 
-    if self.op.readd and self.op.node_name == self.cfg.GetMasterNode():
+    if self.op.readd and self.op.node_name == self.cfg.GetMasterNodeName():
       raise errors.OpPrereqError("Cannot readd the master node",
                                  errors.ECODE_STATE)
 
@@ -128,11 +131,14 @@ class LUNodeAdd(LogicalUnit):
     """Build hooks nodes.
 
     """
-    # Exclude added node
-    pre_nodes = list(set(self.cfg.GetNodeList()) - set([self.op.node_name]))
-    post_nodes = pre_nodes + [self.op.node_name, ]
+    hook_nodes = self.cfg.GetNodeList()
+    new_node_info = self.cfg.GetNodeInfoByName(self.op.node_name)
+    if new_node_info is not None:
+      # Exclude added node
+      hook_nodes = list(set(hook_nodes) - set([new_node_info.uuid]))
 
-    return (pre_nodes, post_nodes)
+    # add the new node as post hook node by name; it does not have an UUID yet
+    return (hook_nodes, hook_nodes, [self.op.node_name, ])
 
   def CheckPrereq(self):
     """Check prerequisites.
@@ -147,7 +153,7 @@ class LUNodeAdd(LogicalUnit):
     """
     cfg = self.cfg
     hostname = self.hostname
-    node = hostname.name
+    node_name = hostname.name
     primary_ip = self.op.primary_ip = hostname.ip
     if self.op.secondary_ip is None:
       if self.primary_ip_family == netutils.IP6Address.family:
@@ -161,18 +167,18 @@ class LUNodeAdd(LogicalUnit):
       raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
                                  " address" % secondary_ip, errors.ECODE_INVAL)
 
-    node_list = cfg.GetNodeList()
-    if not self.op.readd and node in node_list:
+    existing_node_info = cfg.GetNodeInfoByName(node_name)
+    if not self.op.readd and existing_node_info is not None:
       raise errors.OpPrereqError("Node %s is already in the configuration" %
-                                 node, errors.ECODE_EXISTS)
-    elif self.op.readd and node not in node_list:
-      raise errors.OpPrereqError("Node %s is not in the configuration" % node,
-                                 errors.ECODE_NOENT)
+                                 node_name, errors.ECODE_EXISTS)
+    elif self.op.readd and existing_node_info is None:
+      raise errors.OpPrereqError("Node %s is not in the configuration" %
+                                 node_name, errors.ECODE_NOENT)
 
     self.changed_primary_ip = False
 
-    for existing_node_name, existing_node in cfg.GetMultiNodeInfo(node_list):
-      if self.op.readd and node == existing_node_name:
+    for existing_node in cfg.GetAllNodesInfo().values():
+      if self.op.readd and node_name == existing_node.name:
         if existing_node.secondary_ip != secondary_ip:
           raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                      " address configuration as before",
@@ -193,22 +199,22 @@ class LUNodeAdd(LogicalUnit):
     # After this 'if' block, None is no longer a valid value for the
     # _capable op attributes
     if self.op.readd:
-      old_node = self.cfg.GetNodeInfo(node)
-      assert old_node is not None, "Can't retrieve locked node %s" % node
+      assert existing_node_info is not None, \
+        "Can't retrieve locked node %s" % node_name
       for attr in self._NFLAGS:
         if getattr(self.op, attr) is None:
-          setattr(self.op, attr, getattr(old_node, attr))
+          setattr(self.op, attr, getattr(existing_node_info, attr))
     else:
       for attr in self._NFLAGS:
         if getattr(self.op, attr) is None:
           setattr(self.op, attr, True)
 
     if self.op.readd and not self.op.vm_capable:
-      pri, sec = cfg.GetNodeInstances(node)
+      pri, sec = cfg.GetNodeInstances(existing_node_info.uuid)
       if pri or sec:
         raise errors.OpPrereqError("Node %s being re-added with vm_capable"
                                    " flag set to false, but it already holds"
-                                   " instances" % node,
+                                   " instances" % node_name,
                                    errors.ECODE_STATE)
 
     # check that the type of the node (single versus dual homed) is the
@@ -240,7 +246,7 @@ class LUNodeAdd(LogicalUnit):
                                    errors.ECODE_ENVIRON)
 
     if self.op.readd:
-      exceptions = [node]
+      exceptions = [existing_node_info.uuid]
     else:
       exceptions = []
 
@@ -250,10 +256,10 @@ class LUNodeAdd(LogicalUnit):
       self.master_candidate = False
 
     if self.op.readd:
-      self.new_node = old_node
+      self.new_node = existing_node_info
     else:
       node_group = cfg.LookupNodeGroup(self.op.group)
-      self.new_node = objects.Node(name=node,
+      self.new_node = objects.Node(name=node_name,
                                    primary_ip=primary_ip,
                                    secondary_ip=secondary_ip,
                                    master_candidate=self.master_candidate,
@@ -274,11 +280,11 @@ class LUNodeAdd(LogicalUnit):
     # TODO: If we need to have multiple DnsOnlyRunner we probably should make
     #       it a property on the base class.
     rpcrunner = rpc.DnsOnlyRunner()
-    result = rpcrunner.call_version([node])[node]
-    result.Raise("Can't get version information from node %s" % node)
+    result = rpcrunner.call_version([node_name])[node_name]
+    result.Raise("Can't get version information from node %s" % node_name)
     if constants.PROTOCOL_VERSION == result.payload:
       logging.info("Communication to node %s fine, sw version %s match",
-                   node, result.payload)
+                   node_name, result.payload)
     else:
       raise errors.OpPrereqError("Version mismatch master version %s,"
                                  " node version %s" %
@@ -291,7 +297,7 @@ class LUNodeAdd(LogicalUnit):
       excl_stor = IsExclusiveStorageEnabledNode(cfg, self.new_node)
       cname = self.cfg.GetClusterName()
       result = rpcrunner.call_node_verify_light(
-          [node], vparams, cname, cfg.GetClusterInfo().hvparams)[node]
+          [node_name], vparams, cname, cfg.GetClusterInfo().hvparams)[node_name]
       (errmsgs, _) = CheckNodePVs(result.payload, excl_stor)
       if errmsgs:
         raise errors.OpPrereqError("Checks on node PVs failed: %s" %
@@ -302,7 +308,7 @@ class LUNodeAdd(LogicalUnit):
 
     """
     new_node = self.new_node
-    node = new_node.name
+    node_name = new_node.name
 
     assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER), \
       "Not owning BGL"
@@ -344,26 +350,25 @@ class LUNodeAdd(LogicalUnit):
     # Add node to our /etc/hosts, and add key to known_hosts
     if self.cfg.GetClusterInfo().modify_etc_hosts:
       master_node = self.cfg.GetMasterNode()
-      result = self.rpc.call_etc_hosts_modify(master_node,
-                                              constants.ETC_HOSTS_ADD,
-                                              self.hostname.name,
-                                              self.hostname.ip)
+      result = self.rpc.call_etc_hosts_modify(
+                 master_node, constants.ETC_HOSTS_ADD, self.hostname.name,
+                 self.hostname.ip)
       result.Raise("Can't update hosts file with new host data")
 
     if new_node.secondary_ip != new_node.primary_ip:
-      _CheckNodeHasSecondaryIP(self, new_node.name, new_node.secondary_ip,
-                               False)
+      _CheckNodeHasSecondaryIP(self, new_node, new_node.secondary_ip, False)
 
-    node_verify_list = [self.cfg.GetMasterNode()]
+    node_verifier_uuids = [self.cfg.GetMasterNode()]
     node_verify_param = {
-      constants.NV_NODELIST: ([node], {}),
+      constants.NV_NODELIST: ([node_name], {}),
       # TODO: do a node-net-test as well?
     }
 
-    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
-                                       self.cfg.GetClusterName(),
-                                       self.cfg.GetClusterInfo().hvparams)
-    for verifier in node_verify_list:
+    result = self.rpc.call_node_verify(
+               node_verifier_uuids, node_verify_param,
+               self.cfg.GetClusterName(),
+               self.cfg.GetClusterInfo().hvparams)
+    for verifier in node_verifier_uuids:
       result[verifier].Raise("Cannot communicate with node %s" % verifier)
       nl_payload = result[verifier].payload[constants.NV_NODELIST]
       if nl_payload:
@@ -374,19 +379,18 @@ class LUNodeAdd(LogicalUnit):
         raise errors.OpExecError("ssh/hostname verification failed")
 
     if self.op.readd:
-      RedistributeAncillaryFiles(self)
       self.context.ReaddNode(new_node)
+      RedistributeAncillaryFiles(self)
       # make sure we redistribute the config
       self.cfg.Update(new_node, feedback_fn)
       # and make sure the new node will not have old files around
       if not new_node.master_candidate:
-        result = self.rpc.call_node_demote_from_mc(new_node.name)
+        result = self.rpc.call_node_demote_from_mc(new_node.uuid)
         result.Warn("Node failed to demote itself from master candidate status",
                     self.LogWarning)
     else:
-      RedistributeAncillaryFiles(self, additional_nodes=[node],
-                                 additional_vm=self.op.vm_capable)
       self.context.AddNode(new_node, self.proc.GetECId())
+      RedistributeAncillaryFiles(self)
 
 
 class LUNodeSetParams(LogicalUnit):
@@ -412,7 +416,8 @@ class LUNodeSetParams(LogicalUnit):
   _FLAGS = ["master_candidate", "drained", "offline"]
 
   def CheckArguments(self):
-    self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
+    (self.op.node_uuid, self.op.node_name) = \
+      ExpandNodeUuidAndName(self.cfg, self.op.node_uuid, self.op.node_name)
     all_mods = [self.op.offline, self.op.master_candidate, self.op.drained,
                 self.op.master_capable, self.op.vm_capable,
                 self.op.secondary_ip, self.op.ndparams, self.op.hv_state,
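
Single-node opcodes now carry both identifiers; ExpandNodeUuidAndName fills in whichever one is missing. Below is a toy mirror of that contract under the assumption that a missing value is simply resolved from the other identifier; any extra validation the real helper performs is omitted.

  def toy_expand_node_uuid_and_name(uuid_to_name, name_to_uuid,
                                    node_uuid, node_name):
      if node_uuid is None:
          node_uuid = name_to_uuid[node_name]
      if node_name is None:
          node_name = uuid_to_name[node_uuid]
      return (node_uuid, node_name)

  uuid_to_name = {"uuid-1": "node1.example.com"}
  name_to_uuid = dict((v, k) for (k, v) in uuid_to_name.items())

  # first submission: only the name is known; the resolved UUID is stored
  # alongside it in the opcode
  print(toy_expand_node_uuid_and_name(uuid_to_name, name_to_uuid,
                                      None, "node1.example.com"))
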
@@ -445,7 +450,7 @@ class LUNodeSetParams(LogicalUnit):
 
     """
     return (instance.disk_template in constants.DTS_INT_MIRROR and
-            self.op.node_name in instance.all_nodes)
+            self.op.node_uuid in instance.all_nodes)
 
   def ExpandNames(self):
     if self.lock_all:
@@ -457,7 +462,7 @@ class LUNodeSetParams(LogicalUnit):
         }
     else:
       self.needed_locks = {
-        locking.LEVEL_NODE: self.op.node_name,
+        locking.LEVEL_NODE: self.op.node_uuid,
         }
 
     # Since modifying a node can have severe effects on currently running
@@ -495,7 +500,7 @@ class LUNodeSetParams(LogicalUnit):
     """Build hooks nodes.
 
     """
-    nl = [self.cfg.GetMasterNode(), self.op.node_name]
+    nl = [self.cfg.GetMasterNode(), self.op.node_uuid]
     return (nl, nl)
 
   def CheckPrereq(self):
@@ -504,8 +509,7 @@ class LUNodeSetParams(LogicalUnit):
     This only checks the instance list against the existing names.
 
     """
-    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
-
+    node = self.cfg.GetNodeInfo(self.op.node_uuid)
     if self.lock_instances:
       affected_instances = \
         self.cfg.GetInstancesInfoByFilter(self._InstanceFilter)
@@ -518,7 +522,7 @@ class LUNodeSetParams(LogicalUnit):
                                    " secondary IP address have changed since"
                                    " locks were acquired, wanted '%s', have"
                                    " '%s'; retry the operation" %
-                                   (self.op.node_name,
+                                   (node.name,
                                     utils.CommaJoin(wanted_instances),
                                     utils.CommaJoin(owned_instances)),
                                    errors.ECODE_STATE)
@@ -529,7 +533,7 @@ class LUNodeSetParams(LogicalUnit):
         self.op.drained is not None or
         self.op.offline is not None):
       # we can't change the master's node flags
-      if self.op.node_name == self.cfg.GetMasterNode():
+      if node.uuid == self.cfg.GetMasterNode():
         raise errors.OpPrereqError("The master role can be changed"
                                    " only via master-failover",
                                    errors.ECODE_INVAL)
@@ -540,7 +544,7 @@ class LUNodeSetParams(LogicalUnit):
                                  errors.ECODE_STATE)
 
     if self.op.vm_capable is False:
-      (ipri, isec) = self.cfg.GetNodeInstances(self.op.node_name)
+      (ipri, isec) = self.cfg.GetNodeInstances(node.uuid)
       if ipri or isec:
         raise errors.OpPrereqError("Node %s hosts instances, cannot unset"
                                    " the vm_capable flag" % node.name,
@@ -551,7 +555,7 @@ class LUNodeSetParams(LogicalUnit):
       # check if after removing the current node, we're missing master
       # candidates
       (mc_remaining, mc_should, _) = \
-          self.cfg.GetMasterCandidateStats(exceptions=[node.name])
+          self.cfg.GetMasterCandidateStats(exceptions=[node.uuid])
       if mc_remaining < mc_should:
         raise errors.OpPrereqError("Not enough master candidates, please"
                                    " pass auto promote option to allow"
@@ -565,7 +569,7 @@ class LUNodeSetParams(LogicalUnit):
 
     # Check for ineffective changes
     for attr in self._FLAGS:
-      if (getattr(self.op, attr) is False and getattr(node, attr) is False):
+      if getattr(self.op, attr) is False and getattr(node, attr) is False:
         self.LogInfo("Ignoring request to unset flag %s, already unset", attr)
         setattr(self.op, attr, None)
 
@@ -616,7 +620,7 @@ class LUNodeSetParams(LogicalUnit):
 
     if old_role == self._ROLE_OFFLINE and new_role != old_role:
       # Trying to transition out of offline status
-      result = self.rpc.call_version([node.name])[node.name]
+      result = self.rpc.call_version([node.uuid])[node.uuid]
       if result.fail_msg:
         raise errors.OpPrereqError("Node %s is being de-offlined but fails"
                                    " to report its version: %s" %
@@ -635,7 +639,7 @@ class LUNodeSetParams(LogicalUnit):
       master = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
       master_singlehomed = master.secondary_ip == master.primary_ip
       if master_singlehomed and self.op.secondary_ip != node.primary_ip:
-        if self.op.force and node.name == master.name:
+        if self.op.force and node.uuid == master.uuid:
           self.LogWarning("Transitioning from single-homed to multi-homed"
                           " cluster; all nodes will require a secondary IP"
                           " address")
@@ -646,7 +650,7 @@ class LUNodeSetParams(LogicalUnit):
                                      " target node to be the master",
                                      errors.ECODE_INVAL)
       elif not master_singlehomed and self.op.secondary_ip == node.primary_ip:
-        if self.op.force and node.name == master.name:
+        if self.op.force and node.uuid == master.uuid:
           self.LogWarning("Transitioning from multi-homed to single-homed"
                           " cluster; secondary IP addresses will have to be"
                           " removed")
@@ -673,8 +677,8 @@ class LUNodeSetParams(LogicalUnit):
           CheckInstanceState(self, instance, INSTANCE_DOWN,
                              msg="cannot change secondary ip")
 
-        _CheckNodeHasSecondaryIP(self, node.name, self.op.secondary_ip, True)
-        if master.name != node.name:
+        _CheckNodeHasSecondaryIP(self, node, self.op.secondary_ip, True)
+        if master.uuid != node.uuid:
           # check reachability from master secondary ip to new secondary ip
           if not netutils.TcpPing(self.op.secondary_ip,
                                   constants.DEFAULT_NODED_PORT,
@@ -684,7 +688,7 @@ class LUNodeSetParams(LogicalUnit):
                                        errors.ECODE_ENVIRON)
 
     if self.op.ndparams:
-      new_ndparams = GetUpdatedParams(self.node.ndparams, self.op.ndparams)
+      new_ndparams = GetUpdatedParams(node.ndparams, self.op.ndparams)
       utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
       CheckParamsNotGlobal(self.op.ndparams, constants.NDC_GLOBALS, "node",
                            "node", "cluster or group")
@@ -692,18 +696,17 @@ class LUNodeSetParams(LogicalUnit):
 
     if self.op.hv_state:
       self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state,
-                                                self.node.hv_state_static)
+                                                node.hv_state_static)
 
     if self.op.disk_state:
       self.new_disk_state = \
-        MergeAndVerifyDiskState(self.op.disk_state,
-                                self.node.disk_state_static)
+        MergeAndVerifyDiskState(self.op.disk_state, node.disk_state_static)
 
   def Exec(self, feedback_fn):
     """Modifies a node.
 
     """
-    node = self.node
+    node = self.cfg.GetNodeInfo(self.op.node_uuid)
     old_role = self.old_role
     new_role = self.new_role
 
@@ -742,7 +745,7 @@ class LUNodeSetParams(LogicalUnit):
 
       # we locked all nodes, we adjust the CP before updating this node
       if self.lock_all:
-        AdjustCandidatePool(self, [node.name])
+        AdjustCandidatePool(self, [node.uuid])
 
     if self.op.secondary_ip:
       node.secondary_ip = self.op.secondary_ip
@@ -766,8 +769,10 @@ class LUNodePowercycle(NoHooksLU):
   REQ_BGL = False
 
   def CheckArguments(self):
-    self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
-    if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
+    (self.op.node_uuid, self.op.node_name) = \
+      ExpandNodeUuidAndName(self.cfg, self.op.node_uuid, self.op.node_name)
+
+    if self.op.node_uuid == self.cfg.GetMasterNode() and not self.op.force:
       raise errors.OpPrereqError("The node is the master and the force"
                                  " parameter was not set",
                                  errors.ECODE_INVAL)
@@ -787,7 +792,7 @@ class LUNodePowercycle(NoHooksLU):
     """
     default_hypervisor = self.cfg.GetHypervisorType()
     hvparams = self.cfg.GetClusterInfo().hvparams[default_hypervisor]
-    result = self.rpc.call_node_powercycle(self.op.node_name,
+    result = self.rpc.call_node_powercycle(self.op.node_uuid,
                                            default_hypervisor,
                                            hvparams)
     result.Raise("Failed to schedule the reboot")
@@ -798,28 +803,28 @@ def _GetNodeInstancesInner(cfg, fn):
   return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]
 
 
-def _GetNodePrimaryInstances(cfg, node_name):
+def _GetNodePrimaryInstances(cfg, node_uuid):
   """Returns primary instances on a node.
 
   """
   return _GetNodeInstancesInner(cfg,
-                                lambda inst: node_name == inst.primary_node)
+                                lambda inst: node_uuid == inst.primary_node)
 
 
-def _GetNodeSecondaryInstances(cfg, node_name):
+def _GetNodeSecondaryInstances(cfg, node_uuid):
   """Returns secondary instances on a node.
 
   """
   return _GetNodeInstancesInner(cfg,
-                                lambda inst: node_name in inst.secondary_nodes)
+                                lambda inst: node_uuid in inst.secondary_nodes)
 
 
-def _GetNodeInstances(cfg, node_name):
+def _GetNodeInstances(cfg, node_uuid):
   """Returns a list of all primary and secondary instances on a node.
 
   """
 
-  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)
+  return _GetNodeInstancesInner(cfg, lambda inst: node_uuid in inst.all_nodes)
 
 
 class LUNodeEvacuate(NoHooksLU):
@@ -841,13 +846,16 @@ class LUNodeEvacuate(NoHooksLU):
     CheckIAllocatorOrNode(self, "iallocator", "remote_node")
 
   def ExpandNames(self):
-    self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
+    (self.op.node_uuid, self.op.node_name) = \
+      ExpandNodeUuidAndName(self.cfg, self.op.node_uuid, self.op.node_name)
 
     if self.op.remote_node is not None:
-      self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
+      (self.op.remote_node_uuid, self.op.remote_node) = \
+        ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
+                              self.op.remote_node)
       assert self.op.remote_node
 
-      if self.op.remote_node == self.op.node_name:
+      if self.op.node_uuid == self.op.remote_node_uuid:
         raise errors.OpPrereqError("Can not use evacuated node as a new"
                                    " secondary node", errors.ECODE_INVAL)
 
@@ -869,17 +877,17 @@ class LUNodeEvacuate(NoHooksLU):
     self.lock_nodes = self._DetermineNodes()
 
   def _DetermineNodes(self):
-    """Gets the list of nodes to operate on.
+    """Gets the list of node UUIDs to operate on.
 
     """
     if self.op.remote_node is None:
       # Iallocator will choose any node(s) in the same group
-      group_nodes = self.cfg.GetNodeGroupMembersByNodes([self.op.node_name])
+      group_nodes = self.cfg.GetNodeGroupMembersByNodes([self.op.node_uuid])
     else:
-      group_nodes = frozenset([self.op.remote_node])
+      group_nodes = frozenset([self.op.remote_node_uuid])
 
     # Determine nodes to be locked
-    return set([self.op.node_name]) | group_nodes
+    return set([self.op.node_uuid]) | group_nodes
 
   def _DetermineInstances(self):
     """Builds list of instances to operate on.
@@ -908,7 +916,7 @@ class LUNodeEvacuate(NoHooksLU):
                                  " instances",
                                  errors.ECODE_INVAL)
 
-    return inst_fn(self.cfg, self.op.node_name)
+    return inst_fn(self.cfg, self.op.node_uuid)
 
   def DeclareLocks(self, level):
     if level == locking.LEVEL_INSTANCE:
@@ -974,7 +982,7 @@ class LUNodeEvacuate(NoHooksLU):
 
     if self.op.remote_node is not None:
       for i in self.instances:
-        if i.primary_node == self.op.remote_node:
+        if i.primary_node == self.op.remote_node_uuid:
           raise errors.OpPrereqError("Node %s is the primary node of"
                                      " instance %s, cannot use it as"
                                      " secondary" %
@@ -1033,11 +1041,12 @@ class LUNodeMigrate(LogicalUnit):
     pass
 
   def ExpandNames(self):
-    self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
+    (self.op.node_uuid, self.op.node_name) = \
+      ExpandNodeUuidAndName(self.cfg, self.op.node_uuid, self.op.node_name)
 
     self.share_locks = ShareAll()
     self.needed_locks = {
-      locking.LEVEL_NODE: [self.op.node_name],
+      locking.LEVEL_NODE: [self.op.node_uuid],
       }
 
   def BuildHooksEnv(self):
@@ -1072,7 +1081,7 @@ class LUNodeMigrate(LogicalUnit):
                                  target_node=self.op.target_node,
                                  allow_runtime_changes=allow_runtime_changes,
                                  ignore_ipolicy=self.op.ignore_ipolicy)]
-      for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name)]
+      for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_uuid)]
 
     # TODO: Run iallocator in this opcode and pass correct placement options to
     # OpInstanceMigrate. Since other jobs can modify the cluster between
@@ -1104,7 +1113,8 @@ class LUNodeModifyStorage(NoHooksLU):
   REQ_BGL = False
 
   def CheckArguments(self):
-    self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
+    (self.op.node_uuid, self.op.node_name) = \
+      ExpandNodeUuidAndName(self.cfg, self.op.node_uuid, self.op.node_name)
 
     storage_type = self.op.storage_type
 
@@ -1124,7 +1134,7 @@ class LUNodeModifyStorage(NoHooksLU):
 
   def ExpandNames(self):
     self.needed_locks = {
-      locking.LEVEL_NODE: self.op.node_name,
+      locking.LEVEL_NODE: self.op.node_uuid,
       }
 
   def Exec(self, feedback_fn):
@@ -1132,7 +1142,7 @@ class LUNodeModifyStorage(NoHooksLU):
 
     """
     st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
-    result = self.rpc.call_storage_modify(self.op.node_name,
+    result = self.rpc.call_storage_modify(self.op.node_uuid,
                                           self.op.storage_type, st_args,
                                           self.op.name, self.op.changes)
     result.Raise("Failed to modify storage unit '%s' on %s" %
@@ -1147,7 +1157,7 @@ class NodeQuery(QueryBase):
     lu.share_locks = ShareAll()
 
     if self.names:
-      self.wanted = GetWantedNodes(lu, self.names)
+      (self.wanted, _) = GetWantedNodes(lu, self.names)
     else:
       self.wanted = locking.ALL_SET
 
@@ -1168,31 +1178,32 @@ class NodeQuery(QueryBase):
     """
     all_info = lu.cfg.GetAllNodesInfo()
 
-    nodenames = self._GetNames(lu, all_info.keys(), locking.LEVEL_NODE)
+    node_uuids = self._GetNames(lu, all_info.keys(), locking.LEVEL_NODE)
 
     # Gather data as requested
     if query.NQ_LIVE in self.requested_data:
       # filter out non-vm_capable nodes
-      toquery_nodes = [name for name in nodenames if all_info[name].vm_capable]
+      toquery_node_uuids = [node.uuid for node in all_info.values()
+                            if node.vm_capable and node.uuid in node_uuids]
 
-      es_flags = rpc.GetExclusiveStorageForNodeNames(lu.cfg, toquery_nodes)
+      es_flags = rpc.GetExclusiveStorageForNodes(lu.cfg, toquery_node_uuids)
       # FIXME: This currently maps everything to lvm, this should be more
       # flexible
       vg_req = rpc.BuildVgInfoQuery(lu.cfg)
       default_hypervisor = lu.cfg.GetHypervisorType()
       hvparams = lu.cfg.GetClusterInfo().hvparams[default_hypervisor]
       hvspecs = [(default_hypervisor, hvparams)]
-      node_data = lu.rpc.call_node_info(toquery_nodes, vg_req,
+      node_data = lu.rpc.call_node_info(toquery_node_uuids, vg_req,
                                         hvspecs, es_flags)
-      live_data = dict((name, rpc.MakeLegacyNodeInfo(nresult.payload))
-                       for (name, nresult) in node_data.items()
+      live_data = dict((uuid, rpc.MakeLegacyNodeInfo(nresult.payload))
+                       for (uuid, nresult) in node_data.items()
                        if not nresult.fail_msg and nresult.payload)
     else:
       live_data = None
 
     if query.NQ_INST in self.requested_data:
-      node_to_primary = dict([(name, set()) for name in nodenames])
-      node_to_secondary = dict([(name, set()) for name in nodenames])
+      node_to_primary = dict([(uuid, set()) for uuid in node_uuids])
+      node_to_secondary = dict([(uuid, set()) for uuid in node_uuids])
 
       inst_data = lu.cfg.GetAllInstancesInfo()
 
@@ -1207,8 +1218,8 @@ class NodeQuery(QueryBase):
       node_to_secondary = None
 
     if query.NQ_OOB in self.requested_data:
-      oob_support = dict((name, bool(SupportsOob(lu.cfg, node)))
-                         for name, node in all_info.iteritems())
+      oob_support = dict((uuid, bool(SupportsOob(lu.cfg, node)))
+                         for uuid, node in all_info.iteritems())
     else:
       oob_support = None
 
@@ -1217,7 +1228,7 @@ class NodeQuery(QueryBase):
     else:
       groups = {}
 
-    return query.NodeQueryData([all_info[name] for name in nodenames],
+    return query.NodeQueryData([all_info[uuid] for uuid in node_uuids],
                                live_data, lu.cfg.GetMasterNode(),
                                node_to_primary, node_to_secondary, groups,
                                oob_support, lu.cfg.GetClusterInfo())
@@ -1281,7 +1292,7 @@ class LUNodeQueryvols(NoHooksLU):
 
     if self.op.nodes:
       self.needed_locks = {
-        locking.LEVEL_NODE: GetWantedNodes(self, self.op.nodes),
+        locking.LEVEL_NODE: GetWantedNodes(self, self.op.nodes)[0],
         }
     else:
       self.needed_locks = {
@@ -1293,20 +1304,21 @@ class LUNodeQueryvols(NoHooksLU):
     """Computes the list of nodes and their attributes.
 
     """
-    nodenames = self.owned_locks(locking.LEVEL_NODE)
-    volumes = self.rpc.call_node_volumes(nodenames)
+    node_uuids = self.owned_locks(locking.LEVEL_NODE)
+    volumes = self.rpc.call_node_volumes(node_uuids)
 
     ilist = self.cfg.GetAllInstancesInfo()
     vol2inst = MapInstanceDisksToNodes(ilist.values())
 
     output = []
-    for node in nodenames:
-      nresult = volumes[node]
+    for node_uuid in node_uuids:
+      nresult = volumes[node_uuid]
       if nresult.offline:
         continue
       msg = nresult.fail_msg
       if msg:
-        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
+        self.LogWarning("Can't compute volume data on node %s: %s",
+                        self.cfg.GetNodeName(node_uuid), msg)
         continue
 
       node_vols = sorted(nresult.payload,
@@ -1316,7 +1328,7 @@ class LUNodeQueryvols(NoHooksLU):
         node_output = []
         for field in self.op.output_fields:
           if field == "node":
-            val = node
+            val = self.cfg.GetNodeName(node_uuid)
           elif field == "phys":
             val = vol["dev"]
           elif field == "vg":
@@ -1326,7 +1338,7 @@ class LUNodeQueryvols(NoHooksLU):
           elif field == "size":
             val = int(float(vol["size"]))
           elif field == "instance":
-            val = vol2inst.get((node, vol["vg"] + "/" + vol["name"]), "-")
+            val = vol2inst.get((node_uuid, vol["vg"] + "/" + vol["name"]), "-")
           else:
             raise errors.ParameterError(field)
           node_output.append(str(val))
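
The hunks above show the pattern used throughout the patch: locks and RPC results stay keyed by node UUID, and cfg.GetNodeName translates back to a hostname only at the presentation boundary (the "node" output field and the warning messages). A small hedged sketch of that translation step, with the function name and arguments invented for illustration:

    def node_volumes_by_name(cfg, volumes_by_uuid):
      # Illustrative sketch only: re-key per-node RPC payloads by hostname,
      # which is what user-facing output expects.
      return dict((cfg.GetNodeName(node_uuid), payload)
                  for (node_uuid, payload) in volumes_by_uuid.items())
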
@@ -1353,7 +1365,7 @@ class LUNodeQueryStorage(NoHooksLU):
 
     if self.op.nodes:
       self.needed_locks = {
-        locking.LEVEL_NODE: GetWantedNodes(self, self.op.nodes),
+        locking.LEVEL_NODE: GetWantedNodes(self, self.op.nodes)[0],
         }
     else:
       self.needed_locks = {
@@ -1365,7 +1377,7 @@ class LUNodeQueryStorage(NoHooksLU):
     """Computes the list of nodes and their attributes.
 
     """
-    self.nodes = self.owned_locks(locking.LEVEL_NODE)
+    self.node_uuids = self.owned_locks(locking.LEVEL_NODE)
 
     # Always get name to sort by
     if constants.SF_NAME in self.op.output_fields:
@@ -1382,20 +1394,22 @@ class LUNodeQueryStorage(NoHooksLU):
     name_idx = field_idx[constants.SF_NAME]
 
     st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
-    data = self.rpc.call_storage_list(self.nodes,
+    data = self.rpc.call_storage_list(self.node_uuids,
                                       self.op.storage_type, st_args,
                                       self.op.name, fields)
 
     result = []
 
-    for node in utils.NiceSort(self.nodes):
-      nresult = data[node]
+    for node_uuid in utils.NiceSort(self.node_uuids):
+      node_name = self.cfg.GetNodeName(node_uuid)
+      nresult = data[node_uuid]
       if nresult.offline:
         continue
 
       msg = nresult.fail_msg
       if msg:
-        self.LogWarning("Can't get storage data from node %s: %s", node, msg)
+        self.LogWarning("Can't get storage data from node %s: %s",
+                        node_name, msg)
         continue
 
       rows = dict([(row[name_idx], row) for row in nresult.payload])
@@ -1407,7 +1421,7 @@ class LUNodeQueryStorage(NoHooksLU):
 
         for field in self.op.output_fields:
           if field == constants.SF_NODE:
-            val = node
+            val = node_name
           elif field == constants.SF_TYPE:
             val = self.op.storage_type
           elif field in field_idx:
@@ -1447,7 +1461,7 @@ class LUNodeRemove(LogicalUnit):
     """
     all_nodes = self.cfg.GetNodeList()
     try:
-      all_nodes.remove(self.op.node_name)
+      all_nodes.remove(self.op.node_uuid)
     except ValueError:
       pass
     return (all_nodes, all_nodes)
@@ -1463,17 +1477,18 @@ class LUNodeRemove(LogicalUnit):
     Any errors are signaled by raising errors.OpPrereqError.
 
     """
-    self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
-    node = self.cfg.GetNodeInfo(self.op.node_name)
+    (self.op.node_uuid, self.op.node_name) = \
+      ExpandNodeUuidAndName(self.cfg, self.op.node_uuid, self.op.node_name)
+    node = self.cfg.GetNodeInfo(self.op.node_uuid)
     assert node is not None
 
     masternode = self.cfg.GetMasterNode()
-    if node.name == masternode:
+    if node.uuid == masternode:
       raise errors.OpPrereqError("Node is the master node, failover to another"
                                  " node is required", errors.ECODE_INVAL)
 
     for instance_name, instance in self.cfg.GetAllInstancesInfo().items():
-      if node.name in instance.all_nodes:
+      if node.uuid in instance.all_nodes:
         raise errors.OpPrereqError("Instance %s is still running on the node,"
                                    " please remove first" % instance_name,
                                    errors.ECODE_INVAL)
@@ -1494,12 +1509,14 @@ class LUNodeRemove(LogicalUnit):
       "Not owning BGL"
 
     # Promote nodes to master candidate as needed
-    AdjustCandidatePool(self, exceptions=[node.name])
-    self.context.RemoveNode(node.name)
+    AdjustCandidatePool(self, exceptions=[node.uuid])
+    self.context.RemoveNode(node)
 
     # Run post hooks on the node before it's removed
     RunPostHook(self, node.name)
 
+    # we have to call this by name rather than by UUID, as the node is no longer
+    # in the config
     result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
     msg = result.fail_msg
     if msg:
@@ -1508,8 +1525,8 @@ class LUNodeRemove(LogicalUnit):
 
     # Remove node from our /etc/hosts
     if self.cfg.GetClusterInfo().modify_etc_hosts:
-      master_node = self.cfg.GetMasterNode()
-      result = self.rpc.call_etc_hosts_modify(master_node,
+      master_node_uuid = self.cfg.GetMasterNode()
+      result = self.rpc.call_etc_hosts_modify(master_node_uuid,
                                               constants.ETC_HOSTS_REMOVE,
                                               node.name, None)
       result.Raise("Can't update hosts file with new host data")
@@ -1523,7 +1540,8 @@ class LURepairNodeStorage(NoHooksLU):
   REQ_BGL = False
 
   def CheckArguments(self):
-    self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
+    (self.op.node_uuid, self.op.node_name) = \
+      ExpandNodeUuidAndName(self.cfg, self.op.node_uuid, self.op.node_name)
 
     storage_type = self.op.storage_type
 
@@ -1535,16 +1553,18 @@ class LURepairNodeStorage(NoHooksLU):
 
   def ExpandNames(self):
     self.needed_locks = {
-      locking.LEVEL_NODE: [self.op.node_name],
+      locking.LEVEL_NODE: [self.op.node_uuid],
       }
 
-  def _CheckFaultyDisks(self, instance, node_name):
+  def _CheckFaultyDisks(self, instance, node_uuid):
     """Ensure faulty disks abort the opcode or at least warn."""
     try:
       if FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
-                                 node_name, True):
+                                 node_uuid, True):
         raise errors.OpPrereqError("Instance '%s' has faulty disks on"
-                                   " node '%s'" % (instance.name, node_name),
+                                   " node '%s'" %
+                                   (instance.name,
+                                    self.cfg.GetNodeName(node_uuid)),
                                    errors.ECODE_STATE)
     except errors.OpPrereqError, err:
       if self.op.ignore_consistency:
@@ -1557,20 +1577,20 @@ class LURepairNodeStorage(NoHooksLU):
 
     """
     # Check whether any instance on this node has faulty disks
-    for inst in _GetNodeInstances(self.cfg, self.op.node_name):
+    for inst in _GetNodeInstances(self.cfg, self.op.node_uuid):
       if not inst.disks_active:
         continue
       check_nodes = set(inst.all_nodes)
-      check_nodes.discard(self.op.node_name)
-      for inst_node_name in check_nodes:
-        self._CheckFaultyDisks(inst, inst_node_name)
+      check_nodes.discard(self.op.node_uuid)
+      for inst_node_uuid in check_nodes:
+        self._CheckFaultyDisks(inst, inst_node_uuid)
 
   def Exec(self, feedback_fn):
     feedback_fn("Repairing storage unit '%s' on %s ..." %
                 (self.op.name, self.op.node_name))
 
     st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
-    result = self.rpc.call_storage_execute(self.op.node_name,
+    result = self.rpc.call_storage_execute(self.op.node_uuid,
                                            self.op.storage_type, st_args,
                                            self.op.name,
                                            constants.SO_FIX_CONSISTENCY)
index ea4bca5..31501c6 100644 (file)
@@ -58,12 +58,12 @@ class OsQuery(QueryBase):
 
     @rtype: dict
     @return: a dictionary with osnames as keys and as value another
-        map, with nodes as keys and tuples of (path, status, diagnose,
+        map, with node UUIDs as keys and tuples of (path, status, diagnose,
         variants, parameters, api_versions) as values, eg::
 
-          {"debian-etch": {"node1": [(/usr/lib/..., True, "", [], []),
-                                     (/srv/..., False, "invalid api")],
-                           "node2": [(/srv/..., True, "", [], [])]}
+          {"debian-etch": {"node1-uuid": [(/usr/lib/..., True, "", [], []),
+                                          (/srv/..., False, "invalid api")],
+                           "node2-uuid": [(/srv/..., True, "", [], [])]}
           }
 
     """
@@ -71,9 +71,9 @@ class OsQuery(QueryBase):
     # we build here the list of nodes that didn't fail the RPC (at RPC
     # level), so that nodes with a non-responding node daemon don't
     # make all OSes invalid
-    good_nodes = [node_name for node_name in rlist
-                  if not rlist[node_name].fail_msg]
-    for node_name, nr in rlist.items():
+    good_node_uuids = [node_uuid for node_uuid in rlist
+                       if not rlist[node_uuid].fail_msg]
+    for node_uuid, nr in rlist.items():
       if nr.fail_msg or not nr.payload:
         continue
       for (name, path, status, diagnose, variants,
@@ -82,11 +82,11 @@ class OsQuery(QueryBase):
           # build a list of nodes for this os containing empty lists
           # for each node in node_list
           all_os[name] = {}
-          for nname in good_nodes:
-            all_os[name][nname] = []
+          for nuuid in good_node_uuids:
+            all_os[name][nuuid] = []
         # convert params from [name, help] to (name, help)
         params = [tuple(v) for v in params]
-        all_os[name][node_name].append((path, status, diagnose,
+        all_os[name][node_uuid].append((path, status, diagnose,
                                         variants, params, api_versions))
     return all_os
 
@@ -100,10 +100,10 @@ class OsQuery(QueryBase):
                            if level != locking.LEVEL_CLUSTER) or
                 self.do_locking or self.use_locking)
 
-    valid_nodes = [node.name
-                   for node in lu.cfg.GetAllNodesInfo().values()
-                   if not node.offline and node.vm_capable]
-    pol = self._DiagnoseByOS(lu.rpc.call_os_diagnose(valid_nodes))
+    valid_node_uuids = [node.uuid
+                        for node in lu.cfg.GetAllNodesInfo().values()
+                        if not node.offline and node.vm_capable]
+    pol = self._DiagnoseByOS(lu.rpc.call_os_diagnose(valid_node_uuids))
     cluster = lu.cfg.GetClusterInfo()
 
     data = {}
index 726e3a1..d095fc6 100644 (file)
@@ -29,7 +29,8 @@ from ganeti import locking
 from ganeti import objects
 from ganeti import utils
 from ganeti.cmdlib.base import NoHooksLU
-from ganeti.cmdlib.common import ExpandNodeName, ExpandInstanceName, ShareAll
+from ganeti.cmdlib.common import ExpandNodeUuidAndName, ExpandInstanceName, \
+  ShareAll
 
 
 class TagsLU(NoHooksLU): # pylint: disable=W0223
@@ -43,9 +44,10 @@ class TagsLU(NoHooksLU): # pylint: disable=W0223
     self.needed_locks = {}
 
     if self.op.kind == constants.TAG_NODE:
-      self.op.name = ExpandNodeName(self.cfg, self.op.name)
+      (self.node_uuid, _) = \
+        ExpandNodeUuidAndName(self.cfg, None, self.op.name)
       lock_level = locking.LEVEL_NODE
-      lock_name = self.op.name
+      lock_name = self.node_uuid
     elif self.op.kind == constants.TAG_INSTANCE:
       self.op.name = ExpandInstanceName(self.cfg, self.op.name)
       lock_level = locking.LEVEL_INSTANCE
@@ -75,7 +77,7 @@ class TagsLU(NoHooksLU): # pylint: disable=W0223
     if self.op.kind == constants.TAG_CLUSTER:
       self.target = self.cfg.GetClusterInfo()
     elif self.op.kind == constants.TAG_NODE:
-      self.target = self.cfg.GetNodeInfo(self.op.name)
+      self.target = self.cfg.GetNodeInfo(self.node_uuid)
     elif self.op.kind == constants.TAG_INSTANCE:
       self.target = self.cfg.GetInstanceInfo(self.op.name)
     elif self.op.kind == constants.TAG_NODEGROUP:
index 52787c3..cef5447 100644 (file)
@@ -57,8 +57,9 @@ class LUTestDelay(NoHooksLU):
       # _GetWantedNodes can be used here, but is not always appropriate to use
       # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
       # more information.
-      self.op.on_nodes = GetWantedNodes(self, self.op.on_nodes)
-      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
+      (self.op.on_node_uuids, self.op.on_nodes) = \
+        GetWantedNodes(self, self.op.on_nodes)
+      self.needed_locks[locking.LEVEL_NODE] = self.op.on_node_uuids
 
   def _TestDelay(self):
     """Do the actual sleep.
@@ -67,10 +68,11 @@ class LUTestDelay(NoHooksLU):
     if self.op.on_master:
       if not utils.TestDelay(self.op.duration):
         raise errors.OpExecError("Error during master delay test")
-    if self.op.on_nodes:
-      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
-      for node, node_result in result.items():
-        node_result.Raise("Failure during rpc call to node %s" % node)
+    if self.op.on_node_uuids:
+      result = self.rpc.call_test_delay(self.op.on_node_uuids, self.op.duration)
+      for node_uuid, node_result in result.items():
+        node_result.Raise("Failure during rpc call to node %s" %
+                          self.cfg.GetNodeName(node_uuid))
 
   def Exec(self, feedback_fn):
     """Execute the test delay opcode, with the wanted repetitions.
@@ -263,7 +265,7 @@ class LUTestAllocator(NoHooksLU):
     elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
       fname = ExpandInstanceName(self.cfg, self.op.name)
       self.op.name = fname
-      self.relocate_from = \
+      self.relocate_from_node_uuids = \
           list(self.cfg.GetInstanceInfo(fname).secondary_nodes)
     elif self.op.mode in (constants.IALLOCATOR_MODE_CHG_GROUP,
                           constants.IALLOCATOR_MODE_NODE_EVAC):
@@ -299,8 +301,9 @@ class LUTestAllocator(NoHooksLU):
                                           hypervisor=self.op.hypervisor,
                                           node_whitelist=None)
     elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
-      req = iallocator.IAReqRelocate(name=self.op.name,
-                                     relocate_from=list(self.relocate_from))
+      req = iallocator.IAReqRelocate(
+            name=self.op.name,
+            relocate_from_node_uuids=list(self.relocate_from_node_uuids))
     elif self.op.mode == constants.IALLOCATOR_MODE_CHG_GROUP:
       req = iallocator.IAReqGroupChange(instances=self.op.instances,
                                         target_groups=self.op.target_groups)
index de06e6e..37fc926 100644 (file)
@@ -760,10 +760,10 @@ class ConfigWriter:
                     (mc_now, mc_max))
 
     # node checks
-    for node_name, node in data.nodes.items():
-      if node.name != node_name:
-        result.append("Node '%s' is indexed by wrong name '%s'" %
-                      (node.name, node_name))
+    for node_uuid, node in data.nodes.items():
+      if node.uuid != node_uuid:
+        result.append("Node '%s' is indexed by wrong UUID '%s'" %
+                      (node.name, node_uuid))
       if [node.master_candidate, node.drained, node.offline].count(True) > 1:
         result.append("Node %s state is invalid: master_candidate=%s,"
                       " drain=%s, offline=%s" %
@@ -862,7 +862,7 @@ class ConfigWriter:
     """
     return self._UnlockedVerifyConfig()
 
-  def _UnlockedSetDiskID(self, disk, node_name):
+  def _UnlockedSetDiskID(self, disk, node_uuid):
     """Convert the unique ID to the ID needed on the target nodes.
 
     This is used only for drbd, which needs ip/port configuration.
@@ -876,15 +876,15 @@ class ConfigWriter:
     """
     if disk.children:
       for child in disk.children:
-        self._UnlockedSetDiskID(child, node_name)
+        self._UnlockedSetDiskID(child, node_uuid)
 
     if disk.logical_id is None and disk.physical_id is not None:
       return
     if disk.dev_type == constants.LD_DRBD8:
       pnode, snode, port, pminor, sminor, secret = disk.logical_id
-      if node_name not in (pnode, snode):
+      if node_uuid not in (pnode, snode):
         raise errors.ConfigurationError("DRBD device not knowing node %s" %
-                                        node_name)
+                                        node_uuid)
       pnode_info = self._UnlockedGetNodeInfo(pnode)
       snode_info = self._UnlockedGetNodeInfo(snode)
       if pnode_info is None or snode_info is None:
@@ -892,7 +892,7 @@ class ConfigWriter:
                                         " for %s" % str(disk))
       p_data = (pnode_info.secondary_ip, port)
       s_data = (snode_info.secondary_ip, port)
-      if pnode == node_name:
+      if pnode == node_uuid:
         disk.physical_id = p_data + s_data + (pminor, secret)
       else: # it must be secondary, we tested above
         disk.physical_id = s_data + p_data + (sminor, secret)
@@ -901,7 +901,7 @@ class ConfigWriter:
     return
 
   @locking.ssynchronized(_config_lock)
-  def SetDiskID(self, disk, node_name):
+  def SetDiskID(self, disk, node_uuid):
     """Convert the unique ID to the ID needed on the target nodes.
 
     This is used only for drbd, which needs ip/port configuration.
@@ -911,7 +911,7 @@ class ConfigWriter:
     node.
 
     """
-    return self._UnlockedSetDiskID(disk, node_name)
+    return self._UnlockedSetDiskID(disk, node_uuid)
 
   @locking.ssynchronized(_config_lock)
   def AddTcpUdpPort(self, port):
@@ -961,7 +961,7 @@ class ConfigWriter:
     """Compute the used DRBD minor/nodes.
 
     @rtype: (dict, list)
-    @return: dictionary of node_name: dict of minor: instance_name;
+    @return: dictionary of node_uuid: dict of minor: instance_name;
         the returned dict will have all the nodes in it (even if with
         an empty list), and a list of duplicates; if the duplicates
         list is not empty, the configuration is corrupted and its caller
@@ -1002,7 +1002,7 @@ class ConfigWriter:
 
     This is just a wrapper over L{_UnlockedComputeDRBDMap}.
 
-    @return: dictionary of node_name: dict of minor: instance_name;
+    @return: dictionary of node_uuid: dict of minor: instance_name;
         the returned dict will have all the nodes in it (even if with
         an empty list).
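
Concretely, the map described by this docstring is now keyed by node UUID; an illustrative value (placeholders only, not real identifiers) would be:

    # Illustrative only: node UUIDs and instance names are placeholders.
    drbd_map = {
      "node1-uuid": {0: "instance1.example.com", 1: "instance3.example.com"},
      "node2-uuid": {0: "instance2.example.com"},
      "node3-uuid": {},  # nodes without DRBD minors still appear
    }
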
 
@@ -1014,7 +1014,7 @@ class ConfigWriter:
     return d_map
 
   @locking.ssynchronized(_config_lock)
-  def AllocateDRBDMinor(self, nodes, instance):
+  def AllocateDRBDMinor(self, node_uuids, instance):
     """Allocate a drbd minor.
 
     The free minor will be automatically computed from the existing
@@ -1034,13 +1034,13 @@ class ConfigWriter:
       raise errors.ConfigurationError("Duplicate DRBD ports detected: %s" %
                                       str(duplicates))
     result = []
-    for nname in nodes:
-      ndata = d_map[nname]
+    for nuuid in node_uuids:
+      ndata = d_map[nuuid]
       if not ndata:
         # no minors used, we can start at 0
         result.append(0)
         ndata[0] = instance
-        self._temporary_drbds[(nname, 0)] = instance
+        self._temporary_drbds[(nuuid, 0)] = instance
         continue
       keys = ndata.keys()
       keys.sort()
@@ -1052,21 +1052,21 @@ class ConfigWriter:
       else:
         minor = ffree
       # double-check minor against current instances
-      assert minor not in d_map[nname], \
+      assert minor not in d_map[nuuid], \
              ("Attempt to reuse allocated DRBD minor %d on node %s,"
               " already allocated to instance %s" %
-              (minor, nname, d_map[nname][minor]))
+              (minor, nuuid, d_map[nuuid][minor]))
       ndata[minor] = instance
       # double-check minor against reservation
-      r_key = (nname, minor)
+      r_key = (nuuid, minor)
       assert r_key not in self._temporary_drbds, \
              ("Attempt to reuse reserved DRBD minor %d on node %s,"
               " reserved for instance %s" %
-              (minor, nname, self._temporary_drbds[r_key]))
+              (minor, nuuid, self._temporary_drbds[r_key]))
       self._temporary_drbds[r_key] = instance
       result.append(minor)
     logging.debug("Request to allocate drbd minors, input: %s, returning %s",
-                  nodes, result)
+                  node_uuids, result)
     return result
 
   def _UnlockedReleaseDRBDMinors(self, instance):
@@ -1120,14 +1120,23 @@ class ConfigWriter:
 
   @locking.ssynchronized(_config_lock, shared=1)
   def GetMasterNode(self):
-    """Get the hostname of the master node for this cluster.
+    """Get the UUID of the master node for this cluster.
 
-    @return: Master hostname
+    @return: Master node UUID
 
     """
     return self._config_data.cluster.master_node
 
   @locking.ssynchronized(_config_lock, shared=1)
+  def GetMasterNodeName(self):
+    """Get the hostname of the master node for this cluster.
+
+    @return: Master node hostname
+
+    """
+    return self._UnlockedGetNodeName(self._config_data.cluster.master_node)
+
+  @locking.ssynchronized(_config_lock, shared=1)
   def GetMasterIP(self):
     """Get the IP of the master node for this cluster.
 
@@ -1214,7 +1223,7 @@ class ConfigWriter:
     """
     cluster = self._config_data.cluster
     result = objects.MasterNetworkParameters(
-      name=cluster.master_node, ip=cluster.master_ip,
+      uuid=cluster.master_node, ip=cluster.master_ip,
       netmask=cluster.master_netmask, netdev=cluster.master_netdev,
       ip_family=cluster.primary_ip_family)
 
@@ -1372,11 +1381,11 @@ class ConfigWriter:
     """Get nodes which are member in the same nodegroups as the given nodes.
 
     """
-    ngfn = lambda node_name: self._UnlockedGetNodeInfo(node_name).group
-    return frozenset(member_name
-                     for node_name in nodes
-                     for member_name in
-                       self._UnlockedGetNodeGroup(ngfn(node_name)).members)
+    ngfn = lambda node_uuid: self._UnlockedGetNodeInfo(node_uuid).group
+    return frozenset(member_uuid
+                     for node_uuid in nodes
+                     for member_uuid in
+                       self._UnlockedGetNodeGroup(ngfn(node_uuid)).members)
 
   @locking.ssynchronized(_config_lock, shared=1)
   def GetMultiNodeGroupInfo(self, group_uuids):
@@ -1631,8 +1640,8 @@ class ConfigWriter:
     else:
       nodes = instance.all_nodes
 
-    return frozenset(self._UnlockedGetNodeInfo(node_name).group
-                     for node_name in nodes)
+    return frozenset(self._UnlockedGetNodeInfo(node_uuid).group
+                     for node_uuid in nodes)
 
   @locking.ssynchronized(_config_lock, shared=1)
   def GetInstanceNetworks(self, instance_name):
@@ -1708,69 +1717,78 @@ class ConfigWriter:
 
     node.serial_no = 1
     node.ctime = node.mtime = time.time()
-    self._UnlockedAddNodeToGroup(node.name, node.group)
-    self._config_data.nodes[node.name] = node
+    self._UnlockedAddNodeToGroup(node.uuid, node.group)
+    self._config_data.nodes[node.uuid] = node
     self._config_data.cluster.serial_no += 1
     self._WriteConfig()
 
   @locking.ssynchronized(_config_lock)
-  def RemoveNode(self, node_name):
+  def RemoveNode(self, node_uuid):
     """Remove a node from the configuration.
 
     """
-    logging.info("Removing node %s from configuration", node_name)
+    logging.info("Removing node %s from configuration", node_uuid)
 
-    if node_name not in self._config_data.nodes:
-      raise errors.ConfigurationError("Unknown node '%s'" % node_name)
+    if node_uuid not in self._config_data.nodes:
+      raise errors.ConfigurationError("Unknown node '%s'" % node_uuid)
 
-    self._UnlockedRemoveNodeFromGroup(self._config_data.nodes[node_name])
-    del self._config_data.nodes[node_name]
+    self._UnlockedRemoveNodeFromGroup(self._config_data.nodes[node_uuid])
+    del self._config_data.nodes[node_uuid]
     self._config_data.cluster.serial_no += 1
     self._WriteConfig()
 
   def ExpandNodeName(self, short_name):
-    """Attempt to expand an incomplete node name.
+    """Attempt to expand an incomplete node name into a node UUID.
 
     """
-    # Locking is done in L{ConfigWriter.GetNodeList}
-    return _MatchNameComponentIgnoreCase(short_name, self.GetNodeList())
+    # Locking is done in L{ConfigWriter.GetAllNodesInfo}
+    all_nodes = self.GetAllNodesInfo().values()
+    expanded_name = _MatchNameComponentIgnoreCase(
+                      short_name, [node.name for node in all_nodes])
 
-  def _UnlockedGetNodeInfo(self, node_name):
+    if expanded_name is not None:
+      # there has to be exactly one node with that name
+      node = filter(lambda n: n.name == expanded_name, all_nodes)[0]
+      return (node.uuid, node.name)
+    else:
+      return None
+
+  def _UnlockedGetNodeInfo(self, node_uuid):
     """Get the configuration of a node, as stored in the config.
 
     This function is for internal use, when the config lock is already
     held.
 
-    @param node_name: the node name, e.g. I{node1.example.com}
+    @param node_uuid: the node UUID
 
     @rtype: L{objects.Node}
     @return: the node object
 
     """
-    if node_name not in self._config_data.nodes:
+    if node_uuid not in self._config_data.nodes:
       return None
 
-    return self._config_data.nodes[node_name]
+    return self._config_data.nodes[node_uuid]
 
   @locking.ssynchronized(_config_lock, shared=1)
-  def GetNodeInfo(self, node_name):
+  def GetNodeInfo(self, node_uuid):
     """Get the configuration of a node, as stored in the config.
 
     This is just a locked wrapper over L{_UnlockedGetNodeInfo}.
 
-    @param node_name: the node name, e.g. I{node1.example.com}
+    @param node_uuid: the node UUID
 
     @rtype: L{objects.Node}
     @return: the node object
 
     """
-    return self._UnlockedGetNodeInfo(node_name)
+    return self._UnlockedGetNodeInfo(node_uuid)
 
   @locking.ssynchronized(_config_lock, shared=1)
-  def GetNodeInstances(self, node_name):
+  def GetNodeInstances(self, node_uuid):
     """Get the instances of a node, as stored in the config.
 
-    @param node_name: the node name, e.g. I{node1.example.com}
+    @param node_uuid: the node UUID
 
     @rtype: (list, list)
     @return: a tuple with two lists: the primary and the secondary instances
@@ -1779,9 +1797,9 @@ class ConfigWriter:
     pri = []
     sec = []
     for inst in self._config_data.instances.values():
-      if inst.primary_node == node_name:
+      if inst.primary_node == node_uuid:
         pri.append(inst.name)
-      if node_name in inst.secondary_nodes:
+      if node_uuid in inst.secondary_nodes:
         sec.append(inst.name)
     return (pri, sec)
 
@@ -1802,8 +1820,8 @@ class ConfigWriter:
 
     return frozenset(inst.name
                      for inst in self._config_data.instances.values()
-                     for node_name in nodes_fn(inst)
-                     if self._UnlockedGetNodeInfo(node_name).group == uuid)
+                     for node_uuid in nodes_fn(inst)
+                     if self._UnlockedGetNodeInfo(node_uuid).group == uuid)
 
   def _UnlockedGetHvparamsString(self, hvname):
     """Return the string representation of the list of hyervisor parameters of
@@ -1855,7 +1873,7 @@ class ConfigWriter:
     """
     all_nodes = [self._UnlockedGetNodeInfo(node)
                  for node in self._UnlockedGetNodeList()]
-    return [node.name for node in all_nodes if not node.offline]
+    return [node.uuid for node in all_nodes if not node.offline]
 
   @locking.ssynchronized(_config_lock, shared=1)
   def GetOnlineNodeList(self):
@@ -1871,7 +1889,7 @@ class ConfigWriter:
     """
     all_nodes = [self._UnlockedGetNodeInfo(node)
                  for node in self._UnlockedGetNodeList()]
-    return [node.name for node in all_nodes if node.vm_capable]
+    return [node.uuid for node in all_nodes if node.vm_capable]
 
   @locking.ssynchronized(_config_lock, shared=1)
   def GetNonVmCapableNodeList(self):
@@ -1880,20 +1898,29 @@ class ConfigWriter:
     """
     all_nodes = [self._UnlockedGetNodeInfo(node)
                  for node in self._UnlockedGetNodeList()]
-    return [node.name for node in all_nodes if not node.vm_capable]
+    return [node.uuid for node in all_nodes if not node.vm_capable]
 
   @locking.ssynchronized(_config_lock, shared=1)
-  def GetMultiNodeInfo(self, nodes):
+  def GetMultiNodeInfo(self, node_uuids):
     """Get the configuration of multiple nodes.
 
-    @param nodes: list of node names
+    @param node_uuids: list of node UUIDs
     @rtype: list
     @return: list of tuples of (node, node_info), where node_info is
         what would GetNodeInfo return for the node, in the original
         order
 
     """
-    return [(name, self._UnlockedGetNodeInfo(name)) for name in nodes]
+    return [(uuid, self._UnlockedGetNodeInfo(uuid)) for uuid in node_uuids]
+
+  def _UnlockedGetAllNodesInfo(self):
+    """Gets configuration of all nodes.
+
+    @note: See L{GetAllNodesInfo}
+
+    """
+    return dict([(node_uuid, self._UnlockedGetNodeInfo(node_uuid))
+                 for node_uuid in self._UnlockedGetNodeList()])
 
   @locking.ssynchronized(_config_lock, shared=1)
   def GetAllNodesInfo(self):
@@ -1906,25 +1933,73 @@ class ConfigWriter:
     """
     return self._UnlockedGetAllNodesInfo()
 
-  def _UnlockedGetAllNodesInfo(self):
-    """Gets configuration of all nodes.
+  def _UnlockedGetNodeInfoByName(self, node_name):
+    for node in self._UnlockedGetAllNodesInfo().values():
+      if node.name == node_name:
+        return node
+    return None
 
-    @note: See L{GetAllNodesInfo}
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetNodeInfoByName(self, node_name):
+    """Get the L{objects.Node} object for a named node.
+
+    @param node_name: name of the node to get information for
+    @type node_name: string
+    @return: the corresponding L{objects.Node} instance or None if no
+          information is available
+
+    """
+    return self._UnlockedGetNodeInfoByName(node_name)
+
+  def _UnlockedGetNodeName(self, node_spec):
+    if isinstance(node_spec, objects.Node):
+      return node_spec.name
+    elif isinstance(node_spec, basestring):
+      node_info = self._UnlockedGetNodeInfo(node_spec)
+      if node_info is None:
+        raise errors.OpExecError("Unknown node: %s" % node_spec)
+      return node_info.name
+    else:
+      raise errors.ProgrammerError("Can't handle node spec '%s'" % node_spec)
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetNodeName(self, node_spec):
+    """Gets the node name for the passed node.
+
+    @param node_spec: node to get the name for
+    @type node_spec: either node UUID or a L{objects.Node} object
+    @rtype: string
+    @return: node name
+
+    """
+    return self._UnlockedGetNodeName(node_spec)
+
+  def _UnlockedGetNodeNames(self, node_specs):
+    return [self._UnlockedGetNodeName(node_spec) for node_spec in node_specs]
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetNodeNames(self, node_specs):
+    """Gets the node names for the passed list of nodes.
+
+    @param node_specs: list of nodes to get names for
+    @type node_specs: list of either node UUIDs or L{objects.Node} objects
+    @rtype: list of strings
+    @return: list of node names
 
     """
-    return dict([(node, self._UnlockedGetNodeInfo(node))
-                 for node in self._UnlockedGetNodeList()])
+    return self._UnlockedGetNodeNames(node_specs)
 
   @locking.ssynchronized(_config_lock, shared=1)
-  def GetNodeGroupsFromNodes(self, nodes):
+  def GetNodeGroupsFromNodes(self, node_uuids):
     """Returns groups for a list of nodes.
 
-    @type nodes: list of string
-    @param nodes: List of node names
+    @type node_uuids: list of string
+    @param node_uuids: List of node UUIDs
     @rtype: frozenset
 
     """
-    return frozenset(self._UnlockedGetNodeInfo(name).group for name in nodes)
+    return frozenset(self._UnlockedGetNodeInfo(uuid).group
+                     for uuid in node_uuids)
 
   def _UnlockedGetMasterCandidateStats(self, exceptions=None):
     """Get the number of current and maximum desired and possible candidates.
@@ -1937,7 +2012,7 @@ class ConfigWriter:
     """
     mc_now = mc_should = mc_max = 0
     for node in self._config_data.nodes.values():
-      if exceptions and node.name in exceptions:
+      if exceptions and node.uuid in exceptions:
         continue
       if not (node.offline or node.drained) and node.master_capable:
         mc_max += 1
@@ -1961,26 +2036,27 @@ class ConfigWriter:
     return self._UnlockedGetMasterCandidateStats(exceptions)
 
   @locking.ssynchronized(_config_lock)
-  def MaintainCandidatePool(self, exceptions):
+  def MaintainCandidatePool(self, exception_node_uuids):
     """Try to grow the candidate pool to the desired size.
 
-    @type exceptions: list
-    @param exceptions: if passed, list of nodes that should be ignored
+    @type exception_node_uuids: list
+    @param exception_node_uuids: if passed, list of nodes that should be ignored
     @rtype: list
     @return: list with the adjusted nodes (L{objects.Node} instances)
 
     """
-    mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats(exceptions)
+    mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats(
+                          exception_node_uuids)
     mod_list = []
     if mc_now < mc_max:
       node_list = self._config_data.nodes.keys()
       random.shuffle(node_list)
-      for name in node_list:
+      for uuid in node_list:
         if mc_now >= mc_max:
           break
-        node = self._config_data.nodes[name]
+        node = self._config_data.nodes[uuid]
         if (node.master_candidate or node.offline or node.drained or
-            node.name in exceptions or not node.master_capable):
+            node.uuid in exception_node_uuids or not node.master_capable):
           continue
         mod_list.append(node)
         node.master_candidate = True
@@ -1996,7 +2072,7 @@ class ConfigWriter:
 
     return mod_list
 
-  def _UnlockedAddNodeToGroup(self, node_name, nodegroup_uuid):
+  def _UnlockedAddNodeToGroup(self, node_uuid, nodegroup_uuid):
     """Add a given node to the specified group.
 
     """
@@ -2006,8 +2082,8 @@ class ConfigWriter:
       # the meantime. It's ok though, as we'll fail cleanly if the node group
       # is not found anymore.
       raise errors.OpExecError("Unknown node group: %s" % nodegroup_uuid)
-    if node_name not in self._config_data.nodegroups[nodegroup_uuid].members:
-      self._config_data.nodegroups[nodegroup_uuid].members.append(node_name)
+    if node_uuid not in self._config_data.nodegroups[nodegroup_uuid].members:
+      self._config_data.nodegroups[nodegroup_uuid].members.append(node_uuid)
 
   def _UnlockedRemoveNodeFromGroup(self, node):
     """Remove a given node from its group.
@@ -2016,13 +2092,13 @@ class ConfigWriter:
     nodegroup = node.group
     if nodegroup not in self._config_data.nodegroups:
       logging.warning("Warning: node '%s' has unknown node group '%s'"
-                      " (while being removed from it)", node.name, nodegroup)
+                      " (while being removed from it)", node.uuid, nodegroup)
     nodegroup_obj = self._config_data.nodegroups[nodegroup]
-    if node.name not in nodegroup_obj.members:
+    if node.uuid not in nodegroup_obj.members:
       logging.warning("Warning: node '%s' not a member of its node group '%s'"
-                      " (while being removed from it)", node.name, nodegroup)
+                      " (while being removed from it)", node.uuid, nodegroup)
     else:
-      nodegroup_obj.members.remove(node.name)
+      nodegroup_obj.members.remove(node.uuid)
 
   @locking.ssynchronized(_config_lock)
   def AssignGroupNodes(self, mods):
@@ -2037,17 +2113,17 @@ class ConfigWriter:
 
     resmod = []
 
-    # Try to resolve names/UUIDs first
-    for (node_name, new_group_uuid) in mods:
+    # Try to resolve UUIDs first
+    for (node_uuid, new_group_uuid) in mods:
       try:
-        node = nodes[node_name]
+        node = nodes[node_uuid]
       except KeyError:
-        raise errors.ConfigurationError("Unable to find node '%s'" % node_name)
+        raise errors.ConfigurationError("Unable to find node '%s'" % node_uuid)
 
       if node.group == new_group_uuid:
         # Node is being assigned to its current group
         logging.debug("Node '%s' was assigned to its current group (%s)",
-                      node_name, node.group)
+                      node_uuid, node.group)
         continue
 
       # Try to find current group of node
@@ -2064,12 +2140,12 @@ class ConfigWriter:
         raise errors.ConfigurationError("Unable to find new group '%s'" %
                                         new_group_uuid)
 
-      assert node.name in old_group.members, \
+      assert node.uuid in old_group.members, \
         ("Inconsistent configuration: node '%s' not listed in members for its"
-         " old group '%s'" % (node.name, old_group.uuid))
-      assert node.name not in new_group.members, \
+         " old group '%s'" % (node.uuid, old_group.uuid))
+      assert node.uuid not in new_group.members, \
         ("Inconsistent configuration: node '%s' already listed in members for"
-         " its new group '%s'" % (node.name, new_group.uuid))
+         " its new group '%s'" % (node.uuid, new_group.uuid))
 
       resmod.append((node, old_group, new_group))
 
@@ -2081,10 +2157,10 @@ class ConfigWriter:
       node.group = new_group.uuid
 
       # Update members of involved groups
-      if node.name in old_group.members:
-        old_group.members.remove(node.name)
-      if node.name not in new_group.members:
-        new_group.members.append(node.name)
+      if node.uuid in old_group.members:
+        old_group.members.remove(node.uuid)
+      if node.uuid not in new_group.members:
+        new_group.members.append(node.uuid)
 
     # Update timestamps and serials (only once per node/group object)
     now = time.time()
@@ -2135,11 +2211,18 @@ class ConfigWriter:
       raise errors.ConfigurationError("Incomplete configuration"
                                       " (missing cluster.rsahostkeypub)")
 
-    if data.cluster.master_node != self._my_hostname and not accept_foreign:
+    if data.cluster.master_node not in data.nodes:
+      msg = ("The configuration denotes node %s as master, but does not"
+             " contain information about this node" %
+             data.cluster.master_node)
+      raise errors.ConfigurationError(msg)
+
+    master_info = data.nodes[data.cluster.master_node]
+    if master_info.name != self._my_hostname and not accept_foreign:
       msg = ("The configuration denotes node %s as master, while my"
              " hostname is %s; opening a foreign configuration is only"
              " possible in accept_foreign mode" %
-             (data.cluster.master_node, self._my_hostname))
+             (master_info.name, self._my_hostname))
       raise errors.ConfigurationError(msg)
 
     self._config_data = data
@@ -2188,7 +2271,7 @@ class ConfigWriter:
       # nodegroups are being added, and upon normally loading the config,
       # because the members list of a node group is discarded upon
       # serializing/deserializing the object.
-      self._UnlockedAddNodeToGroup(node.name, node.group)
+      self._UnlockedAddNodeToGroup(node.uuid, node.group)
 
     modified = (oldconf != self._config_data.ToDict())
     if modified:
@@ -2222,11 +2305,9 @@ class ConfigWriter:
     # since the node list comes from _UnlockedGetNodeList, and we are
     # called with the lock held, so no modifications should take place
     # in between
-    for node_name in self._UnlockedGetNodeList():
-      if node_name == myhostname:
-        continue
-      node_info = self._UnlockedGetNodeInfo(node_name)
-      if not node_info.master_candidate:
+    for node_uuid in self._UnlockedGetNodeList():
+      node_info = self._UnlockedGetNodeInfo(node_uuid)
+      if node_info.name == myhostname or not node_info.master_candidate:
         continue
       node_list.append(node_info.name)
       addr_list.append(node_info.primary_ip)
@@ -2293,7 +2374,7 @@ class ConfigWriter:
     if self._last_cluster_serial < self._config_data.cluster.serial_no:
       if not self._offline:
         result = self._GetRpc(None).call_write_ssconf_files(
-          self._UnlockedGetOnlineNodeList(),
+          self._UnlockedGetNodeNames(self._UnlockedGetOnlineNodeList()),
           self._UnlockedGetSsconfValues())
 
         for nname, nresu in result.items():
@@ -2352,18 +2433,18 @@ class ConfigWriter:
     """
     fn = "\n".join
     instance_names = utils.NiceSort(self._UnlockedGetInstanceList())
-    node_names = utils.NiceSort(self._UnlockedGetNodeList())
-    node_info = [self._UnlockedGetNodeInfo(name) for name in node_names]
+    node_infos = self._UnlockedGetAllNodesInfo().values()
+    node_names = [node.name for node in node_infos]
     node_pri_ips = ["%s %s" % (ninfo.name, ninfo.primary_ip)
-                    for ninfo in node_info]
+                    for ninfo in node_infos]
     node_snd_ips = ["%s %s" % (ninfo.name, ninfo.secondary_ip)
-                    for ninfo in node_info]
+                    for ninfo in node_infos]
 
     instance_data = fn(instance_names)
-    off_data = fn(node.name for node in node_info if node.offline)
-    on_data = fn(node.name for node in node_info if not node.offline)
-    mc_data = fn(node.name for node in node_info if node.master_candidate)
-    mc_ips_data = fn(node.primary_ip for node in node_info
+    off_data = fn(node.name for node in node_infos if node.offline)
+    on_data = fn(node.name for node in node_infos if not node.offline)
+    mc_data = fn(node.name for node in node_infos if node.master_candidate)
+    mc_ips_data = fn(node.primary_ip for node in node_infos
                      if node.master_candidate)
     node_data = fn(node_names)
     node_pri_ips_data = fn(node_pri_ips)
@@ -2394,7 +2475,7 @@ class ConfigWriter:
       constants.SS_MASTER_IP: cluster.master_ip,
       constants.SS_MASTER_NETDEV: cluster.master_netdev,
       constants.SS_MASTER_NETMASK: str(cluster.master_netmask),
-      constants.SS_MASTER_NODE: cluster.master_node,
+      constants.SS_MASTER_NODE: self._UnlockedGetNodeName(cluster.master_node),
       constants.SS_NODE_LIST: node_data,
       constants.SS_NODE_PRIMARY_IPS: node_pri_ips_data,
       constants.SS_NODE_SECONDARY_IPS: node_snd_ips_data,
@@ -2679,34 +2760,34 @@ class ConfigWriter:
     self._config_data.cluster.serial_no += 1
     self._WriteConfig()
 
-  def _UnlockedGetGroupNetParams(self, net_uuid, node):
+  def _UnlockedGetGroupNetParams(self, net_uuid, node_uuid):
     """Get the netparams (mode, link) of a network.
 
     Get a network's netparams for a given node.
 
     @type net_uuid: string
     @param net_uuid: network uuid
-    @type node: string
-    @param node: node name
+    @type node_uuid: string
+    @param node_uuid: node UUID
     @rtype: dict or None
     @return: netparams
 
     """
-    node_info = self._UnlockedGetNodeInfo(node)
+    node_info = self._UnlockedGetNodeInfo(node_uuid)
     nodegroup_info = self._UnlockedGetNodeGroup(node_info.group)
     netparams = nodegroup_info.networks.get(net_uuid, None)
 
     return netparams
 
   @locking.ssynchronized(_config_lock, shared=1)
-  def GetGroupNetParams(self, net_uuid, node):
+  def GetGroupNetParams(self, net_uuid, node_uuid):
     """Locking wrapper of _UnlockedGetGroupNetParams()
 
     """
-    return self._UnlockedGetGroupNetParams(net_uuid, node)
+    return self._UnlockedGetGroupNetParams(net_uuid, node_uuid)
 
   @locking.ssynchronized(_config_lock, shared=1)
-  def CheckIPInNodeGroup(self, ip, node):
+  def CheckIPInNodeGroup(self, ip, node_uuid):
     """Check IP uniqueness in nodegroup.
 
     Check networks that are connected in the node's node group
@@ -2715,15 +2796,15 @@ class ConfigWriter:
 
     @type ip: string
     @param ip: ip address
-    @type node: string
-    @param node: node name
+    @type node_uuid: string
+    @param node_uuid: node UUID
     @rtype: (string, dict) or (None, None)
     @return: (network name, netparams)
 
     """
     if ip is None:
       return (None, None)
-    node_info = self._UnlockedGetNodeInfo(node)
+    node_info = self._UnlockedGetNodeInfo(node_uuid)
     nodegroup_info = self._UnlockedGetNodeGroup(node_info.group)
     for net_uuid in nodegroup_info.networks.keys():
       net_info = self._UnlockedGetNetwork(net_uuid)
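
Taken together, the accessors introduced in this file (GetNodeName, GetNodeNames, GetNodeInfoByName, GetMasterNodeName) are what keep user-visible output in terms of hostnames while the configuration itself is indexed by UUID. A hedged usage sketch, with cfg standing for a ConfigWriter instance and the surrounding function invented for illustration:

    def describe_master(cfg):
      # Illustrative only: resolve internal UUIDs to names at the output boundary.
      master_uuid = cfg.GetMasterNode()        # UUID, the internal index key
      master_name = cfg.GetMasterNodeName()    # hostname, e.g. for logs/ssconf
      online_names = cfg.GetNodeNames(cfg.GetOnlineNodeList())
      return "master %s (%s), online nodes: %s" % (
        master_name, master_uuid, ", ".join(online_names))
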
index 9f40087..515a2a8 100644 (file)
@@ -171,40 +171,41 @@ class HooksMaster(object):
 
     return self.hooks_execution_fn(node_list, hpath, phase, env)
 
-  def RunPhase(self, phase, nodes=None):
+  def RunPhase(self, phase, node_names=None):
     """Run all the scripts for a phase.
 
     This is the main function of the HookMaster.
     It executes self.hooks_execution_fn, and after running
-    self.hooks_results_adapt_fn on its results it expects them to be in the form
-    {node_name: (fail_msg, [(script, result, output), ...]}).
+    self.hooks_results_adapt_fn on its results, it expects them to be in the
+    form {node_name: (fail_msg, [(script, result, output), ...])}.
 
     @param phase: one of L{constants.HOOKS_PHASE_POST} or
         L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
-    @param nodes: overrides the predefined list of nodes for the given phase
+    @param node_names: overrides the predefined list of nodes for the given
+        phase
     @return: the processed results of the hooks multi-node rpc call
     @raise errors.HooksFailure: on communication failure to the nodes
     @raise errors.HooksAbort: on failure of one of the hooks
 
     """
     if phase == constants.HOOKS_PHASE_PRE:
-      if nodes is None:
-        nodes = self.pre_nodes
+      if node_names is None:
+        node_names = self.pre_nodes
       env = self.pre_env
     elif phase == constants.HOOKS_PHASE_POST:
-      if nodes is None:
-        nodes = self.post_nodes
+      if node_names is None:
+        node_names = self.post_nodes
       env = self._BuildEnv(phase)
     else:
       raise AssertionError("Unknown phase '%s'" % phase)
 
-    if not nodes:
+    if not node_names:
       # empty node list, we should not attempt to run this as either
       # we're in the cluster init phase and the rpc client part can't
       # even attempt to run, or this LU doesn't do hooks at all
       return
 
-    results = self._RunWrapper(nodes, self.hooks_path, phase, env)
+    results = self._RunWrapper(node_names, self.hooks_path, phase, env)
     if not results:
       msg = "Communication Failure"
       if phase == constants.HOOKS_PHASE_PRE:
@@ -258,11 +259,20 @@ class HooksMaster(object):
     if lu.HPATH is None:
       nodes = (None, None)
     else:
-      nodes = map(frozenset, lu.BuildHooksNodes())
+      hooks_nodes = lu.BuildHooksNodes()
+      to_name = lambda node_uuids: frozenset(lu.cfg.GetNodeNames(node_uuids))
+      if len(hooks_nodes) == 2:
+        nodes = (to_name(hooks_nodes[0]), to_name(hooks_nodes[1]))
+      elif len(hooks_nodes) == 3:
+        nodes = (to_name(hooks_nodes[0]),
+                 to_name(hooks_nodes[1]) | frozenset(hooks_nodes[2]))
+      else:
+        raise errors.ProgrammerError(
+          "LogicalUnit.BuildHooksNodes must return a 2- or 3-tuple")
 
     master_name = cluster_name = None
     if lu.cfg:
-      master_name = lu.cfg.GetMasterNode()
+      master_name = lu.cfg.GetMasterNodeName()
       cluster_name = lu.cfg.GetClusterName()
 
     return HooksMaster(lu.op.OP_ID, lu.HPATH, nodes, hooks_execution_fn,
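
The hook node lists handed to HooksMaster are therefore plain hostnames again by the time _RunWrapper is called; only the conversion point moved into BuildFromLu. A condensed sketch of that normalisation, assuming GetNodeNames as defined in lib/config.py above, and taking the optional third element to hold hostnames that are passed through unresolved:

    def hook_node_names(cfg, hooks_nodes):
      # Illustrative sketch only: normalise (pre, post[, extra]) node tuples
      # from BuildHooksNodes into two frozensets of hostnames.
      to_names = lambda uuids: frozenset(cfg.GetNodeNames(uuids))
      pre = to_names(hooks_nodes[0])
      post = to_names(hooks_nodes[1])
      if len(hooks_nodes) == 3:
        post |= frozenset(hooks_nodes[2])  # already hostnames, kept as given
      return (pre, post)
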
index 381d298..f202386 100644 (file)
@@ -248,7 +248,7 @@ class BaseHypervisor(object):
     raise NotImplementedError
 
   @classmethod
-  def GetInstanceConsole(cls, instance, hvparams, beparams):
+  def GetInstanceConsole(cls, instance, primary_node, hvparams, beparams):
     """Return information for connecting to the console of an instance.
 
     """
index 59da714..c3b96e6 100644 (file)
@@ -267,7 +267,7 @@ class ChrootManager(hv_base.BaseHypervisor):
     return self.GetLinuxNodeInfo()
 
   @classmethod
-  def GetInstanceConsole(cls, instance, # pylint: disable=W0221
+  def GetInstanceConsole(cls, instance, primary_node, # pylint: disable=W0221
                          hvparams, beparams, root_dir=None):
     """Return information for connecting to the console of an instance.
 
@@ -279,7 +279,7 @@ class ChrootManager(hv_base.BaseHypervisor):
 
     return objects.InstanceConsole(instance=instance.name,
                                    kind=constants.CONS_SSH,
-                                   host=instance.primary_node,
+                                   host=primary_node.name,
                                    user=constants.SSH_CONSOLE_USER,
                                    command=["chroot", root_dir])
 
index f7c2061..bbb8daf 100644 (file)
@@ -232,7 +232,7 @@ class FakeHypervisor(hv_base.BaseHypervisor):
     return result
 
   @classmethod
-  def GetInstanceConsole(cls, instance, hvparams, beparams):
+  def GetInstanceConsole(cls, instance, primary_node, hvparams, beparams):
     """Return information for connecting to the console of an instance.
 
     """
index 4979818..917b9f6 100644 (file)
@@ -2020,7 +2020,7 @@ class KVMHypervisor(hv_base.BaseHypervisor):
     return result
 
   @classmethod
-  def GetInstanceConsole(cls, instance, hvparams, beparams):
+  def GetInstanceConsole(cls, instance, primary_node, hvparams, beparams):
     """Return a command for connecting to the console of an instance.
 
     """
@@ -2032,7 +2032,7 @@ class KVMHypervisor(hv_base.BaseHypervisor):
              "UNIX-CONNECT:%s" % cls._InstanceSerial(instance.name)]
       return objects.InstanceConsole(instance=instance.name,
                                      kind=constants.CONS_SSH,
-                                     host=instance.primary_node,
+                                     host=primary_node.name,
                                      user=constants.SSH_CONSOLE_USER,
                                      command=cmd)
 
index df8e62a..65dd64f 100644 (file)
@@ -414,13 +414,13 @@ class LXCHypervisor(hv_base.BaseHypervisor):
     return self.GetLinuxNodeInfo()
 
   @classmethod
-  def GetInstanceConsole(cls, instance, hvparams, beparams):
+  def GetInstanceConsole(cls, instance, primary_node, hvparams, beparams):
     """Return a command for connecting to the console of an instance.
 
     """
     return objects.InstanceConsole(instance=instance.name,
                                    kind=constants.CONS_SSH,
-                                   host=instance.primary_node,
+                                   host=primary_node.name,
                                    user=constants.SSH_CONSOLE_USER,
                                    command=["lxc-console", "-n", instance.name])
 
index 647d3df..00bd633 100644 (file)
@@ -627,14 +627,14 @@ class XenHypervisor(hv_base.BaseHypervisor):
     instance_list = self._GetInstanceList(True, hvparams)
     return _GetNodeInfo(result.stdout, instance_list)
 
-  def GetInstanceConsole(self, instance, hvparams, beparams):
+  def GetInstanceConsole(self, instance, primary_node, hvparams, beparams):
     """Return a command for connecting to the console of an instance.
 
     """
     xen_cmd = self._GetCommand(hvparams)
     return objects.InstanceConsole(instance=instance.name,
                                    kind=constants.CONS_SSH,
-                                   host=instance.primary_node,
+                                   host=primary_node.name,
                                    user=constants.SSH_CONSOLE_USER,
                                    command=[pathutils.XEN_CONSOLE_WRAPPER,
                                             xen_cmd, instance.name])
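
GetInstanceConsole now expects the primary node as an objects.Node instead of
reading a name off the instance, so callers have to resolve the UUID first. A
rough calling-side sketch, assuming a ConfigWriter cfg, the cluster fill
helpers and hyper being the loaded hypervisor object (none of which are part
of this hunk):

    primary_node = cfg.GetNodeInfo(instance.primary_node)  # UUID -> Node
    console = hyper.GetInstanceConsole(instance, primary_node,
                                       cluster.FillHV(instance),
                                       cluster.FillBE(instance))
    # console.host is the resolved node name, no longer the raw primary_node
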
index 9416428..d7e6cfb 100644 (file)
@@ -234,7 +234,7 @@ class IAReqRelocate(IARequestBase):
   MODE = constants.IALLOCATOR_MODE_RELOC
   REQ_PARAMS = [
     _INST_NAME,
-    ("relocate_from", _STRING_LIST),
+    ("relocate_from_node_uuids", _STRING_LIST),
     ]
   REQ_RESULT = ht.TList
 
@@ -266,7 +266,7 @@ class IAReqRelocate(IARequestBase):
       "name": self.name,
       "disk_space_total": disk_space,
       "required_nodes": 1,
-      "relocate_from": self.relocate_from,
+      "relocate_from": cfg.GetNodeNames(self.relocate_from_node_uuids),
       }
 
   def ValidateResult(self, ia, result):
@@ -282,12 +282,13 @@ class IAReqRelocate(IARequestBase):
                         ia.in_data["nodegroups"])
 
     instance = ia.cfg.GetInstanceInfo(self.name)
-    request_groups = fn(self.relocate_from + [instance.primary_node])
-    result_groups = fn(result + [instance.primary_node])
+    request_groups = fn(ia.cfg.GetNodeNames(self.relocate_from_node_uuids) +
+                        ia.cfg.GetNodeNames([instance.primary_node]))
+    result_groups = fn(result + ia.cfg.GetNodeNames([instance.primary_node]))
 
     if ia.success and not set(result_groups).issubset(request_groups):
       raise errors.ResultValidationError("Groups of nodes returned by"
-                                         "iallocator (%s) differ from original"
+                                         " iallocator (%s) differ from original"
                                          " groups (%s)" %
                                          (utils.CommaJoin(result_groups),
                                           utils.CommaJoin(request_groups)))
@@ -418,7 +419,7 @@ class IAllocator(object):
     i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]
 
     # node data
-    node_list = [n.name for n in ninfo.values() if n.vm_capable]
+    node_list = [n.uuid for n in ninfo.values() if n.vm_capable]
 
     if isinstance(self.req, IAReqInstanceAlloc):
       hypervisor_name = self.req.hypervisor
@@ -430,7 +431,7 @@ class IAllocator(object):
       hypervisor_name = cluster_info.primary_hypervisor
       node_whitelist = None
 
-    es_flags = rpc.GetExclusiveStorageForNodeNames(cfg, node_list)
+    es_flags = rpc.GetExclusiveStorageForNodes(cfg, node_list)
     vg_req = rpc.BuildVgInfoQuery(cfg)
     has_lvm = bool(vg_req)
     hvspecs = [(hypervisor_name, cluster_info.hvparams[hypervisor_name])]
@@ -449,7 +450,7 @@ class IAllocator(object):
     assert len(data["nodes"]) == len(ninfo), \
         "Incomplete node data computed"
 
-    data["instances"] = self._ComputeInstanceData(cluster_info, i_list)
+    data["instances"] = self._ComputeInstanceData(cfg, cluster_info, i_list)
 
     self.in_data = data
 
@@ -508,26 +509,27 @@ class IAllocator(object):
     #TODO(dynmem): compute the right data on MAX and MIN memory
     # make a copy of the current dict
     node_results = dict(node_results)
-    for nname, nresult in node_data.items():
-      assert nname in node_results, "Missing basic data for node %s" % nname
-      ninfo = node_cfg[nname]
+    for nuuid, nresult in node_data.items():
+      ninfo = node_cfg[nuuid]
+      assert ninfo.name in node_results, "Missing basic data for node %s" % \
+                                         ninfo.name
 
       if not (ninfo.offline or ninfo.drained):
-        nresult.Raise("Can't get data for node %s" % nname)
-        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
-                                nname)
+        nresult.Raise("Can't get data for node %s" % ninfo.name)
+        node_iinfo[nuuid].Raise("Can't get node instance info from node %s" %
+                                ninfo.name)
         remote_info = rpc.MakeLegacyNodeInfo(nresult.payload,
                                              require_vg_info=has_lvm)
 
         def get_attr(attr):
           if attr not in remote_info:
             raise errors.OpExecError("Node '%s' didn't return attribute"
-                                     " '%s'" % (nname, attr))
+                                     " '%s'" % (ninfo.name, attr))
           value = remote_info[attr]
           if not isinstance(value, int):
             raise errors.OpExecError("Node '%s' returned invalid value"
                                      " for '%s': %s" %
-                                     (nname, attr, value))
+                                     (ninfo.name, attr, value))
           return value
 
         mem_free = get_attr("memory_free")
@@ -535,12 +537,12 @@ class IAllocator(object):
         # compute memory used by primary instances
         i_p_mem = i_p_up_mem = 0
         for iinfo, beinfo in i_list:
-          if iinfo.primary_node == nname:
+          if iinfo.primary_node == nuuid:
             i_p_mem += beinfo[constants.BE_MAXMEM]
-            if iinfo.name not in node_iinfo[nname].payload:
+            if iinfo.name not in node_iinfo[nuuid].payload:
               i_used_mem = 0
             else:
-              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]["memory"])
+              i_used_mem = int(node_iinfo[nuuid].payload[iinfo.name]["memory"])
             i_mem_diff = beinfo[constants.BE_MAXMEM] - i_used_mem
             mem_free -= max(0, i_mem_diff)
 
@@ -571,13 +573,13 @@ class IAllocator(object):
           "i_pri_memory": i_p_mem,
           "i_pri_up_memory": i_p_up_mem,
           }
-        pnr_dyn.update(node_results[nname])
-        node_results[nname] = pnr_dyn
+        pnr_dyn.update(node_results[ninfo.name])
+        node_results[ninfo.name] = pnr_dyn
 
     return node_results
 
   @staticmethod
-  def _ComputeInstanceData(cluster_info, i_list):
+  def _ComputeInstanceData(cfg, cluster_info, i_list):
     """Compute global instance data.
 
     """
@@ -602,7 +604,8 @@ class IAllocator(object):
         "memory": beinfo[constants.BE_MAXMEM],
         "spindle_use": beinfo[constants.BE_SPINDLE_USE],
         "os": iinfo.os,
-        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
+        "nodes": [cfg.GetNodeName(iinfo.primary_node)] +
+                 cfg.GetNodeNames(iinfo.secondary_nodes),
         "nics": nic_data,
         "disks": [{constants.IDISK_SIZE: dsk.size,
                    constants.IDISK_MODE: dsk.mode,
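
Internally the relocation request now carries node UUIDs, while the JSON
handed to the iallocator plugin keeps using names, so the external interface
stays stable. A hedged sketch of building such a request from an LU, assuming
the keyword-argument constructors used elsewhere in masterd (the surrounding
LU code is not shown in this hunk):

    req = iallocator.IAReqRelocate(
      name=self.instance.name,
      relocate_from_node_uuids=list(self.instance.secondary_nodes))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
    ial.Run(self.op.iallocator)
    # ial.result still contains node *names*, as produced by the plugin
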
index 0954011..8bb5e5d 100644 (file)
@@ -134,13 +134,13 @@ class ImportExportCbBase(object):
 class _DiskImportExportBase(object):
   MODE_TEXT = None
 
-  def __init__(self, lu, node_name, opts,
+  def __init__(self, lu, node_uuid, opts,
                instance, component, timeouts, cbs, private=None):
     """Initializes this class.
 
     @param lu: Logical unit instance
-    @type node_name: string
-    @param node_name: Node name for import
+    @type node_uuid: string
+    @param node_uuid: Node UUID for import
     @type opts: L{objects.ImportExportOptions}
     @param opts: Import/export daemon options
     @type instance: L{objects.Instance}
@@ -157,7 +157,8 @@ class _DiskImportExportBase(object):
     assert self.MODE_TEXT
 
     self._lu = lu
-    self.node_name = node_name
+    self.node_uuid = node_uuid
+    self.node_name = lu.cfg.GetNodeName(node_uuid)
     self._opts = opts.Copy()
     self._instance = instance
     self._component = component
@@ -293,12 +294,12 @@ class _DiskImportExportBase(object):
     """
     if self._daemon_name:
       self._lu.LogWarning("Aborting %s '%s' on %s",
-                          self.MODE_TEXT, self._daemon_name, self.node_name)
-      result = self._lu.rpc.call_impexp_abort(self.node_name, self._daemon_name)
+                          self.MODE_TEXT, self._daemon_name, self.node_uuid)
+      result = self._lu.rpc.call_impexp_abort(self.node_uuid, self._daemon_name)
       if result.fail_msg:
         self._lu.LogWarning("Failed to abort %s '%s' on %s: %s",
                             self.MODE_TEXT, self._daemon_name,
-                            self.node_name, result.fail_msg)
+                            self.node_uuid, result.fail_msg)
         return False
 
     return True
@@ -374,7 +375,7 @@ class _DiskImportExportBase(object):
 
       # TODO: Log remote peer
       logging.debug("%s '%s' on %s is now connected",
-                    self.MODE_TEXT, self._daemon_name, self.node_name)
+                    self.MODE_TEXT, self._daemon_name, self.node_uuid)
 
       self._cbs.ReportConnected(self, self._private)
 
@@ -440,14 +441,15 @@ class _DiskImportExportBase(object):
 
     if success:
       logging.info("%s '%s' on %s succeeded", self.MODE_TEXT,
-                   self._daemon_name, self.node_name)
+                   self._daemon_name, self.node_uuid)
     elif self._daemon_name:
       self._lu.LogWarning("%s '%s' on %s failed: %s",
-                          self.MODE_TEXT, self._daemon_name, self.node_name,
+                          self.MODE_TEXT, self._daemon_name,
+                          self._lu.cfg.GetNodeName(self.node_uuid),
                           message)
     else:
       self._lu.LogWarning("%s on %s failed: %s", self.MODE_TEXT,
-                          self.node_name, message)
+                          self._lu.cfg.GetNodeName(self.node_uuid), message)
 
     self._cbs.ReportFinished(self, self._private)
 
@@ -455,7 +457,7 @@ class _DiskImportExportBase(object):
     """Makes the RPC call to finalize this import/export.
 
     """
-    return self._lu.rpc.call_impexp_cleanup(self.node_name, self._daemon_name)
+    return self._lu.rpc.call_impexp_cleanup(self.node_uuid, self._daemon_name)
 
   def Finalize(self, error=None):
     """Finalizes this import/export.
@@ -463,13 +465,13 @@ class _DiskImportExportBase(object):
     """
     if self._daemon_name:
       logging.info("Finalizing %s '%s' on %s",
-                   self.MODE_TEXT, self._daemon_name, self.node_name)
+                   self.MODE_TEXT, self._daemon_name, self.node_uuid)
 
       result = self._Finalize()
       if result.fail_msg:
         self._lu.LogWarning("Failed to finalize %s '%s' on %s: %s",
                             self.MODE_TEXT, self._daemon_name,
-                            self.node_name, result.fail_msg)
+                            self.node_uuid, result.fail_msg)
         return False
 
       # Daemon is no longer running
@@ -485,13 +487,13 @@ class _DiskImportExportBase(object):
 class DiskImport(_DiskImportExportBase):
   MODE_TEXT = "import"
 
-  def __init__(self, lu, node_name, opts, instance, component,
+  def __init__(self, lu, node_uuid, opts, instance, component,
                dest, dest_args, timeouts, cbs, private=None):
     """Initializes this class.
 
     @param lu: Logical unit instance
-    @type node_name: string
-    @param node_name: Node name for import
+    @type node_uuid: string
+    @param node_uuid: Node UUID for import
     @type opts: L{objects.ImportExportOptions}
     @param opts: Import/export daemon options
     @type instance: L{objects.Instance}
@@ -507,7 +509,7 @@ class DiskImport(_DiskImportExportBase):
     @param private: Private data for callback functions
 
     """
-    _DiskImportExportBase.__init__(self, lu, node_name, opts, instance,
+    _DiskImportExportBase.__init__(self, lu, node_uuid, opts, instance,
                                    component, timeouts, cbs, private)
     self._dest = dest
     self._dest_args = dest_args
@@ -529,7 +531,7 @@ class DiskImport(_DiskImportExportBase):
     """Starts the import daemon.
 
     """
-    return self._lu.rpc.call_import_start(self.node_name, self._opts,
+    return self._lu.rpc.call_import_start(self.node_uuid, self._opts,
                                           self._instance, self._component,
                                           (self._dest, self._dest_args))
 
@@ -550,7 +552,7 @@ class DiskImport(_DiskImportExportBase):
       self._ts_listening = time.time()
 
       logging.debug("Import '%s' on %s is now listening on port %s",
-                    self._daemon_name, self.node_name, port)
+                    self._daemon_name, self.node_uuid, port)
 
       self._cbs.ReportListening(self, self._private, self._component)
 
@@ -576,14 +578,14 @@ class DiskImport(_DiskImportExportBase):
 class DiskExport(_DiskImportExportBase):
   MODE_TEXT = "export"
 
-  def __init__(self, lu, node_name, opts, dest_host, dest_port,
+  def __init__(self, lu, node_uuid, opts, dest_host, dest_port,
                instance, component, source, source_args,
                timeouts, cbs, private=None):
     """Initializes this class.
 
     @param lu: Logical unit instance
-    @type node_name: string
-    @param node_name: Node name for import
+    @type node_uuid: string
+    @param node_uuid: Node UUID for export
     @type opts: L{objects.ImportExportOptions}
     @param opts: Import/export daemon options
     @type dest_host: string
@@ -603,7 +605,7 @@ class DiskExport(_DiskImportExportBase):
     @param private: Private data for callback functions
 
     """
-    _DiskImportExportBase.__init__(self, lu, node_name, opts, instance,
+    _DiskImportExportBase.__init__(self, lu, node_uuid, opts, instance,
                                    component, timeouts, cbs, private)
     self._dest_host = dest_host
     self._dest_port = dest_port
@@ -614,7 +616,7 @@ class DiskExport(_DiskImportExportBase):
     """Starts the export daemon.
 
     """
-    return self._lu.rpc.call_export_start(self.node_name, self._opts,
+    return self._lu.rpc.call_export_start(self.node_uuid, self._opts,
                                           self._dest_host, self._dest_port,
                                           self._instance, self._component,
                                           (self._source, self._source_args))
@@ -823,8 +825,8 @@ class ImportExportLoop:
 
 
 class _TransferInstCbBase(ImportExportCbBase):
-  def __init__(self, lu, feedback_fn, instance, timeouts, src_node, src_cbs,
-               dest_node, dest_ip):
+  def __init__(self, lu, feedback_fn, instance, timeouts, src_node_uuid,
+               src_cbs, dest_node_uuid, dest_ip):
     """Initializes this class.
 
     """
@@ -834,9 +836,9 @@ class _TransferInstCbBase(ImportExportCbBase):
     self.feedback_fn = feedback_fn
     self.instance = instance
     self.timeouts = timeouts
-    self.src_node = src_node
+    self.src_node_uuid = src_node_uuid
     self.src_cbs = src_cbs
-    self.dest_node = dest_node
+    self.dest_node_uuid = dest_node_uuid
     self.dest_ip = dest_ip
 
 
@@ -901,7 +903,7 @@ class _TransferInstDestCb(_TransferInstCbBase):
     self.feedback_fn("%s is now listening, starting export" % dtp.data.name)
 
     # Start export on source node
-    de = DiskExport(self.lu, self.src_node, dtp.export_opts,
+    de = DiskExport(self.lu, self.src_node_uuid, dtp.export_opts,
                     self.dest_ip, ie.listen_port, self.instance,
                     component, dtp.data.src_io, dtp.data.src_ioargs,
                     self.timeouts, self.src_cbs, private=dtp)
@@ -914,7 +916,8 @@ class _TransferInstDestCb(_TransferInstCbBase):
 
     """
     self.feedback_fn("%s is receiving data on %s" %
-                     (dtp.data.name, self.dest_node))
+                     (dtp.data.name,
+                      self.lu.cfg.GetNodeName(self.dest_node_uuid)))
 
   def ReportFinished(self, ie, dtp):
     """Called when a transfer has finished.
@@ -1003,16 +1006,16 @@ def _GetInstDiskMagic(base, instance_name, index):
   return h.hexdigest()
 
 
-def TransferInstanceData(lu, feedback_fn, src_node, dest_node, dest_ip,
-                         instance, all_transfers):
+def TransferInstanceData(lu, feedback_fn, src_node_uuid, dest_node_uuid,
+                         dest_ip, instance, all_transfers):
   """Transfers an instance's data from one node to another.
 
   @param lu: Logical unit instance
   @param feedback_fn: Feedback function
-  @type src_node: string
-  @param src_node: Source node name
-  @type dest_node: string
-  @param dest_node: Destination node name
+  @type src_node_uuid: string
+  @param src_node_uuid: Source node UUID
+  @type dest_node_uuid: string
+  @param dest_node_uuid: Destination node UUID
   @type dest_ip: string
   @param dest_ip: IP address of destination node
   @type instance: L{objects.Instance}
@@ -1027,14 +1030,18 @@ def TransferInstanceData(lu, feedback_fn, src_node, dest_node, dest_ip,
   # Disable compression for all moves as these are all within the same cluster
   compress = constants.IEC_NONE
 
+  src_node_name = lu.cfg.GetNodeName(src_node_uuid)
+  dest_node_name = lu.cfg.GetNodeName(dest_node_uuid)
+
   logging.debug("Source node %s, destination node %s, compression '%s'",
-                src_node, dest_node, compress)
+                src_node_name, dest_node_name, compress)
 
   timeouts = ImportExportTimeouts(constants.DISK_TRANSFER_CONNECT_TIMEOUT)
   src_cbs = _TransferInstSourceCb(lu, feedback_fn, instance, timeouts,
-                                  src_node, None, dest_node, dest_ip)
+                                  src_node_uuid, None, dest_node_uuid, dest_ip)
   dest_cbs = _TransferInstDestCb(lu, feedback_fn, instance, timeouts,
-                                 src_node, src_cbs, dest_node, dest_ip)
+                                 src_node_uuid, src_cbs, dest_node_uuid,
+                                 dest_ip)
 
   all_dtp = []
 
@@ -1045,7 +1052,7 @@ def TransferInstanceData(lu, feedback_fn, src_node, dest_node, dest_ip,
     for idx, transfer in enumerate(all_transfers):
       if transfer:
         feedback_fn("Exporting %s from %s to %s" %
-                    (transfer.name, src_node, dest_node))
+                    (transfer.name, src_node_name, dest_node_name))
 
         magic = _GetInstDiskMagic(base_magic, instance.name, idx)
         opts = objects.ImportExportOptions(key_name=None, ca_pem=None,
@@ -1053,7 +1060,7 @@ def TransferInstanceData(lu, feedback_fn, src_node, dest_node, dest_ip,
 
         dtp = _DiskTransferPrivate(transfer, True, opts)
 
-        di = DiskImport(lu, dest_node, opts, instance, "disk%d" % idx,
+        di = DiskImport(lu, dest_node_uuid, opts, instance, "disk%d" % idx,
                         transfer.dest_io, transfer.dest_ioargs,
                         timeouts, dest_cbs, private=dtp)
         ieloop.Add(di)
@@ -1217,7 +1224,7 @@ class ExportInstanceHelper:
 
     """
     instance = self._instance
-    src_node = instance.primary_node
+    src_node_uuid = instance.primary_node
 
     assert len(self._snap_disks) == len(instance.disks)
 
@@ -1242,14 +1249,14 @@ class ExportInstanceHelper:
 
     # Actually export data
     dresults = TransferInstanceData(self._lu, self._feedback_fn,
-                                    src_node, dest_node.name,
+                                    src_node_uuid, dest_node.uuid,
                                     dest_node.secondary_ip,
                                     instance, transfers)
 
     assert len(dresults) == len(instance.disks)
 
     self._feedback_fn("Finalizing export on %s" % dest_node.name)
-    result = self._lu.rpc.call_finalize_export(dest_node.name, instance,
+    result = self._lu.rpc.call_finalize_export(dest_node.uuid, instance,
                                                self._snap_disks)
     msg = result.fail_msg
     fin_resu = not msg
index f523f2a..8fdce13 100644 (file)
@@ -358,7 +358,7 @@ class TaggableObject(ConfigObject):
 class MasterNetworkParameters(ConfigObject):
   """Network configuration parameters for the master
 
-  @ivar name: master name
+  @ivar uuid: master node's UUID
   @ivar ip: master IP
   @ivar netmask: master netmask
   @ivar netdev: master network device
@@ -366,7 +366,7 @@ class MasterNetworkParameters(ConfigObject):
 
   """
   __slots__ = [
-    "name",
+    "uuid",
     "ip",
     "netmask",
     "netdev",
@@ -578,7 +578,7 @@ class Disk(ConfigObject):
           return True
     return self.dev_type == dev_type
 
-  def GetNodes(self, node):
+  def GetNodes(self, node_uuid):
     """This function returns the nodes this device lives on.
 
     Given the node on which the parent of the device lives on (or, in
@@ -590,26 +590,26 @@ class Disk(ConfigObject):
     if self.dev_type in [constants.LD_LV, constants.LD_FILE,
                          constants.LD_BLOCKDEV, constants.LD_RBD,
                          constants.LD_EXT]:
-      result = [node]
+      result = [node_uuid]
     elif self.dev_type in constants.LDS_DRBD:
       result = [self.logical_id[0], self.logical_id[1]]
-      if node not in result:
+      if node_uuid not in result:
         raise errors.ConfigurationError("DRBD device passed unknown node")
     else:
       raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
     return result
 
-  def ComputeNodeTree(self, parent_node):
+  def ComputeNodeTree(self, parent_node_uuid):
     """Compute the node/disk tree for this disk and its children.
 
     This method, given the node on which the parent disk lives, will
-    return the list of all (node, disk) pairs which describe the disk
+    return the list of all (node UUID, disk) pairs which describe the disk
     tree in the most compact way. For example, a drbd/lvm stack
     will be returned as (primary_node, drbd) and (secondary_node, drbd)
     which represents all the top-level devices on the nodes.
 
     """
-    my_nodes = self.GetNodes(parent_node)
+    my_nodes = self.GetNodes(parent_node_uuid)
     result = [(node, self) for node in my_nodes]
     if not self.children:
       # leaf device
@@ -701,7 +701,7 @@ class Disk(ConfigObject):
         child.UnsetSize()
     self.size = 0
 
-  def SetPhysicalID(self, target_node, nodes_ip):
+  def SetPhysicalID(self, target_node_uuid, nodes_ip):
     """Convert the logical ID to the physical ID.
 
     This is used only for drbd, which needs ip/port configuration.
@@ -711,7 +711,7 @@ class Disk(ConfigObject):
     node.
 
     Arguments:
-      - target_node: the node we wish to configure for
+      - target_node_uuid: the node UUID we wish to configure for
       - nodes_ip: a mapping of node name to ip
 
     The target_node must exist in in nodes_ip, and must be one of the
@@ -721,23 +721,23 @@ class Disk(ConfigObject):
     """
     if self.children:
       for child in self.children:
-        child.SetPhysicalID(target_node, nodes_ip)
+        child.SetPhysicalID(target_node_uuid, nodes_ip)
 
     if self.logical_id is None and self.physical_id is not None:
       return
     if self.dev_type in constants.LDS_DRBD:
-      pnode, snode, port, pminor, sminor, secret = self.logical_id
-      if target_node not in (pnode, snode):
+      pnode_uuid, snode_uuid, port, pminor, sminor, secret = self.logical_id
+      if target_node_uuid not in (pnode_uuid, snode_uuid):
         raise errors.ConfigurationError("DRBD device not knowing node %s" %
-                                        target_node)
-      pnode_ip = nodes_ip.get(pnode, None)
-      snode_ip = nodes_ip.get(snode, None)
+                                        target_node_uuid)
+      pnode_ip = nodes_ip.get(pnode_uuid, None)
+      snode_ip = nodes_ip.get(snode_uuid, None)
       if pnode_ip is None or snode_ip is None:
         raise errors.ConfigurationError("Can't find primary or secondary node"
                                         " for %s" % str(self))
       p_data = (pnode_ip, port)
       s_data = (snode_ip, port)
-      if pnode == target_node:
+      if pnode_uuid == target_node_uuid:
         self.physical_id = p_data + s_data + (pminor, secret)
       else: # it must be secondary, we tested above
         self.physical_id = s_data + p_data + (sminor, secret)
@@ -1118,7 +1118,7 @@ class Instance(TaggableObject):
         'node' : ['lv', ...] data.
 
     @return: None if lvmap arg is given, otherwise, a dictionary of
-        the form { 'nodename' : ['volume1', 'volume2', ...], ... };
+        the form { 'node_uuid' : ['volume1', 'volume2', ...], ... };
         volumeN is of the form "vg_name/lv_name", compatible with
         GetVolumeList()
 
index 918a273..0f61f94 100644 (file)
@@ -69,6 +69,9 @@ _PIgnoreOfflineNodes = ("ignore_offline_nodes", False, ht.TBool,
 #: a required node name (for single-node LUs)
 _PNodeName = ("node_name", ht.NoDefault, ht.TNonEmptyString, "Node name")
 
+#: a node UUID (for use with _PNodeName)
+_PNodeUuid = ("node_uuid", None, ht.TMaybeString, "Node UUID")
+
 #: a required node group name (for single-group LUs)
 _PGroupName = ("group_name", ht.NoDefault, ht.TNonEmptyString, "Group name")
 
@@ -133,6 +136,9 @@ _PNoRemember = ("no_remember", False, ht.TBool,
 _PMigrationTargetNode = ("target_node", None, ht.TMaybeString,
                          "Target node for shared-storage instances")
 
+_PMigrationTargetNodeUuid = ("target_node_uuid", None, ht.TMaybeString,
+                             "Target node UUID for shared-storage instances")
+
 _PStartupPaused = ("startup_paused", False, ht.TBool,
                    "Pause instance at startup")
 
@@ -1051,7 +1057,9 @@ class OpOobCommand(OpCode):
   """Interact with OOB."""
   OP_PARAMS = [
     ("node_names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
-     "List of nodes to run the OOB command against"),
+     "List of node names to run the OOB command against"),
+    ("node_uuids", None, ht.TMaybeListOf(ht.TNonEmptyString),
+     "List of node UUIDs to run the OOB command against"),
     ("command", ht.NoDefault, ht.TElemOf(constants.OOB_COMMANDS),
      "OOB command to be run"),
     ("timeout", constants.OOB_TIMEOUT, ht.TInt,
@@ -1073,6 +1081,8 @@ class OpRestrictedCommand(OpCode):
     _PUseLocking,
     ("nodes", ht.NoDefault, ht.TListOf(ht.TNonEmptyString),
      "Nodes on which the command should be run (at least one)"),
+    ("node_uuids", None, ht.TMaybeListOf(ht.TNonEmptyString),
+     "Node UUIDs on which the command should be run (at least one)"),
     ("command", ht.NoDefault, ht.TNonEmptyString,
      "Command name (no parameters)"),
     ]
@@ -1100,6 +1110,7 @@ class OpNodeRemove(OpCode):
   OP_DSC_FIELD = "node_name"
   OP_PARAMS = [
     _PNodeName,
+    _PNodeUuid,
     ]
   OP_RESULT = ht.TNone
 
@@ -1187,6 +1198,7 @@ class OpNodeModifyStorage(OpCode):
   OP_DSC_FIELD = "node_name"
   OP_PARAMS = [
     _PNodeName,
+    _PNodeUuid,
     _PStorageType,
     _PStorageName,
     ("changes", ht.NoDefault, ht.TDict, "Requested changes"),
@@ -1199,6 +1211,7 @@ class OpRepairNodeStorage(OpCode):
   OP_DSC_FIELD = "node_name"
   OP_PARAMS = [
     _PNodeName,
+    _PNodeUuid,
     _PStorageType,
     _PStorageName,
     _PIgnoreConsistency,
@@ -1211,6 +1224,7 @@ class OpNodeSetParams(OpCode):
   OP_DSC_FIELD = "node_name"
   OP_PARAMS = [
     _PNodeName,
+    _PNodeUuid,
     _PForce,
     _PHvState,
     _PDiskState,
@@ -1240,6 +1254,7 @@ class OpNodePowercycle(OpCode):
   OP_DSC_FIELD = "node_name"
   OP_PARAMS = [
     _PNodeName,
+    _PNodeUuid,
     _PForce,
     ]
   OP_RESULT = ht.TMaybeString
@@ -1250,9 +1265,11 @@ class OpNodeMigrate(OpCode):
   OP_DSC_FIELD = "node_name"
   OP_PARAMS = [
     _PNodeName,
+    _PNodeUuid,
     _PMigrationMode,
     _PMigrationLive,
     _PMigrationTargetNode,
+    _PMigrationTargetNodeUuid,
     _PAllowRuntimeChgs,
     _PIgnoreIpolicy,
     _PIAllocFromDesc("Iallocator for deciding the target node"
@@ -1267,7 +1284,9 @@ class OpNodeEvacuate(OpCode):
   OP_PARAMS = [
     _PEarlyRelease,
     _PNodeName,
+    _PNodeUuid,
     ("remote_node", None, ht.TMaybeString, "New secondary node"),
+    ("remote_node_uuid", None, ht.TMaybeString, "New secondary node UUID"),
     _PIAllocFromDesc("Iallocator for computing solution"),
     ("mode", ht.NoDefault, ht.TElemOf(constants.NODE_EVAC_MODES),
      "Node evacuation mode"),
@@ -1333,7 +1352,9 @@ class OpInstanceCreate(OpCode):
     ("osparams", ht.EmptyDict, ht.TDict, "OS parameters for instance"),
     ("os_type", None, ht.TMaybeString, "Operating system"),
     ("pnode", None, ht.TMaybeString, "Primary node"),
+    ("pnode_uuid", None, ht.TMaybeString, "Primary node UUID"),
     ("snode", None, ht.TMaybeString, "Secondary node"),
+    ("snode_uuid", None, ht.TMaybeString, "Secondary node UUID"),
     ("source_handshake", None, ht.TMaybe(ht.TList),
      "Signed handshake from source (remote import only)"),
     ("source_instance_name", None, ht.TMaybeString,
@@ -1344,6 +1365,7 @@ class OpInstanceCreate(OpCode):
     ("source_x509_ca", None, ht.TMaybeString,
      "Source X509 CA in PEM format (remote import only)"),
     ("src_node", None, ht.TMaybeString, "Source node for import"),
+    ("src_node_uuid", None, ht.TMaybeString, "Source node UUID for import"),
     ("src_path", None, ht.TMaybeString, "Source directory for import"),
     ("start", True, ht.TBool, "Whether to start instance after creation"),
     ("tags", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), "Instance tags"),
@@ -1502,6 +1524,7 @@ class OpInstanceReplaceDisks(OpCode):
     ("disks", ht.EmptyList, ht.TListOf(ht.TNonNegativeInt),
      "Disk indexes"),
     ("remote_node", None, ht.TMaybeString, "New secondary node"),
+    ("remote_node_uuid", None, ht.TMaybeString, "New secondary node UUID"),
     _PIAllocFromDesc("Iallocator for deciding new secondary node"),
     ]
   OP_RESULT = ht.TNone
@@ -1515,6 +1538,7 @@ class OpInstanceFailover(OpCode):
     _PShutdownTimeout,
     _PIgnoreConsistency,
     _PMigrationTargetNode,
+    _PMigrationTargetNodeUuid,
     _PIgnoreIpolicy,
     _PIAllocFromDesc("Iallocator for deciding the target node for"
                      " shared-storage instances"),
@@ -1538,6 +1562,7 @@ class OpInstanceMigrate(OpCode):
     _PMigrationMode,
     _PMigrationLive,
     _PMigrationTargetNode,
+    _PMigrationTargetNodeUuid,
     _PAllowRuntimeChgs,
     _PIgnoreIpolicy,
     ("cleanup", False, ht.TBool,
@@ -1566,6 +1591,7 @@ class OpInstanceMove(OpCode):
     _PShutdownTimeout,
     _PIgnoreIpolicy,
     ("target_node", ht.NoDefault, ht.TNonEmptyString, "Target node"),
+    ("target_node_uuid", None, ht.TMaybeString, "Target node UUID"),
     _PIgnoreConsistency,
     ]
   OP_RESULT = ht.TNone
@@ -1620,6 +1646,8 @@ class OpInstanceRecreateDisks(OpCode):
      " index and a possibly empty dictionary with disk parameter changes"),
     ("nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
      "New instance nodes, if relocation is desired"),
+    ("node_uuids", None, ht.TMaybeListOf(ht.TNonEmptyString),
+     "New instance node UUIDs, if relocation is desired"),
     _PIAllocFromDesc("Iallocator for deciding new nodes"),
     ]
   OP_RESULT = ht.TNone
@@ -1707,8 +1735,11 @@ class OpInstanceSetParams(OpCode):
     ("disk_template", None, ht.TMaybe(_BuildDiskTemplateCheck(False)),
      "Disk template for instance"),
     ("pnode", None, ht.TMaybeString, "New primary node"),
+    ("pnode_uuid", None, ht.TMaybeString, "New primary node UUID"),
     ("remote_node", None, ht.TMaybeString,
      "Secondary node (used when changing disk template)"),
+    ("remote_node_uuid", None, ht.TMaybeString,
+     "Secondary node UUID (used when changing disk template)"),
     ("os_name", None, ht.TMaybeString,
      "Change the instance's OS without reinstalling the instance"),
     ("osparams", None, ht.TMaybeDict, "Per-instance OS parameters"),
@@ -1773,6 +1804,8 @@ class OpGroupAssignNodes(OpCode):
     _PForce,
     ("nodes", ht.NoDefault, ht.TListOf(ht.TNonEmptyString),
      "List of nodes to assign"),
+    ("node_uuids", None, ht.TMaybeListOf(ht.TNonEmptyString),
+     "List of node UUIDs to assign"),
     ]
   OP_RESULT = ht.TNone
 
@@ -1908,6 +1941,8 @@ class OpBackupExport(OpCode):
     # (e.g. "destination")
     ("target_node", ht.NoDefault, ht.TOr(ht.TNonEmptyString, ht.TList),
      "Destination information, depends on export mode"),
+    ("target_node_uuid", None, ht.TMaybeString,
+     "Target node UUID (if local export)"),
     ("shutdown", True, ht.TBool, "Whether to shutdown instance before export"),
     ("remove_instance", False, ht.TBool,
      "Whether to remove instance after export"),
@@ -2015,6 +2050,7 @@ class OpTestDelay(OpCode):
     ("duration", ht.NoDefault, ht.TNumber, None),
     ("on_master", True, ht.TBool, None),
     ("on_nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), None),
+    ("on_node_uuids", None, ht.TMaybeListOf(ht.TNonEmptyString), None),
     ("repeat", 0, ht.TNonNegativeInt, None),
     ]
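
The recurring pattern in these opcode changes is that every external
node-name parameter gains a companion UUID field, which is None on submission
and gets filled in by the corresponding LU during ExpandNames. A hedged
illustration (the UUID value is made up):

    op = opcodes.OpNodePowercycle(node_name="node2.example.com", force=True)
    # node_uuid defaults to None; ExpandNames resolves it, e.g.
    #   op.node_uuid == "9a12d554-75c0-4cb1-8064-103365145db0"
    # so a node rename between queueing and execution can be detected.
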
 
index 1ff701c..524d298 100644 (file)
@@ -1091,14 +1091,14 @@ class NodeQueryData:
   """Data container for node data queries.
 
   """
-  def __init__(self, nodes, live_data, master_name, node_to_primary,
+  def __init__(self, nodes, live_data, master_uuid, node_to_primary,
                node_to_secondary, groups, oob_support, cluster):
     """Initializes this class.
 
     """
     self.nodes = nodes
     self.live_data = live_data
-    self.master_name = master_name
+    self.master_uuid = master_uuid
     self.node_to_primary = node_to_primary
     self.node_to_secondary = node_to_secondary
     self.groups = groups
@@ -1123,7 +1123,7 @@ class NodeQueryData:
       else:
         self.ndparams = self.cluster.FillND(node, group)
       if self.live_data:
-        self.curlive_data = self.live_data.get(node.name, None)
+        self.curlive_data = self.live_data.get(node.uuid, None)
       else:
         self.curlive_data = None
       yield node
@@ -1218,7 +1218,7 @@ def _GetNodePower(ctx, node):
   @param node: Node object
 
   """
-  if ctx.oob_support[node.name]:
+  if ctx.oob_support[node.uuid]:
     return node.powered
 
   return _FS_UNAVAIL
@@ -1329,7 +1329,7 @@ def _BuildNodeFields():
     (_MakeField("tags", "Tags", QFT_OTHER, "Tags"), NQ_CONFIG, 0,
      lambda ctx, node: list(node.GetTags())),
     (_MakeField("master", "IsMaster", QFT_BOOL, "Whether node is master"),
-     NQ_CONFIG, 0, lambda ctx, node: node.name == ctx.master_name),
+     NQ_CONFIG, 0, lambda ctx, node: node.uuid == ctx.master_uuid),
     (_MakeField("group", "Group", QFT_TEXT, "Node group"), NQ_GROUP, 0,
      _GetGroup(_GetNodeGroup)),
     (_MakeField("group.uuid", "GroupUUID", QFT_TEXT, "UUID of node group"),
@@ -1359,14 +1359,14 @@ def _BuildNodeFields():
               " \"%s\" for regular, \"%s\" for drained, \"%s\" for offline" %
               role_values)
   fields.append((_MakeField("role", "Role", QFT_TEXT, role_doc), NQ_CONFIG, 0,
-                 lambda ctx, node: _GetNodeRole(node, ctx.master_name)))
+                 lambda ctx, node: _GetNodeRole(node, ctx.master_uuid)))
   assert set(role_values) == constants.NR_ALL
 
   def _GetLength(getter):
-    return lambda ctx, node: len(getter(ctx)[node.name])
+    return lambda ctx, node: len(getter(ctx)[node.uuid])
 
   def _GetList(getter):
-    return lambda ctx, node: utils.NiceSort(list(getter(ctx)[node.name]))
+    return lambda ctx, node: utils.NiceSort(list(getter(ctx)[node.uuid]))
 
   # Add fields operating on instance lists
   for prefix, titleprefix, docword, getter in \
@@ -1404,40 +1404,42 @@ class InstanceQueryData:
   """Data container for instance data queries.
 
   """
-  def __init__(self, instances, cluster, disk_usage, offline_nodes, bad_nodes,
-               live_data, wrongnode_inst, console, nodes, groups, networks):
+  def __init__(self, instances, cluster, disk_usage, offline_node_uuids,
+               bad_node_uuids, live_data, wrongnode_inst, console, nodes,
+               groups, networks):
     """Initializes this class.
 
     @param instances: List of instance objects
     @param cluster: Cluster object
     @type disk_usage: dict; instance name as key
     @param disk_usage: Per-instance disk usage
-    @type offline_nodes: list of strings
-    @param offline_nodes: List of offline nodes
-    @type bad_nodes: list of strings
-    @param bad_nodes: List of faulty nodes
+    @type offline_node_uuids: list of strings
+    @param offline_node_uuids: List of offline nodes
+    @type bad_node_uuids: list of strings
+    @param bad_node_uuids: List of faulty nodes
     @type live_data: dict; instance name as key
     @param live_data: Per-instance live data
     @type wrongnode_inst: set
     @param wrongnode_inst: Set of instances running on wrong node(s)
     @type console: dict; instance name as key
     @param console: Per-instance console information
-    @type nodes: dict; node name as key
+    @type nodes: dict; node UUID as key
     @param nodes: Node objects
     @type networks: dict; net_uuid as key
     @param networks: Network objects
 
     """
-    assert len(set(bad_nodes) & set(offline_nodes)) == len(offline_nodes), \
+    assert len(set(bad_node_uuids) & set(offline_node_uuids)) == \
+           len(offline_node_uuids), \
            "Offline nodes not included in bad nodes"
-    assert not (set(live_data.keys()) & set(bad_nodes)), \
+    assert not (set(live_data.keys()) & set(bad_node_uuids)), \
            "Found live data for bad or offline nodes"
 
     self.instances = instances
     self.cluster = cluster
     self.disk_usage = disk_usage
-    self.offline_nodes = offline_nodes
-    self.bad_nodes = bad_nodes
+    self.offline_nodes = offline_node_uuids
+    self.bad_nodes = bad_node_uuids
     self.live_data = live_data
     self.wrongnode_inst = wrongnode_inst
     self.console = console
@@ -2023,34 +2025,51 @@ _INST_SIMPLE_FIELDS = {
   }
 
 
-def _GetInstNodeGroup(ctx, default, node_name):
+def _GetNodeName(ctx, default, node_uuid):
+  """Gets node name of a node.
+
+  @type ctx: L{InstanceQueryData}
+  @param default: Default value
+  @type node_uuid: string
+  @param node_uuid: Node UUID
+
+  """
+  try:
+    node = ctx.nodes[node_uuid]
+  except KeyError:
+    return default
+  else:
+    return node.name
+
+
+def _GetInstNodeGroup(ctx, default, node_uuid):
   """Gets group UUID of an instance node.
 
   @type ctx: L{InstanceQueryData}
   @param default: Default value
-  @type node_name: string
-  @param node_name: Node name
+  @type node_uuid: string
+  @param node_uuid: Node UUID
 
   """
   try:
-    node = ctx.nodes[node_name]
+    node = ctx.nodes[node_uuid]
   except KeyError:
     return default
   else:
     return node.group
 
 
-def _GetInstNodeGroupName(ctx, default, node_name):
+def _GetInstNodeGroupName(ctx, default, node_uuid):
   """Gets group name of an instance node.
 
   @type ctx: L{InstanceQueryData}
   @param default: Default value
-  @type node_name: string
-  @param node_name: Node name
+  @type node_uuid: string
+  @param node_uuid: Node UUID
 
   """
   try:
-    node = ctx.nodes[node_name]
+    node = ctx.nodes[node_uuid]
   except KeyError:
     return default
 
@@ -2068,7 +2087,8 @@ def _BuildInstanceFields():
   """
   fields = [
     (_MakeField("pnode", "Primary_node", QFT_TEXT, "Primary node"),
-     IQ_CONFIG, QFF_HOSTNAME, _GetItemAttr("primary_node")),
+     IQ_NODES, QFF_HOSTNAME,
+     lambda ctx, inst: _GetNodeName(ctx, None, inst.primary_node)),
     (_MakeField("pnode.group", "PrimaryNodeGroup", QFT_TEXT,
                 "Primary node's group"),
      IQ_NODES, 0,
@@ -2081,7 +2101,9 @@ def _BuildInstanceFields():
     # TODO: Allow filtering by secondary node as hostname
     (_MakeField("snodes", "Secondary_Nodes", QFT_OTHER,
                 "Secondary nodes; usually this will just be one node"),
-     IQ_CONFIG, 0, lambda ctx, inst: list(inst.secondary_nodes)),
+     IQ_NODES, 0,
+     lambda ctx, inst: map(compat.partial(_GetNodeName, ctx, None),
+                           inst.secondary_nodes)),
     (_MakeField("snodes.group", "SecondaryNodesGroups", QFT_OTHER,
                 "Node groups of secondary nodes"),
      IQ_NODES, 0,
@@ -2577,17 +2599,18 @@ _CLUSTER_VERSION_FIELDS = {
 
 _CLUSTER_SIMPLE_FIELDS = {
   "cluster_name": ("Name", QFT_TEXT, QFF_HOSTNAME, "Cluster name"),
-  "master_node": ("Master", QFT_TEXT, QFF_HOSTNAME, "Master node name"),
   "volume_group_name": ("VgName", QFT_TEXT, 0, "LVM volume group name"),
   }
 
 
 class ClusterQueryData:
-  def __init__(self, cluster, drain_flag, watcher_pause):
+  def __init__(self, cluster, nodes, drain_flag, watcher_pause):
     """Initializes this class.
 
     @type cluster: L{objects.Cluster}
     @param cluster: Instance of cluster object
+    @type nodes: dict; node UUID as key
+    @param nodes: Node objects
     @type drain_flag: bool
     @param drain_flag: Whether job queue is drained
     @type watcher_pause: number
@@ -2595,6 +2618,7 @@ class ClusterQueryData:
 
     """
     self._cluster = cluster
+    self.nodes = nodes
     self.drain_flag = drain_flag
     self.watcher_pause = watcher_pause
 
@@ -2628,6 +2652,9 @@ def _BuildClusterFields():
     (_MakeField("watcher_pause", "WatcherPause", QFT_TIMESTAMP,
                 "Until when watcher is paused"), CQ_WATCHER_PAUSE, 0,
      _ClusterWatcherPause),
+    (_MakeField("master_node", "Master", QFT_TEXT, "Master node name"),
+     CQ_CONFIG, QFF_HOSTNAME,
+     lambda ctx, cluster: _GetNodeName(ctx, None, cluster.master_node)),
     ]
 
   # Simple fields
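
Since the cluster object now records the master node by UUID, the cluster
query needs the node objects to render the familiar name for the master_node
field. A hedged sketch of feeding the data container (the real call sites
live elsewhere in cmdlib and the query server):

    cqd = query.ClusterQueryData(cluster, cfg.GetAllNodesInfo(),
                                 drain_flag=False, watcher_pause=None)
    # the "master_node" field looks up cluster.master_node (a UUID) in
    # cqd.nodes and still returns the node's name to clients
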
index ad8396a..e85fa32 100644 (file)
@@ -275,7 +275,7 @@ def _SsconfResolver(ssconf_ips, node_list, _,
     ip = ipmap.get(node)
     if ip is None:
       ip = nslookup_fn(node, family=family)
-    result.append((node, ip))
+    result.append((node, ip, node))
 
   return result
 
@@ -292,54 +292,60 @@ class _StaticResolver:
 
     """
     assert len(hosts) == len(self._addresses)
-    return zip(hosts, self._addresses)
+    return zip(hosts, self._addresses, hosts)
 
 
-def _CheckConfigNode(name, node, accept_offline_node):
+def _CheckConfigNode(node_uuid_or_name, node, accept_offline_node):
   """Checks if a node is online.
 
-  @type name: string
-  @param name: Node name
+  @type node_uuid_or_name: string
+  @param node_uuid_or_name: Node UUID or name
   @type node: L{objects.Node} or None
   @param node: Node object
 
   """
   if node is None:
-    # Depend on DNS for name resolution
-    ip = name
-  elif node.offline and not accept_offline_node:
-    ip = _OFFLINE
+    # Assume that the passed parameter was actually a node name, so depend on
+    # DNS for name resolution
+    return (node_uuid_or_name, node_uuid_or_name, node_uuid_or_name)
   else:
-    ip = node.primary_ip
-  return (name, ip)
+    if node.offline and not accept_offline_node:
+      ip = _OFFLINE
+    else:
+      ip = node.primary_ip
+    return (node.name, ip, node_uuid_or_name)
 
 
-def _NodeConfigResolver(single_node_fn, all_nodes_fn, hosts, opts):
+def _NodeConfigResolver(single_node_fn, all_nodes_fn, node_uuids, opts):
   """Calculate node addresses using configuration.
 
+  Note that strings in node_uuids are treated as node names if the UUID is not
+  found in the configuration.
+
   """
   accept_offline_node = (opts is rpc_defs.ACCEPT_OFFLINE_NODE)
 
   assert accept_offline_node or opts is None, "Unknown option"
 
   # Special case for single-host lookups
-  if len(hosts) == 1:
-    (name, ) = hosts
-    return [_CheckConfigNode(name, single_node_fn(name), accept_offline_node)]
+  if len(node_uuids) == 1:
+    (uuid, ) = node_uuids
+    return [_CheckConfigNode(uuid, single_node_fn(uuid), accept_offline_node)]
   else:
     all_nodes = all_nodes_fn()
-    return [_CheckConfigNode(name, all_nodes.get(name, None),
+    return [_CheckConfigNode(uuid, all_nodes.get(uuid, None),
                              accept_offline_node)
-            for name in hosts]
+            for uuid in node_uuids]
 
 
 class _RpcProcessor:
   def __init__(self, resolver, port, lock_monitor_cb=None):
     """Initializes this class.
 
-    @param resolver: callable accepting a list of hostnames, returning a list
-      of tuples containing name and IP address (IP address can be the name or
-      the special value L{_OFFLINE} to mark offline machines)
+    @param resolver: callable accepting a list of node UUIDs or hostnames,
+      returning a list of tuples containing name, IP address and the original
+      name or UUID of the resolved node. IP address can be the name or the
+      special value L{_OFFLINE} to mark offline machines.
     @type port: int
     @param port: TCP port
     @param lock_monitor_cb: Callable for registering with lock monitor
@@ -363,19 +369,21 @@ class _RpcProcessor:
     assert isinstance(body, dict)
     assert len(body) == len(hosts)
     assert compat.all(isinstance(v, str) for v in body.values())
-    assert frozenset(map(compat.fst, hosts)) == frozenset(body.keys()), \
+    assert frozenset(map(lambda x: x[2], hosts)) == frozenset(body.keys()), \
         "%s != %s" % (hosts, body.keys())
 
-    for (name, ip) in hosts:
+    for (name, ip, original_name) in hosts:
       if ip is _OFFLINE:
         # Node is marked as offline
-        results[name] = RpcResult(node=name, offline=True, call=procedure)
+        results[original_name] = RpcResult(node=name,
+                                           offline=True,
+                                           call=procedure)
       else:
-        requests[name] = \
+        requests[original_name] = \
           http.client.HttpClientRequest(str(ip), port,
                                         http.HTTP_POST, str("/%s" % procedure),
                                         headers=_RPC_CLIENT_HEADERS,
-                                        post_data=body[name],
+                                        post_data=body[original_name],
                                         read_timeout=read_timeout,
                                         nicename="%s/%s" % (name, procedure),
                                         curl_config_fn=_ConfigRpcCurl)
@@ -406,12 +414,12 @@ class _RpcProcessor:
 
     return results
 
-  def __call__(self, hosts, procedure, body, read_timeout, resolver_opts,
+  def __call__(self, nodes, procedure, body, read_timeout, resolver_opts,
                _req_process_fn=None):
     """Makes an RPC request to a number of nodes.
 
-    @type hosts: sequence
-    @param hosts: Hostnames
+    @type nodes: sequence
+    @param nodes: Node UUIDs or hostnames
     @type procedure: string
     @param procedure: Request path
     @type body: dictionary
@@ -429,7 +437,7 @@ class _RpcProcessor:
       _req_process_fn = http.client.ProcessRequests
 
     (results, requests) = \
-      self._PrepareRequests(self._resolver(hosts, resolver_opts), self._port,
+      self._PrepareRequests(self._resolver(nodes, resolver_opts), self._port,
                             procedure, body, read_timeout)
 
     _req_process_fn(requests.values(), lock_monitor_cb=self._lock_monitor_cb)
@@ -675,29 +683,30 @@ def AnnotateDiskParams(template, disks, disk_params):
   return [annotation_fn(disk.Copy(), ld_params) for disk in disks]
 
 
-def _GetESFlag(cfg, nodename):
-  ni = cfg.GetNodeInfo(nodename)
+def _GetESFlag(cfg, node_uuid):
+  ni = cfg.GetNodeInfo(node_uuid)
   if ni is None:
-    raise errors.OpPrereqError("Invalid node name %s" % nodename,
+    raise errors.OpPrereqError("Invalid node name %s" % node_uuid,
                                errors.ECODE_NOENT)
   return cfg.GetNdParams(ni)[constants.ND_EXCLUSIVE_STORAGE]
 
 
-def GetExclusiveStorageForNodeNames(cfg, nodelist):
+def GetExclusiveStorageForNodes(cfg, node_uuids):
   """Return the exclusive storage flag for all the given nodes.
 
   @type cfg: L{config.ConfigWriter}
   @param cfg: cluster configuration
-  @type nodelist: list or tuple
-  @param nodelist: node names for which to read the flag
+  @type node_uuids: list or tuple
+  @param node_uuids: node UUIDs for which to read the flag
   @rtype: dict
   @return: mapping from node names to exclusive storage flags
-  @raise errors.OpPrereqError: if any given node name has no corresponding node
+  @raise errors.OpPrereqError: if any given node UUID has no corresponding
+      node
 
   """
   getflag = lambda n: _GetESFlag(cfg, n)
-  flags = map(getflag, nodelist)
-  return dict(zip(nodelist, flags))
+  flags = map(getflag, node_uuids)
+  return dict(zip(node_uuids, flags))
 
 
 #: Generic encoders
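
The resolver contract gains a third tuple element, the name or UUID the
caller passed in, and RPC results are keyed by that original value. A small
hedged illustration (addresses are made up):

    resolver = rpc._StaticResolver(["192.0.2.10", "192.0.2.11"])
    resolver(["node1.example.com", "node2.example.com"], None)
    # -> [("node1.example.com", "192.0.2.10", "node1.example.com"),
    #     ("node2.example.com", "192.0.2.11", "node2.example.com")]
    # The config-based resolver yields (node_name, primary_ip, node_uuid) when
    # given UUIDs, so callers get their results back under those same UUIDs.
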
index 5306d2a..07b4645 100644 (file)
@@ -142,6 +142,11 @@ def _NodeInfoPreProc(node, args):
     return args
 
 
+def _DrbdCallsPreProc(node, args):
+  """Add the target node UUID as additional field for DRBD related calls."""
+  return args + [node]
+
+
 def _OsGetPostProc(result):
   """Post-processor for L{rpc.RpcRunner.call_os_get}.
 
@@ -394,17 +399,18 @@ _BLOCKDEV_CALLS = [
   ("drbd_disconnect_net", MULTI, None, constants.RPC_TMO_NORMAL, [
     ("nodes_ip", None, None),
     ("disks", ED_OBJECT_DICT_LIST, None),
-    ], None, None, "Disconnects the network of the given drbd devices"),
+    ], _DrbdCallsPreProc, None,
+   "Disconnects the network of the given drbd devices"),
   ("drbd_attach_net", MULTI, None, constants.RPC_TMO_NORMAL, [
     ("nodes_ip", None, None),
     ("disks", ED_DISKS_DICT_DP, None),
     ("instance_name", None, None),
     ("multimaster", None, None),
-    ], None, None, "Connects the given DRBD devices"),
+    ], _DrbdCallsPreProc, None, "Connects the given DRBD devices"),
   ("drbd_wait_sync", MULTI, None, constants.RPC_TMO_SLOW, [
     ("nodes_ip", None, None),
     ("disks", ED_DISKS_DICT_DP, None),
-    ], None, None,
+    ], _DrbdCallsPreProc, None,
    "Waits for the synchronization of drbd devices is complete"),
   ("blockdev_grow", SINGLE, None, constants.RPC_TMO_NORMAL, [
     ("cf_bdev", ED_SINGLE_DISK_DICT_DP, None),
index 8b52c35..4e34411 100644 (file)
@@ -519,8 +519,8 @@ class GanetiContext(object):
     self.jobqueue.AddNode(node)
 
     # Add the new node to the Ganeti Lock Manager
-    self.glm.add(locking.LEVEL_NODE, node.name)
-    self.glm.add(locking.LEVEL_NODE_RES, node.name)
+    self.glm.add(locking.LEVEL_NODE, node.uuid)
+    self.glm.add(locking.LEVEL_NODE_RES, node.uuid)
 
   def ReaddNode(self, node):
     """Updates a node that's already in the configuration
@@ -529,19 +529,19 @@ class GanetiContext(object):
     # Synchronize the queue again
     self.jobqueue.AddNode(node)
 
-  def RemoveNode(self, name):
+  def RemoveNode(self, node):
     """Removes a node from the configuration and lock manager.
 
     """
     # Remove node from configuration
-    self.cfg.RemoveNode(name)
+    self.cfg.RemoveNode(node.uuid)
 
     # Notify job queue
-    self.jobqueue.RemoveNode(name)
+    self.jobqueue.RemoveNode(node.name)
 
     # Remove the node from the Ganeti Lock Manager
-    self.glm.remove(locking.LEVEL_NODE, name)
-    self.glm.remove(locking.LEVEL_NODE_RES, name)
+    self.glm.remove(locking.LEVEL_NODE, node.uuid)
+    self.glm.remove(locking.LEVEL_NODE_RES, node.uuid)
 
 
 def _SetWatcherPause(context, until):
@@ -602,11 +602,11 @@ def CheckAgreement():
   myself = netutils.Hostname.GetSysName()
   #temp instantiation of a config writer, used only to get the node list
   cfg = config.ConfigWriter()
-  node_list = cfg.GetNodeList()
+  node_names = cfg.GetNodeNames(cfg.GetNodeList())
   del cfg
   retries = 6
   while retries > 0:
-    votes = bootstrap.GatherMasterVotes(node_list)
+    votes = bootstrap.GatherMasterVotes(node_names)
     if not votes:
       # empty node list, this is a one node cluster
       return True
@@ -646,8 +646,9 @@ def ActivateMasterIP():
   master_params = cfg.GetMasterNetworkParameters()
   ems = cfg.GetUseExternalMipScript()
   runner = rpc.BootstrapRunner()
-  result = runner.call_node_activate_master_ip(master_params.name,
-                                               master_params, ems)
+  # we use the node name, as the configuration is only available here yet
+  result = runner.call_node_activate_master_ip(
+             cfg.GetNodeName(master_params.uuid), master_params, ems)
 
   msg = result.fail_msg
   if msg:
index dddff69..386e568 100644 (file)
@@ -414,9 +414,9 @@ class NodeRequestHandler(http.server.HttpServerHandler):
     disk list must all be drbd devices.
 
     """
-    nodes_ip, disks = params
+    nodes_ip, disks, target_node_uuid = params
     disks = [objects.Disk.FromDict(cf) for cf in disks]
-    return backend.DrbdDisconnectNet(nodes_ip, disks)
+    return backend.DrbdDisconnectNet(target_node_uuid, nodes_ip, disks)
 
   @staticmethod
   def perspective_drbd_attach_net(params):
@@ -426,10 +426,10 @@ class NodeRequestHandler(http.server.HttpServerHandler):
     disk list must all be drbd devices.
 
     """
-    nodes_ip, disks, instance_name, multimaster = params
+    nodes_ip, disks, instance_name, multimaster, target_node_uuid = params
     disks = [objects.Disk.FromDict(cf) for cf in disks]
-    return backend.DrbdAttachNet(nodes_ip, disks,
-                                     instance_name, multimaster)
+    return backend.DrbdAttachNet(target_node_uuid, nodes_ip, disks,
+                                 instance_name, multimaster)
 
   @staticmethod
   def perspective_drbd_wait_sync(params):
@@ -439,9 +439,9 @@ class NodeRequestHandler(http.server.HttpServerHandler):
     disk list must all be drbd devices.
 
     """
-    nodes_ip, disks = params
+    nodes_ip, disks, target_node_uuid = params
     disks = [objects.Disk.FromDict(cf) for cf in disks]
-    return backend.DrbdWaitSync(nodes_ip, disks)
+    return backend.DrbdWaitSync(target_node_uuid, nodes_ip, disks)
 
   @staticmethod
   def perspective_drbd_helper(params):
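
The three DRBD handlers above all change in the same way. A purely illustrative restatement of the shared parameter layout (reusing objects.Disk.FromDict from the handlers; the helper name is made up):

def _UnpackDrbdNetParams(params):
  # target_node_uuid is appended to the existing tuple; the serialized disks
  # are turned back into objects before the backend call.
  target_node_uuid = params[-1]
  nodes_ip, disks_raw = params[0], params[1]
  extra = params[2:-1]  # (instance_name, multimaster) for drbd_attach_net
  disks = [objects.Disk.FromDict(d) for d in disks_raw]
  return (target_node_uuid, nodes_ip, disks) + tuple(extra)
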
index 4339ce2..f05d3b5 100644 (file)
@@ -59,6 +59,7 @@ import Ganeti.Logging
 import qualified Ganeti.Constants as C
 import qualified Ganeti.Path as Path
 import Ganeti.Query.Server (prepQueryD, runQueryD)
+import qualified Ganeti.Query.Cluster as QCluster
 import Ganeti.Utils
 
 -- * Types and constants definitions
@@ -137,16 +138,16 @@ gntErrorToResult (Ok x) = Ok x
 
 -- | Computes the node role.
 nodeRole :: ConfigData -> String -> Result ConfdNodeRole
-nodeRole cfg name =
-  let cmaster = clusterMasterNode . configCluster $ cfg
-      mnode = M.lookup name . fromContainer . configNodes $ cfg
-  in case mnode of
-       Nothing -> Bad "Node not found"
-       Just node | cmaster == name -> Ok NodeRoleMaster
-                 | nodeDrained node -> Ok NodeRoleDrained
-                 | nodeOffline node -> Ok NodeRoleOffline
-                 | nodeMasterCandidate node -> Ok NodeRoleCandidate
-       _ -> Ok NodeRoleRegular
+nodeRole cfg name = do
+  cmaster <- errToResult $ QCluster.clusterMasterNodeName cfg
+  mnode <- errToResult $ getNode cfg name
+  let role = case mnode of
+               node | cmaster == name -> NodeRoleMaster
+                    | nodeDrained node -> NodeRoleDrained
+                    | nodeOffline node -> NodeRoleOffline
+                    | nodeMasterCandidate node -> NodeRoleCandidate
+               _ -> NodeRoleRegular
+  return role
 
 -- | Does an instance ip -> instance -> primary node -> primary ip
 -- transformation.
@@ -170,17 +171,19 @@ buildResponse (cfg, _) (ConfdRequest { confdRqType = ReqPing }) =
 
 buildResponse cdata req@(ConfdRequest { confdRqType = ReqClusterMaster }) =
   case confdRqQuery req of
-    EmptyQuery -> return (ReplyStatusOk, J.showJSON master_name)
+    EmptyQuery -> liftM ((,) ReplyStatusOk . J.showJSON) master_name
     PlainQuery _ -> return queryArgumentError
     DictQuery reqq -> do
-      mnode <- gntErrorToResult $ getNode cfg master_name
+      mnode <- gntErrorToResult $ getNode cfg master_uuid
+      mname <- master_name
       let fvals = map (\field -> case field of
-                                   ReqFieldName -> master_name
+                                   ReqFieldName -> mname
                                    ReqFieldIp -> clusterMasterIp cluster
                                    ReqFieldMNodePip -> nodePrimaryIp mnode
                       ) (confdReqQFields reqq)
       return (ReplyStatusOk, J.showJSON fvals)
-    where master_name = clusterMasterNode cluster
+    where master_uuid = clusterMasterNode cluster
+          master_name = errToResult $ QCluster.clusterMasterNodeName cfg
           cluster = configCluster cfg
           cfg = fst cdata
 
index 18dce62..d18f9d2 100644 (file)
@@ -160,9 +160,16 @@ getItem kind name allitems = do
   maybe (err "not found after successfull match?!") Ok $
         M.lookup fullname allitems
 
--- | Looks up a node.
+-- | Looks up a node by name or uuid.
 getNode :: ConfigData -> String -> ErrorResult Node
-getNode cfg name = getItem "Node" name (fromContainer $ configNodes cfg)
+getNode cfg name =
+  let nodes = fromContainer (configNodes cfg)
+  in case getItem "Node" name nodes of
+       -- if not found by uuid, we need to look it up by name
+       Ok node -> Ok node
+       Bad _ -> let by_name = M.mapKeys
+                              (nodeName . (M.!) nodes) nodes
+                in getItem "Node" name by_name
 
 -- | Looks up an instance.
 getInstance :: ConfigData -> String -> ErrorResult Instance
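
getNode now accepts either key. A rough Python analogue of the fallback above, assuming nodes is the UUID-keyed dictionary the configuration stores (function name is hypothetical):

def GetNodeByUuidOrName(nodes, key):
  # Primary index is the UUID; if that misses, retry against a name-keyed view.
  if key in nodes:
    return nodes[key]
  by_name = dict((node.name, node) for node in nodes.values())
  return by_name.get(key)  # None if key is neither a known UUID nor a name
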
index 6e615d7..e86aac3 100644 (file)
@@ -1499,6 +1499,7 @@ iMoveToJob nl il idx move =
               , OpCodes.opMigrationMode       = Nothing -- default
               , OpCodes.opOldLiveMode         = Nothing -- default as well
               , OpCodes.opTargetNode          = Nothing -- this is drbd
+              , OpCodes.opTargetNodeUuid      = Nothing
               , OpCodes.opAllowRuntimeChanges = False
               , OpCodes.opIgnoreIpolicy       = False
               , OpCodes.opMigrationCleanup    = False
@@ -1512,6 +1513,7 @@ iMoveToJob nl il idx move =
                 , OpCodes.opReplaceDisksMode = OpCodes.ReplaceNewSecondary
                 , OpCodes.opReplaceDisksList = []
                 , OpCodes.opRemoteNode       = lookNode n
+                , OpCodes.opRemoteNodeUuid   = Nothing
                 , OpCodes.opIallocator       = Nothing
                 }
   in case move of
index 520deeb..4cd482c 100644 (file)
@@ -299,6 +299,7 @@ detectBroken nl inst =
                                        -- mkNonEmpty in this way (using the fact
                                        -- that Maybe is used both for optional
                                        -- fields, and to express failure).
+                                     , opNodeUuids = Nothing
                                      , opIallocator = mkNonEmpty "hail"
                                      }
            , OpInstanceReinstall { opInstanceName = iname
@@ -316,6 +317,7 @@ detectBroken nl inst =
                                                       C.defaultShutdownTimeout
                                 , opIgnoreConsistency = False
                                 , opTargetNode = Nothing
+                                , opTargetNodeUuid = Nothing
                                 , opIgnoreIpolicy = False
                                 , opIallocator = Nothing
                                 }
@@ -328,6 +330,7 @@ detectBroken nl inst =
                                     , opReplaceDisksList = []
                                     , opRemoteNode = Nothing
                                       -- FIXME: ditto, see above.
+                                    , opRemoteNodeUuid = Nothing
                                     , opIallocator = mkNonEmpty "hail"
                                     , opEarlyRelease = False
                                     , opIgnoreIpolicy = False
@@ -343,6 +346,7 @@ detectBroken nl inst =
                                      , opRecreateDisksInfo = RecreateDisksAll
                                      , opNodes = []
                                        -- FIXME: ditto, see above.
+                                     , opNodeUuids = Nothing
                                      , opIallocator = mkNonEmpty "hail"
                                      }
            , OpInstanceReinstall { opInstanceName = iname
@@ -410,6 +414,7 @@ doRepair client delay instData (rtype, opcodes) =
                 OpTestDelay { opDelayDuration = delay
                             , opDelayOnMaster = True
                             , opDelayOnNodes = []
+                            , opDelayOnNodeUuids = Nothing
                             , opDelayRepeat = fromJust $ mkNonNegative 0
                             } : opcodes
               else
index 3b30350..43e9b6a 100644 (file)
@@ -66,6 +66,7 @@ $(genOpCode "OpCode"
      [ pDelayDuration
      , pDelayOnMaster
      , pDelayOnNodes
+     , pDelayOnNodeUuids
      , pDelayRepeat
      ])
   , ("OpInstanceReplaceDisks",
@@ -75,6 +76,7 @@ $(genOpCode "OpCode"
      , pReplaceDisksMode
      , pReplaceDisksList
      , pRemoteNode
+     , pRemoteNodeUuid
      , pIallocator
      ])
   , ("OpInstanceFailover",
@@ -82,6 +84,7 @@ $(genOpCode "OpCode"
      , pShutdownTimeout
      , pIgnoreConsistency
      , pMigrationTargetNode
+     , pMigrationTargetNodeUuid
      , pIgnoreIpolicy
      , pIallocator
      ])
@@ -90,6 +93,7 @@ $(genOpCode "OpCode"
      , pMigrationMode
      , pMigrationLive
      , pMigrationTargetNode
+     , pMigrationTargetNodeUuid
      , pAllowRuntimeChgs
      , pIgnoreIpolicy
      , pMigrationCleanup
@@ -192,12 +196,16 @@ $(genOpCode "OpCode"
      ])
   , ("OpOobCommand",
      [ pNodeNames
+     , pNodeUuids
      , pOobCommand
      , pOobTimeout
      , pIgnoreStatus
      , pPowerDelay
      ])
-  , ("OpNodeRemove", [ pNodeName ])
+  , ("OpNodeRemove",
+     [ pNodeName
+     , pNodeUuid
+     ])
   , ("OpNodeAdd",
      [ pNodeName
      , pHvState
@@ -223,18 +231,21 @@ $(genOpCode "OpCode"
      ])
   , ("OpNodeModifyStorage",
      [ pNodeName
+     , pNodeUuid
      , pStorageType
      , pStorageName
      , pStorageChanges
      ])
   , ("OpRepairNodeStorage",
      [ pNodeName
+     , pNodeUuid
      , pStorageType
      , pStorageName
      , pIgnoreConsistency
      ])
   , ("OpNodeSetParams",
      [ pNodeName
+     , pNodeUuid
      , pForce
      , pHvState
      , pDiskState
@@ -250,13 +261,16 @@ $(genOpCode "OpCode"
      ])
   , ("OpNodePowercycle",
      [ pNodeName
+     , pNodeUuid
      , pForce
      ])
   , ("OpNodeMigrate",
      [ pNodeName
+     , pNodeUuid
      , pMigrationMode
      , pMigrationLive
      , pMigrationTargetNode
+     , pMigrationTargetNodeUuid
      , pAllowRuntimeChgs
      , pIgnoreIpolicy
      , pIallocator
@@ -264,7 +278,9 @@ $(genOpCode "OpCode"
   , ("OpNodeEvacuate",
      [ pEarlyRelease
      , pNodeName
+     , pNodeUuid
      , pRemoteNode
+     , pRemoteNodeUuid
      , pIallocator
      , pEvacMode
      ])
@@ -291,12 +307,15 @@ $(genOpCode "OpCode"
      , pInstOsParams
      , pInstOs
      , pPrimaryNode
+     , pPrimaryNodeUuid
      , pSecondaryNode
+     , pSecondaryNodeUuid
      , pSourceHandshake
      , pSourceInstance
      , pSourceShutdownTimeout
      , pSourceX509Ca
      , pSrcNode
+     , pSrcNodeUuid
      , pSrcPath
      , pStartInstance
      , pOpportunisticLocking
@@ -351,6 +370,7 @@ $(genOpCode "OpCode"
      , pShutdownTimeout
      , pIgnoreIpolicy
      , pMoveTargetNode
+     , pMoveTargetNodeUuid
      , pIgnoreConsistency
      ])
   , ("OpInstanceConsole",
@@ -368,6 +388,7 @@ $(genOpCode "OpCode"
      [ pInstanceName
      , pRecreateDisksInfo
      , pNodes
+     , pNodeUuids
      , pIallocator
      ])
   , ("OpInstanceQuery", dOldQuery)
@@ -388,7 +409,9 @@ $(genOpCode "OpCode"
      , pInstHvParams
      , pOptDiskTemplate
      , pPrimaryNode
+     , pPrimaryNodeUuid
      , pRemoteNode
+     , pRemoteNodeUuid
      , pOsNameChange
      , pInstOsParams
      , pWaitForSync
@@ -421,6 +444,7 @@ $(genOpCode "OpCode"
      [ pGroupName
      , pForce
      , pRequiredNodes
+     , pRequiredNodeUuids
      ])
   , ("OpGroupQuery", dOldQueryNoLocking)
   , ("OpGroupSetParams",
@@ -462,6 +486,7 @@ $(genOpCode "OpCode"
      [ pInstanceName
      , pShutdownTimeout
      , pExportTargetNode
+     , pExportTargetNodeUuid
      , pShutdownInstance
      , pRemoveInstance
      , pIgnoreRemoveFailures
@@ -541,6 +566,7 @@ $(genOpCode "OpCode"
   , ("OpRestrictedCommand",
      [ pUseLocking
      , pRequiredNodes
+     , pRequiredNodeUuids
      , pRestrictedCommand
      ])
   ])
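
Every opcode definition above that carries a node-name parameter gains a parallel, optional *_uuid parameter. On the Python side this corresponds roughly to the following shape (a hedged sketch, not the real OP_PARAMS declarations):

class OpNodeRemoveSketch(object):
  # Hypothetical single-node opcode after this change: the name stays the
  # external interface, the UUID slot is optional and filled in internally.
  def __init__(self, node_name, node_uuid=None):
    self.node_name = node_name  # what clients submit
    self.node_uuid = node_uuid  # resolved internally and kept beside the name
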
index e445aa9..be10014 100644 (file)
@@ -61,7 +61,9 @@ module Ganeti.OpParams
   , pForce
   , pIgnoreOfflineNodes
   , pNodeName
+  , pNodeUuid
   , pNodeNames
+  , pNodeUuids
   , pGroupName
   , pMigrationMode
   , pMigrationLive
@@ -82,7 +84,9 @@ module Ganeti.OpParams
   , pIpConflictsCheck
   , pNoRemember
   , pMigrationTargetNode
+  , pMigrationTargetNodeUuid
   , pMoveTargetNode
+  , pMoveTargetNodeUuid
   , pStartupPaused
   , pVerbose
   , pDebugSimulateErrors
@@ -144,6 +148,7 @@ module Ganeti.OpParams
   , pNames
   , pNodes
   , pRequiredNodes
+  , pRequiredNodeUuids
   , pStorageType
   , pStorageChanges
   , pMasterCandidate
@@ -153,17 +158,21 @@ module Ganeti.OpParams
   , pPowered
   , pIallocator
   , pRemoteNode
+  , pRemoteNodeUuid
   , pEvacMode
   , pInstCreateMode
   , pNoInstall
   , pInstOs
   , pPrimaryNode
+  , pPrimaryNodeUuid
   , pSecondaryNode
+  , pSecondaryNodeUuid
   , pSourceHandshake
   , pSourceInstance
   , pSourceShutdownTimeout
   , pSourceX509Ca
   , pSrcNode
+  , pSrcNodeUuid
   , pSrcPath
   , pStartInstance
   , pInstTags
@@ -188,6 +197,7 @@ module Ganeti.OpParams
   , pTargetGroups
   , pExportMode
   , pExportTargetNode
+  , pExportTargetNodeUuid
   , pRemoveInstance
   , pIgnoreRemoveFailures
   , pX509KeyName
@@ -200,6 +210,7 @@ module Ganeti.OpParams
   , pDelayDuration
   , pDelayOnMaster
   , pDelayOnNodes
+  , pDelayOnNodeUuids
   , pDelayRepeat
   , pIAllocatorDirection
   , pIAllocatorMode
@@ -563,11 +574,20 @@ pIgnoreOfflineNodes = defaultFalse "ignore_offline_nodes"
 pNodeName :: Field
 pNodeName = simpleField "node_name" [t| NonEmptyString |]
 
+-- | A node UUID (for single-node LUs).
+pNodeUuid :: Field
+pNodeUuid = optionalField $ simpleField "node_uuid" [t| NonEmptyString |]
+
 -- | List of nodes.
 pNodeNames :: Field
 pNodeNames =
   defaultField [| [] |] $ simpleField "node_names" [t| [NonEmptyString] |]
 
+-- | List of node UUIDs.
+pNodeUuids :: Field
+pNodeUuids =
+  optionalField $ simpleField "node_uuids" [t| [NonEmptyString] |]
+
 -- | A required node group name (for single-group LUs).
 pGroupName :: Field
 pGroupName = simpleField "group_name" [t| NonEmptyString |]
@@ -656,12 +676,22 @@ pNoRemember = defaultFalse "no_remember"
 pMigrationTargetNode :: Field
 pMigrationTargetNode = optionalNEStringField "target_node"
 
+-- | Target node UUID for instance migration/failover.
+pMigrationTargetNodeUuid :: Field
+pMigrationTargetNodeUuid = optionalNEStringField "target_node_uuid"
+
 -- | Target node for instance move (required).
 pMoveTargetNode :: Field
 pMoveTargetNode =
   renameField "MoveTargetNode" $
   simpleField "target_node" [t| NonEmptyString |]
 
+-- | Target node UUID for instance move.
+pMoveTargetNodeUuid :: Field
+pMoveTargetNodeUuid =
+  renameField "MoveTargetNodeUuid" . optionalField $
+  simpleField "target_node_uuid" [t| NonEmptyString |]
+
 -- | Pause instance at startup.
 pStartupPaused :: Field
 pStartupPaused = defaultFalse "startup_paused"
@@ -990,6 +1020,12 @@ pRequiredNodes :: Field
 pRequiredNodes =
   renameField "ReqNodes " $ simpleField "nodes" [t| [NonEmptyString] |]
 
+-- | Required list of node names.
+pRequiredNodeUuids :: Field
+pRequiredNodeUuids =
+  renameField "ReqNodeUuids " . optionalField $
+    simpleField "node_uuids" [t| [NonEmptyString] |]
+
 -- | Storage type.
 pStorageType :: Field
 pStorageType = simpleField "storage_type" [t| StorageType |]
@@ -1027,6 +1063,10 @@ pIallocator = optionalNEStringField "iallocator"
 pRemoteNode :: Field
 pRemoteNode = optionalNEStringField "remote_node"
 
+-- | New secondary node UUID.
+pRemoteNodeUuid :: Field
+pRemoteNodeUuid = optionalNEStringField "remote_node_uuid"
+
 -- | Node evacuation mode.
 pEvacMode :: Field
 pEvacMode = renameField "EvacMode" $ simpleField "mode" [t| NodeEvacMode |]
@@ -1048,10 +1088,18 @@ pInstOs = optionalNEStringField "os_type"
 pPrimaryNode :: Field
 pPrimaryNode = optionalNEStringField "pnode"
 
+-- | Primary node UUID for an instance.
+pPrimaryNodeUuid :: Field
+pPrimaryNodeUuid = optionalNEStringField "pnode_uuid"
+
 -- | Secondary node for an instance.
 pSecondaryNode :: Field
 pSecondaryNode = optionalNEStringField "snode"
 
+-- | Secondary node UUID for an instance.
+pSecondaryNodeUuid :: Field
+pSecondaryNodeUuid = optionalNEStringField "snode_uuid"
+
 -- | Signed handshake from source (remote import only).
 pSourceHandshake :: Field
 pSourceHandshake =
@@ -1076,6 +1124,10 @@ pSourceX509Ca = optionalNEStringField "source_x509_ca"
 pSrcNode :: Field
 pSrcNode = optionalNEStringField "src_node"
 
+-- | Source node for import.
+pSrcNodeUuid :: Field
+pSrcNodeUuid = optionalNEStringField "src_node_uuid"
+
 -- | Source directory for import.
 pSrcPath :: Field
 pSrcPath = optionalNEStringField "src_path"
@@ -1181,6 +1233,12 @@ pExportTargetNode =
   renameField "ExportTarget" $
   simpleField "target_node" [t| ExportTarget |]
 
+-- | Export target node UUID field.
+pExportTargetNodeUuid :: Field
+pExportTargetNodeUuid =
+  renameField "ExportTargetNodeUuid" . optionalField $
+  simpleField "target_node_uuid" [t| NonEmptyString |]
+
 -- | Whether to remove instance after export.
 pRemoveInstance :: Field
 pRemoveInstance = defaultFalse "remove_instance"
@@ -1241,6 +1299,12 @@ pDelayOnNodes =
   defaultField [| [] |] $
   simpleField "on_nodes" [t| [NonEmptyString] |]
 
+-- | on_node_uuids field for 'OpTestDelay'.
+pDelayOnNodeUuids :: Field
+pDelayOnNodeUuids =
+  renameField "DelayOnNodeUuids" . optionalField $
+  simpleField "on_node_uuids" [t| [NonEmptyString] |]
+
 -- | Repeat parameter for OpTestDelay.
 pDelayRepeat :: Field
 pDelayRepeat =
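
All of the new UUID fields above are declared as optional fields, so existing clients that only know node names keep working unchanged. As an illustration, a submitted opcode payload under the new schema might look like this (values are made up; only the field names come from the definitions above):

op_failover = {
  "OP_ID": "OP_INSTANCE_FAILOVER",
  "instance_name": "inst1.example.com",
  "target_node": "node2.example.com",
  # "target_node_uuid" is optional and normally absent in client submissions
}
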
diff --git a/src/Ganeti/Query/Cluster.hs b/src/Ganeti/Query/Cluster.hs
new file mode 100644 (file)
index 0000000..0c6d985
--- /dev/null
@@ -0,0 +1,41 @@
+{-| Implementation of the Ganeti Query2 cluster queries.
+
+ -}
+
+{-
+
+Copyright (C) 2012, 2013 Google Inc.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA.
+
+-}
+
+module Ganeti.Query.Cluster
+  ( clusterMasterNodeName
+  ) where
+
+import Control.Monad (liftM)
+
+import Ganeti.Objects
+import Ganeti.Config
+import Ganeti.Errors
+
+-- | Get master node name.
+clusterMasterNodeName :: ConfigData -> ErrorResult String
+clusterMasterNodeName cfg =
+  let cluster = configCluster cfg
+      masterNodeUuid = clusterMasterNode cluster
+  in liftM nodeName $ getNode cfg masterNodeUuid
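
The new module is small but captures the key consequence of the re-indexing: clusterMasterNode now holds a UUID, so any user-facing consumer has to resolve it. A Python sketch of the same lookup, relying on ConfigWriter helpers used elsewhere in this patch (GetMasterNode yields the master's UUID, GetNodeName maps it back to a name):

def ClusterMasterNodeName(cfg):
  # UUID -> name resolution for display purposes
  return cfg.GetNodeName(cfg.GetMasterNode())
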
index 3c8d8a9..4bc03c0 100644 (file)
@@ -52,6 +52,7 @@ import Ganeti.Logging
 import Ganeti.Luxi
 import Ganeti.OpCodes (TagObject(..))
 import qualified Ganeti.Query.Language as Qlang
+import qualified Ganeti.Query.Cluster as QCluster
 import Ganeti.Query.Query
 import Ganeti.Query.Filter (makeSimpleFilter)
 
@@ -86,6 +87,7 @@ handleCallWrapper (Ok config) op = handleCall config op
 handleCall :: ConfigData -> LuxiOp -> IO (ErrorResult JSValue)
 handleCall cdata QueryClusterInfo =
   let cluster = configCluster cdata
+      master = QCluster.clusterMasterNodeName cdata
       hypervisors = clusterEnabledHypervisors cluster
       diskTemplates = clusterEnabledDiskTemplates cluster
       def_hv = case hypervisors of
@@ -100,7 +102,9 @@ handleCall cdata QueryClusterInfo =
             , ("export_version", showJSON C.exportVersion)
             , ("architecture", showJSON arch_tuple)
             , ("name", showJSON $ clusterClusterName cluster)
-            , ("master", showJSON $ clusterMasterNode cluster)
+            , ("master", showJSON (case master of
+                                     Ok name -> name
+                                     _ -> undefined))
             , ("default_hypervisor", def_hv)
             , ("enabled_hypervisors", showJSON hypervisors)
             , ("hvparams", showJSON $ clusterHvparams cluster)
@@ -143,7 +147,9 @@ handleCall cdata QueryClusterInfo =
             , ("enabled_disk_templates", showJSON diskTemplates)
             ]
 
-  in return . Ok . J.makeObj $ obj
+  in case master of
+    Ok _ -> return . Ok . J.makeObj $ obj
+    Bad ex -> return $ Bad ex
 
 handleCall cfg (QueryTags kind) =
   let tags = case kind of
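
Because the master name lookup can now fail, handleCall only returns the assembled object when the resolution succeeded and propagates the error otherwise. The same idea as a Python sketch, assuming GetNodeName returns None for an unknown UUID (as the unit test mocks later in this patch do) and using Ganeti's errors.ConfigurationError:

def QueryClusterInfoSketch(cfg):
  # Resolve the name first and fail the whole query if that is impossible,
  # instead of returning an object with an unusable "master" entry.
  master_name = cfg.GetNodeName(cfg.GetMasterNode())
  if master_name is None:
    raise errors.ConfigurationError("cannot resolve the master node UUID")
  return {"name": cfg.GetClusterName(), "master": master_name}
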
index 6044322..9a88df5 100644 (file)
@@ -109,17 +109,18 @@ instance Arbitrary OpCodes.OpCode where
     case op_id of
       "OP_TEST_DELAY" ->
         OpCodes.OpTestDelay <$> arbitrary <*> arbitrary <*>
-          genNodeNamesNE <*> arbitrary
+          genNodeNamesNE <*> return Nothing <*> arbitrary
       "OP_INSTANCE_REPLACE_DISKS" ->
         OpCodes.OpInstanceReplaceDisks <$> genFQDN <*> arbitrary <*>
           arbitrary <*> arbitrary <*> genDiskIndices <*>
-          genMaybe genNodeNameNE <*> genMaybe genNameNE
+          genMaybe genNodeNameNE <*> return Nothing <*> genMaybe genNameNE
       "OP_INSTANCE_FAILOVER" ->
         OpCodes.OpInstanceFailover <$> genFQDN <*> arbitrary <*> arbitrary <*>
-          genMaybe genNodeNameNE <*> arbitrary <*> genMaybe genNameNE
+        genMaybe genNodeNameNE <*> return Nothing <*> arbitrary <*>
+        genMaybe genNameNE
       "OP_INSTANCE_MIGRATE" ->
         OpCodes.OpInstanceMigrate <$> genFQDN <*> arbitrary <*> arbitrary <*>
-          genMaybe genNodeNameNE <*> arbitrary <*>
+          genMaybe genNodeNameNE <*> return Nothing <*> arbitrary <*>
           arbitrary <*> arbitrary <*> genMaybe genNameNE <*> arbitrary
       "OP_TAGS_GET" ->
         OpCodes.OpTagsGet <$> arbitrary <*> arbitrary
@@ -172,9 +173,11 @@ instance Arbitrary OpCodes.OpCode where
       "OP_QUERY_FIELDS" ->
         OpCodes.OpQueryFields <$> arbitrary <*> arbitrary
       "OP_OOB_COMMAND" ->
-        OpCodes.OpOobCommand <$> genNodeNamesNE <*> arbitrary <*>
-          arbitrary <*> arbitrary <*> (arbitrary `suchThat` (>0))
-      "OP_NODE_REMOVE" -> OpCodes.OpNodeRemove <$> genNodeNameNE
+        OpCodes.OpOobCommand <$> genNodeNamesNE <*> return Nothing <*>
+          arbitrary <*> arbitrary <*> arbitrary <*>
+          (arbitrary `suchThat` (>0))
+      "OP_NODE_REMOVE" ->
+        OpCodes.OpNodeRemove <$> genNodeNameNE <*> return Nothing
       "OP_NODE_ADD" ->
         OpCodes.OpNodeAdd <$> genNodeNameNE <*> emptyMUD <*> emptyMUD <*>
           genMaybe genName <*> genMaybe genNameNE <*> arbitrary <*>
@@ -187,25 +190,27 @@ instance Arbitrary OpCodes.OpCode where
         OpCodes.OpNodeQueryStorage <$> arbitrary <*> arbitrary <*>
           genNodeNamesNE <*> genNameNE
       "OP_NODE_MODIFY_STORAGE" ->
-        OpCodes.OpNodeModifyStorage <$> genNodeNameNE <*> arbitrary <*>
-          genNameNE <*> pure emptyJSObject
+        OpCodes.OpNodeModifyStorage <$> genNodeNameNE <*> return Nothing <*>
+          arbitrary <*> genNameNE <*> pure emptyJSObject
       "OP_REPAIR_NODE_STORAGE" ->
-        OpCodes.OpRepairNodeStorage <$> genNodeNameNE <*> arbitrary <*>
-          genNameNE <*> arbitrary
+        OpCodes.OpRepairNodeStorage <$> genNodeNameNE <*> return Nothing <*>
+          arbitrary <*> genNameNE <*> arbitrary
       "OP_NODE_SET_PARAMS" ->
-        OpCodes.OpNodeSetParams <$> genNodeNameNE <*> arbitrary <*>
-          emptyMUD <*> emptyMUD <*> arbitrary <*> arbitrary <*> arbitrary <*>
-          arbitrary <*> arbitrary <*> arbitrary <*> genMaybe genNameNE <*>
-          emptyMUD <*> arbitrary
+        OpCodes.OpNodeSetParams <$> genNodeNameNE <*> return Nothing <*>
+          arbitrary <*> emptyMUD <*> emptyMUD <*> arbitrary <*> arbitrary <*>
+          arbitrary <*> arbitrary <*> arbitrary <*> arbitrary <*>
+          genMaybe genNameNE <*> emptyMUD <*> arbitrary
       "OP_NODE_POWERCYCLE" ->
-        OpCodes.OpNodePowercycle <$> genNodeNameNE <*> arbitrary
+        OpCodes.OpNodePowercycle <$> genNodeNameNE <*> return Nothing <*>
+          arbitrary
       "OP_NODE_MIGRATE" ->
-        OpCodes.OpNodeMigrate <$> genNodeNameNE <*> arbitrary <*>
-          arbitrary <*> genMaybe genNodeNameNE <*> arbitrary <*>
-          arbitrary <*> genMaybe genNameNE
+        OpCodes.OpNodeMigrate <$> genNodeNameNE <*> return Nothing <*>
+          arbitrary <*> arbitrary <*> genMaybe genNodeNameNE <*>
+          return Nothing <*> arbitrary <*> arbitrary <*> genMaybe genNameNE
       "OP_NODE_EVACUATE" ->
         OpCodes.OpNodeEvacuate <$> arbitrary <*> genNodeNameNE <*>
-          genMaybe genNodeNameNE <*> genMaybe genNameNE <*> arbitrary
+          return Nothing <*> genMaybe genNodeNameNE <*> return Nothing <*>
+          genMaybe genNameNE <*> arbitrary
       "OP_INSTANCE_CREATE" ->
         OpCodes.OpInstanceCreate <$> genFQDN <*> arbitrary <*>
           arbitrary <*> arbitrary <*> arbitrary <*> pure emptyJSObject <*>
@@ -214,9 +219,10 @@ instance Arbitrary OpCodes.OpCode where
           arbitrary <*> arbitrary <*> arbitrary <*> arbitrary <*>
           arbitrary <*> arbitrary <*> pure emptyJSObject <*>
           genMaybe genNameNE <*>
-          genMaybe genNodeNameNE <*> genMaybe genNodeNameNE <*>
+          genMaybe genNodeNameNE <*> return Nothing <*>
+          genMaybe genNodeNameNE <*> return Nothing <*>
           genMaybe (pure []) <*> genMaybe genNodeNameNE <*>
-          arbitrary <*> genMaybe genNodeNameNE <*>
+          arbitrary <*> genMaybe genNodeNameNE <*> return Nothing <*>
           genMaybe genNodeNameNE <*> genMaybe genNameNE <*>
           arbitrary <*> arbitrary <*> (genTags >>= mapM mkNonEmpty)
       "OP_INSTANCE_MULTI_ALLOC" ->
@@ -242,7 +248,7 @@ instance Arbitrary OpCodes.OpCode where
           arbitrary <*> arbitrary
       "OP_INSTANCE_MOVE" ->
         OpCodes.OpInstanceMove <$> genFQDN <*> arbitrary <*> arbitrary <*>
-          genNodeNameNE <*> arbitrary
+          genNodeNameNE <*> return Nothing <*> arbitrary
       "OP_INSTANCE_CONSOLE" -> OpCodes.OpInstanceConsole <$> genFQDN
       "OP_INSTANCE_ACTIVATE_DISKS" ->
         OpCodes.OpInstanceActivateDisks <$> genFQDN <*>
@@ -251,7 +257,7 @@ instance Arbitrary OpCodes.OpCode where
         OpCodes.OpInstanceDeactivateDisks <$> genFQDN <*> arbitrary
       "OP_INSTANCE_RECREATE_DISKS" ->
         OpCodes.OpInstanceRecreateDisks <$> genFQDN <*> arbitrary <*>
-          genNodeNamesNE <*> genMaybe genNameNE
+          genNodeNamesNE <*> return Nothing <*> genMaybe genNameNE
       "OP_INSTANCE_QUERY" ->
         OpCodes.OpInstanceQuery <$> genFieldsNE <*> genNamesNE <*> arbitrary
       "OP_INSTANCE_QUERY_DATA" ->
@@ -261,7 +267,8 @@ instance Arbitrary OpCodes.OpCode where
         OpCodes.OpInstanceSetParams <$> genFQDN <*> arbitrary <*>
           arbitrary <*> arbitrary <*> arbitrary <*> arbitrary <*>
           pure emptyJSObject <*> arbitrary <*> pure emptyJSObject <*>
-          arbitrary <*> genMaybe genNodeNameNE <*> genMaybe genNodeNameNE <*>
+          arbitrary <*> genMaybe genNodeNameNE <*> return Nothing <*>
+          genMaybe genNodeNameNE <*> return Nothing <*>
           genMaybe genNameNE <*> pure emptyJSObject <*> arbitrary <*>
           arbitrary <*> arbitrary
       "OP_INSTANCE_GROW_DISK" ->
@@ -276,7 +283,7 @@ instance Arbitrary OpCodes.OpCode where
           emptyMUD <*> emptyMUD <*> emptyMUD
       "OP_GROUP_ASSIGN_NODES" ->
         OpCodes.OpGroupAssignNodes <$> genNameNE <*> arbitrary <*>
-          genNodeNamesNE
+          genNodeNamesNE <*> return Nothing
       "OP_GROUP_QUERY" ->
         OpCodes.OpGroupQuery <$> genFieldsNE <*> genNamesNE
       "OP_GROUP_SET_PARAMS" ->
@@ -300,8 +307,9 @@ instance Arbitrary OpCodes.OpCode where
         OpCodes.OpBackupPrepare <$> genFQDN <*> arbitrary
       "OP_BACKUP_EXPORT" ->
         OpCodes.OpBackupExport <$> genFQDN <*> arbitrary <*>
-          arbitrary <*> arbitrary <*> arbitrary <*> arbitrary <*>
-          arbitrary <*> genMaybe (pure []) <*> genMaybe genNameNE
+          arbitrary <*> return Nothing <*> arbitrary <*> arbitrary <*>
+          arbitrary <*> arbitrary <*> genMaybe (pure []) <*>
+          genMaybe genNameNE
       "OP_BACKUP_REMOVE" ->
         OpCodes.OpBackupRemove <$> genFQDN
       "OP_TEST_ALLOCATOR" ->
@@ -339,7 +347,7 @@ instance Arbitrary OpCodes.OpCode where
         OpCodes.OpNetworkQuery <$> genFieldsNE <*> genNamesNE <*> arbitrary
       "OP_RESTRICTED_COMMAND" ->
         OpCodes.OpRestrictedCommand <$> arbitrary <*> genNodeNamesNE <*>
-          genNameNE
+          return Nothing <*> genNameNE
       _ -> fail $ "Undefined arbitrary for opcode " ++ op_id
 
 -- | Generates one element of a reason trail
index 0ee3742..1983b63 100755 (executable)
@@ -306,22 +306,33 @@ class TestClusterVerifyFiles(unittest.TestCase):
     if cond:
       errors.append((item, msg))
 
-  _VerifyFiles = cluster.LUClusterVerifyGroup._VerifyFiles
-
   def test(self):
     errors = []
-    master_name = "master.example.com"
     nodeinfo = [
-      objects.Node(name=master_name, offline=False, vm_capable=True),
-      objects.Node(name="node2.example.com", offline=False, vm_capable=True),
-      objects.Node(name="node3.example.com", master_candidate=True,
+      objects.Node(name="master.example.com",
+                   uuid="master-uuid",
+                   offline=False,
+                   vm_capable=True),
+      objects.Node(name="node2.example.com",
+                   uuid="node2-uuid",
+                   offline=False,
+                   vm_capable=True),
+      objects.Node(name="node3.example.com",
+                   uuid="node3-uuid",
+                   master_candidate=True,
                    vm_capable=False),
-      objects.Node(name="node4.example.com", offline=False, vm_capable=True),
-      objects.Node(name="nodata.example.com", offline=False, vm_capable=True),
-      objects.Node(name="offline.example.com", offline=True),
+      objects.Node(name="node4.example.com",
+                   uuid="node4-uuid",
+                   offline=False,
+                   vm_capable=True),
+      objects.Node(name="nodata.example.com",
+                   uuid="nodata-uuid",
+                   offline=False,
+                   vm_capable=True),
+      objects.Node(name="offline.example.com",
+                   uuid="offline-uuid",
+                   offline=True),
       ]
-    cluster = objects.Cluster(modify_etc_hosts=True,
-                              enabled_hypervisors=[constants.HT_XEN_HVM])
     files_all = set([
       pathutils.CLUSTER_DOMAIN_SECRET_FILE,
       pathutils.RAPI_CERT_FILE,
@@ -341,7 +352,7 @@ class TestClusterVerifyFiles(unittest.TestCase):
       pathutils.VNC_PASSWORD_FILE,
       ])
     nvinfo = {
-      master_name: rpc.RpcResult(data=(True, {
+      "master-uuid": rpc.RpcResult(data=(True, {
         constants.NV_FILELIST: {
           pathutils.CLUSTER_CONF_FILE: "82314f897f38b35f9dab2f7c6b1593e0",
           pathutils.RAPI_CERT_FILE: "babbce8f387bc082228e544a2146fee4",
@@ -349,19 +360,19 @@ class TestClusterVerifyFiles(unittest.TestCase):
           hv_xen.XEND_CONFIG_FILE: "b4a8a824ab3cac3d88839a9adeadf310",
           hv_xen.XL_CONFIG_FILE: "77935cee92afd26d162f9e525e3d49b9"
         }})),
-      "node2.example.com": rpc.RpcResult(data=(True, {
+      "node2-uuid": rpc.RpcResult(data=(True, {
         constants.NV_FILELIST: {
           pathutils.RAPI_CERT_FILE: "97f0356500e866387f4b84233848cc4a",
           hv_xen.XEND_CONFIG_FILE: "b4a8a824ab3cac3d88839a9adeadf310",
           }
         })),
-      "node3.example.com": rpc.RpcResult(data=(True, {
+      "node3-uuid": rpc.RpcResult(data=(True, {
         constants.NV_FILELIST: {
           pathutils.RAPI_CERT_FILE: "97f0356500e866387f4b84233848cc4a",
           pathutils.CLUSTER_DOMAIN_SECRET_FILE: "cds-47b5b3f19202936bb4",
           }
         })),
-      "node4.example.com": rpc.RpcResult(data=(True, {
+      "node4-uuid": rpc.RpcResult(data=(True, {
         constants.NV_FILELIST: {
           pathutils.RAPI_CERT_FILE: "97f0356500e866387f4b84233848cc4a",
           pathutils.CLUSTER_CONF_FILE: "conf-a6d4b13e407867f7a7b4f0f232a8f527",
@@ -370,14 +381,30 @@ class TestClusterVerifyFiles(unittest.TestCase):
           hv_xen.XL_CONFIG_FILE: "77935cee92afd26d162f9e525e3d49b9"
           }
         })),
-      "nodata.example.com": rpc.RpcResult(data=(True, {})),
-      "offline.example.com": rpc.RpcResult(offline=True),
+      "nodata-uuid": rpc.RpcResult(data=(True, {})),
+      "offline-uuid": rpc.RpcResult(offline=True),
       }
-    assert set(nvinfo.keys()) == set(map(operator.attrgetter("name"), nodeinfo))
+    assert set(nvinfo.keys()) == set(map(operator.attrgetter("uuid"), nodeinfo))
+
+    verify_lu = cluster.LUClusterVerifyGroup(mocks.FakeProc(),
+                                             opcodes.OpClusterVerify(),
+                                             mocks.FakeContext(),
+                                             None)
+
+    verify_lu._ErrorIf = compat.partial(self._FakeErrorIf, errors)
+
+    # TODO: It is a bit hackish to mock only this single method. We should
+    # build a better FakeConfig which already provides this functionality.
+    def GetNodeName(node_uuid):
+      for node in nodeinfo:
+        if node.uuid == node_uuid:
+          return node.name
+      return None
+
+    verify_lu.cfg.GetNodeName = GetNodeName
 
-    self._VerifyFiles(compat.partial(self._FakeErrorIf, errors), nodeinfo,
-                      master_name, nvinfo,
-                      (files_all, files_opt, files_mc, files_vm))
+    verify_lu._VerifyFiles(nodeinfo, "master-uuid", nvinfo,
+                           (files_all, files_opt, files_mc, files_vm))
     self.assertEqual(sorted(errors), sorted([
       (None, ("File %s found with 2 different checksums (variant 1 on"
               " node2.example.com, node3.example.com, node4.example.com;"
@@ -1417,12 +1444,16 @@ class TestGenerateDiskTemplate(unittest.TestCase):
 
 
 class _ConfigForDiskWipe:
-  def __init__(self, exp_node):
-    self._exp_node = exp_node
+  def __init__(self, exp_node_uuid):
+    self._exp_node_uuid = exp_node_uuid
 
-  def SetDiskID(self, device, node):
+  def SetDiskID(self, device, node_uuid):
     assert isinstance(device, objects.Disk)
-    assert node == self._exp_node
+    assert node_uuid == self._exp_node_uuid
+
+  def GetNodeName(self, node_uuid):
+    assert node_uuid == self._exp_node_uuid
+    return "name.of.expected.node"
 
 
 class _RpcForDiskWipe:
@@ -1517,11 +1548,11 @@ class TestWipeDisks(unittest.TestCase):
     return (False, None)
 
   def testFailingWipe(self):
-    node_name = "node13445.example.com"
+    node_uuid = "node13445-uuid"
     pt = _DiskPauseTracker()
 
-    lu = _FakeLU(rpc=_RpcForDiskWipe(node_name, pt, self._FailingWipeCb),
-                 cfg=_ConfigForDiskWipe(node_name))
+    lu = _FakeLU(rpc=_RpcForDiskWipe(node_uuid, pt, self._FailingWipeCb),
+                 cfg=_ConfigForDiskWipe(node_uuid))
 
     disks = [
       objects.Disk(dev_type=constants.LD_LV, logical_id="disk0",
@@ -1532,7 +1563,7 @@ class TestWipeDisks(unittest.TestCase):
       ]
 
     inst = objects.Instance(name="inst562",
-                            primary_node=node_name,
+                            primary_node=node_uuid,
                             disk_template=constants.DT_PLAIN,
                             disks=disks)
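
Both the verification and disk-wipe tests above now have to hand-roll a GetNodeName lookup because RPC results and disk operations are keyed by UUID. One possible shape for the better fake configuration hinted at in the TODO above (a sketch only, class name made up):

class FakeConfigWithNodeNames(object):
  # Keep a uuid -> node map so tests no longer need to monkey-patch
  # GetNodeName on a real LU or config stub.
  def __init__(self, nodes):
    self._by_uuid = dict((node.uuid, node) for node in nodes)

  def GetNodeName(self, node_uuid):
    node = self._by_uuid.get(node_uuid)
    return node.name if node else None

  def GetNodeNames(self, node_uuids):
    return [self.GetNodeName(uuid) for uuid in node_uuids]
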
 
index 8ca7c64..4c72b8a 100755 (executable)
@@ -328,14 +328,18 @@ class TestConfigRunner(unittest.TestCase):
     cluster_serial += 1
 
     # Create two nodes
-    node1 = objects.Node(name="node1", group=grp1.uuid, ndparams={})
+    node1 = objects.Node(name="node1", group=grp1.uuid, ndparams={},
+                         uuid="node1-uuid")
     node1_serial = 1
-    node2 = objects.Node(name="node2", group=grp2.uuid, ndparams={})
+    node2 = objects.Node(name="node2", group=grp2.uuid, ndparams={},
+                         uuid="node2-uuid")
     node2_serial = 1
     cfg.AddNode(node1, "job")
     cfg.AddNode(node2, "job")
     cluster_serial += 2
-    self.assertEqual(set(cfg.GetNodeList()), set(["node1", "node2", me.name]))
+    self.assertEqual(set(cfg.GetNodeList()),
+                     set(["node1-uuid", "node2-uuid",
+                          cfg.GetNodeInfoByName(me.name).uuid]))
 
     def _VerifySerials():
       self.assertEqual(cfg.GetClusterInfo().serial_no, cluster_serial)
@@ -346,8 +350,8 @@ class TestConfigRunner(unittest.TestCase):
 
     _VerifySerials()
 
-    self.assertEqual(set(grp1.members), set(["node1"]))
-    self.assertEqual(set(grp2.members), set(["node2"]))
+    self.assertEqual(set(grp1.members), set(["node1-uuid"]))
+    self.assertEqual(set(grp2.members), set(["node2-uuid"]))
 
     # Check invalid nodes and groups
     self.assertRaises(errors.ConfigurationError, cfg.AssignGroupNodes, [
@@ -359,8 +363,8 @@ class TestConfigRunner(unittest.TestCase):
 
     self.assertEqual(node1.group, grp1.uuid)
     self.assertEqual(node2.group, grp2.uuid)
-    self.assertEqual(set(grp1.members), set(["node1"]))
-    self.assertEqual(set(grp2.members), set(["node2"]))
+    self.assertEqual(set(grp1.members), set(["node1-uuid"]))
+    self.assertEqual(set(grp2.members), set(["node2-uuid"]))
 
     # Another no-op
     cfg.AssignGroupNodes([])
@@ -370,18 +374,18 @@ class TestConfigRunner(unittest.TestCase):
     # Assign to the same group (should be a no-op)
     self.assertEqual(node2.group, grp2.uuid)
     cfg.AssignGroupNodes([
-      (node2.name, grp2.uuid),
+      (node2.uuid, grp2.uuid),
       ])
     cluster_serial += 1
     self.assertEqual(node2.group, grp2.uuid)
     _VerifySerials()
-    self.assertEqual(set(grp1.members), set(["node1"]))
-    self.assertEqual(set(grp2.members), set(["node2"]))
+    self.assertEqual(set(grp1.members), set(["node1-uuid"]))
+    self.assertEqual(set(grp2.members), set(["node2-uuid"]))
 
     # Assign node 2 to group 1
     self.assertEqual(node2.group, grp2.uuid)
     cfg.AssignGroupNodes([
-      (node2.name, grp1.uuid),
+      (node2.uuid, grp1.uuid),
       ])
     cluster_serial += 1
     node2_serial += 1
@@ -389,7 +393,7 @@ class TestConfigRunner(unittest.TestCase):
     grp2_serial += 1
     self.assertEqual(node2.group, grp1.uuid)
     _VerifySerials()
-    self.assertEqual(set(grp1.members), set(["node1", "node2"]))
+    self.assertEqual(set(grp1.members), set(["node1-uuid", "node2-uuid"]))
     self.assertFalse(grp2.members)
 
     # And assign both nodes to group 2
@@ -397,8 +401,8 @@ class TestConfigRunner(unittest.TestCase):
     self.assertEqual(node2.group, grp1.uuid)
     self.assertNotEqual(grp1.uuid, grp2.uuid)
     cfg.AssignGroupNodes([
-      (node1.name, grp2.uuid),
-      (node2.name, grp2.uuid),
+      (node1.uuid, grp2.uuid),
+      (node2.uuid, grp2.uuid),
       ])
     cluster_serial += 1
     node1_serial += 1
@@ -409,7 +413,7 @@ class TestConfigRunner(unittest.TestCase):
     self.assertEqual(node2.group, grp2.uuid)
     _VerifySerials()
     self.assertFalse(grp1.members)
-    self.assertEqual(set(grp2.members), set(["node1", "node2"]))
+    self.assertEqual(set(grp2.members), set(["node1-uuid", "node2-uuid"]))
 
     # Destructive tests
     orig_group = node2.group
@@ -419,7 +423,7 @@ class TestConfigRunner(unittest.TestCase):
                         for node in cfg.GetAllNodesInfo().values())
       node2.group = "68b3d087-6ea5-491c-b81f-0a47d90228c5"
       self.assertRaises(errors.ConfigurationError, cfg.AssignGroupNodes, [
-        ("node2", grp2.uuid),
+        (node2.uuid, grp2.uuid),
         ])
       _VerifySerials()
     finally:
index 30e00d8..d7cf32f 100755 (executable)
@@ -51,7 +51,7 @@ class FakeLU(cmdlib.LogicalUnit):
     return {}
 
   def BuildHooksNodes(self):
-    return ["localhost"], ["localhost"]
+    return ["a"], ["a"]
 
 
 class TestHooksRunner(unittest.TestCase):
@@ -290,7 +290,7 @@ class FakeEnvLU(cmdlib.LogicalUnit):
     return self.hook_env
 
   def BuildHooksNodes(self):
-    return (["localhost"], ["localhost"])
+    return (["a"], ["a"])
 
 
 class FakeNoHooksLU(cmdlib.NoHooksLU):
@@ -327,7 +327,7 @@ class TestHooksRunnerEnv(unittest.TestCase):
     hm.RunPhase(constants.HOOKS_PHASE_PRE)
 
     (node_list, hpath, phase, env) = self._rpcs.pop(0)
-    self.assertEqual(node_list, set(["localhost"]))
+    self.assertEqual(node_list, set(["node_a.example.com"]))
     self.assertEqual(hpath, self.lu.HPATH)
     self.assertEqual(phase, constants.HOOKS_PHASE_PRE)
     self._CheckEnv(env, constants.HOOKS_PHASE_PRE, self.lu.HPATH)
@@ -337,7 +337,7 @@ class TestHooksRunnerEnv(unittest.TestCase):
     hm.RunPhase(constants.HOOKS_PHASE_POST)
 
     (node_list, hpath, phase, env) = self._rpcs.pop(0)
-    self.assertEqual(node_list, set(["localhost"]))
+    self.assertEqual(node_list, set(["node_a.example.com"]))
     self.assertEqual(hpath, self.lu.HPATH)
     self.assertEqual(phase, constants.HOOKS_PHASE_POST)
     self._CheckEnv(env, constants.HOOKS_PHASE_POST, self.lu.HPATH)
@@ -353,7 +353,7 @@ class TestHooksRunnerEnv(unittest.TestCase):
     hm.RunPhase(constants.HOOKS_PHASE_PRE)
 
     (node_list, hpath, phase, env) = self._rpcs.pop(0)
-    self.assertEqual(node_list, set(["localhost"]))
+    self.assertEqual(node_list, set(["node_a.example.com"]))
     self.assertEqual(hpath, self.lu.HPATH)
     self.assertEqual(phase, constants.HOOKS_PHASE_PRE)
     self.assertEqual(env["GANETI_FOO"], "pre-foo-value")
@@ -368,7 +368,7 @@ class TestHooksRunnerEnv(unittest.TestCase):
     hm.RunPhase(constants.HOOKS_PHASE_POST)
 
     (node_list, hpath, phase, env) = self._rpcs.pop(0)
-    self.assertEqual(node_list, set(["localhost"]))
+    self.assertEqual(node_list, set(["node_a.example.com"]))
     self.assertEqual(hpath, self.lu.HPATH)
     self.assertEqual(phase, constants.HOOKS_PHASE_POST)
     self.assertEqual(env["GANETI_FOO"], "pre-foo-value")
@@ -382,7 +382,7 @@ class TestHooksRunnerEnv(unittest.TestCase):
     # Check configuration update hook
     hm.RunConfigUpdate()
     (node_list, hpath, phase, env) = self._rpcs.pop(0)
-    self.assertEqual(set(node_list), set([self.lu.cfg.GetMasterNode()]))
+    self.assertEqual(set(node_list), set([self.lu.cfg.GetMasterNodeName()]))
     self.assertEqual(hpath, constants.HOOKS_NAME_CFGUPDATE)
     self.assertEqual(phase, constants.HOOKS_PHASE_POST)
     self._CheckEnv(env, constants.HOOKS_PHASE_POST,
@@ -405,7 +405,7 @@ class TestHooksRunnerEnv(unittest.TestCase):
   def testNoNodes(self):
     self.lu.hook_env = {}
     hm = hooksmaster.HooksMaster.BuildFromLu(self._HooksRpc, self.lu)
-    hm.RunPhase(constants.HOOKS_PHASE_PRE, nodes=[])
+    hm.RunPhase(constants.HOOKS_PHASE_PRE, node_names=[])
     self.assertRaises(IndexError, self._rpcs.pop)
 
   def testSpecificNodes(self):
@@ -419,7 +419,7 @@ class TestHooksRunnerEnv(unittest.TestCase):
     hm = hooksmaster.HooksMaster.BuildFromLu(self._HooksRpc, self.lu)
 
     for phase in [constants.HOOKS_PHASE_PRE, constants.HOOKS_PHASE_POST]:
-      hm.RunPhase(phase, nodes=nodes)
+      hm.RunPhase(phase, node_names=nodes)
 
       (node_list, hpath, rpc_phase, env) = self._rpcs.pop(0)
       self.assertEqual(set(node_list), set(nodes))
@@ -438,7 +438,7 @@ class TestHooksRunnerEnv(unittest.TestCase):
     hm.RunConfigUpdate()
 
     (node_list, hpath, phase, env) = self._rpcs.pop(0)
-    self.assertEqual(set(node_list), set([self.lu.cfg.GetMasterNode()]))
+    self.assertEqual(set(node_list), set([self.lu.cfg.GetMasterNodeName()]))
     self.assertEqual(hpath, constants.HOOKS_NAME_CFGUPDATE)
     self.assertEqual(phase, constants.HOOKS_PHASE_POST)
     self.assertEqual(env["GANETI_FOO"], "value")
@@ -457,7 +457,7 @@ class TestHooksRunnerEnv(unittest.TestCase):
     hm.RunPhase(constants.HOOKS_PHASE_POST)
 
     (node_list, hpath, phase, env) = self._rpcs.pop(0)
-    self.assertEqual(node_list, set(["localhost"]))
+    self.assertEqual(node_list, set(["node_a.example.com"]))
     self.assertEqual(hpath, self.lu.HPATH)
     self.assertEqual(phase, constants.HOOKS_PHASE_POST)
     self.assertEqual(env["GANETI_FOO"], "value")
@@ -484,7 +484,7 @@ class TestHooksRunnerEnv(unittest.TestCase):
     hm.RunConfigUpdate()
 
     (node_list, hpath, phase, env) = self._rpcs.pop(0)
-    self.assertEqual(set(node_list), set([self.lu.cfg.GetMasterNode()]))
+    self.assertEqual(set(node_list), set([self.lu.cfg.GetMasterNodeName()]))
     self.assertEqual(hpath, constants.HOOKS_NAME_CFGUPDATE)
     self.assertEqual(phase, constants.HOOKS_PHASE_POST)
     self.assertFalse(compat.any(key.startswith("GANETI_POST") for key in env))
@@ -495,5 +495,49 @@ class TestHooksRunnerEnv(unittest.TestCase):
     assert isinstance(self.lu, FakeNoHooksLU), "LU was replaced"
 
 
+class FakeEnvWithNodeNameLU(cmdlib.LogicalUnit):
+  HPATH = "env_test_lu"
+  HTYPE = constants.HTYPE_GROUP
+
+  def __init__(self, *args):
+    cmdlib.LogicalUnit.__init__(self, *args)
+
+  def BuildHooksEnv(self):
+    return {}
+
+  def BuildHooksNodes(self):
+    return (["a"], ["a"], ["explicit.node1.com", "explicit.node2.com"])
+
+
+class TestHooksRunnerEnvNodeNames(unittest.TestCase):
+  def setUp(self):
+    self._rpcs = []
+
+    self.op = opcodes.OpTestDummy(result=False, messages=[], fail=False)
+    self.lu = FakeEnvWithNodeNameLU(FakeProc(), self.op, FakeContext(), None)
+
+  def _HooksRpc(self, *args):
+    self._rpcs.append(args)
+    return FakeHooksRpcSuccess(*args)
+
+  def testEmptyEnv(self):
+    # Check pre-phase hook
+    hm = hooksmaster.HooksMaster.BuildFromLu(self._HooksRpc, self.lu)
+    hm.RunPhase(constants.HOOKS_PHASE_PRE)
+
+    (node_list, hpath, phase, env) = self._rpcs.pop(0)
+    self.assertEqual(node_list, set(["node_a.example.com"]))
+
+    # Check post-phase hook
+    hm.RunPhase(constants.HOOKS_PHASE_POST)
+
+    (node_list, hpath, phase, env) = self._rpcs.pop(0)
+    self.assertEqual(node_list, set(["node_a.example.com",
+                                     "explicit.node1.com",
+                                     "explicit.node2.com"]))
+
+    self.assertRaises(IndexError, self._rpcs.pop)
+
+
 if __name__ == "__main__":
   testutils.GanetiTestProgram()
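
The hooks tests now expect RPC node lists made of resolved names ("a" becomes "node_a.example.com" via the fake configuration), plus any explicit names returned as an optional third element of BuildHooksNodes being used verbatim. A hedged sketch of the resolution these tests imply (helper name is hypothetical):

def _ResolveHookNodes(lu, build_result):
  # build_result is (pre_uuids, post_uuids) or
  # (pre_uuids, post_uuids, extra_post_names)
  pre_uuids, post_uuids = build_result[0], build_result[1]
  extra_names = build_result[2] if len(build_result) > 2 else []
  pre = set(lu.cfg.GetNodeName(u) for u in pre_uuids)
  post = set(lu.cfg.GetNodeName(u) for u in post_uuids) | set(extra_names)
  return pre, post
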
index 233808c..548d8d1 100755 (executable)
@@ -42,12 +42,14 @@ class TestConsole(unittest.TestCase):
     shutil.rmtree(self.tmpdir)
 
   def test(self):
-    instance = objects.Instance(name="fake.example.com", primary_node="node837")
-    cons = hv_chroot.ChrootManager.GetInstanceConsole(instance, {}, {},
+    instance = objects.Instance(name="fake.example.com",
+                                primary_node="node837-uuid")
+    node = objects.Node(name="node837", uuid="node837-uuid")
+    cons = hv_chroot.ChrootManager.GetInstanceConsole(instance, node, {}, {},
                                                       root_dir=self.tmpdir)
     self.assertTrue(cons.Validate())
     self.assertEqual(cons.kind, constants.CONS_SSH)
-    self.assertEqual(cons.host, instance.primary_node)
+    self.assertEqual(cons.host, node.name)
 
 
 if __name__ == "__main__":
index e0c4240..700e0e9 100755 (executable)
@@ -35,7 +35,8 @@ import testutils
 class TestConsole(unittest.TestCase):
   def test(self):
     instance = objects.Instance(name="fake.example.com")
-    cons = hv_fake.FakeHypervisor.GetInstanceConsole(instance, {}, {})
+    node = objects.Node(name="fakenode.example.com")
+    cons = hv_fake.FakeHypervisor.GetInstanceConsole(instance, node, {}, {})
     self.assertTrue(cons.Validate())
     self.assertEqual(cons.kind, constants.CONS_MESSAGE)
 
index c21c749..b3d3f79 100755 (executable)
@@ -185,35 +185,37 @@ class TestQmp(testutils.GanetiTestCase):
 
 
 class TestConsole(unittest.TestCase):
-  def _Test(self, instance, hvparams):
-    cons = hv_kvm.KVMHypervisor.GetInstanceConsole(instance, hvparams, {})
+  def _Test(self, instance, node, hvparams):
+    cons = hv_kvm.KVMHypervisor.GetInstanceConsole(instance, node, hvparams, {})
     self.assertTrue(cons.Validate())
     return cons
 
   def testSerial(self):
     instance = objects.Instance(name="kvm.example.com",
-                                primary_node="node6017")
+                                primary_node="node6017-uuid")
+    node = objects.Node(name="node6017", uuid="node6017-uuid")
     hvparams = {
       constants.HV_SERIAL_CONSOLE: True,
       constants.HV_VNC_BIND_ADDRESS: None,
       constants.HV_KVM_SPICE_BIND: None,
       }
-    cons = self._Test(instance, hvparams)
+    cons = self._Test(instance, node, hvparams)
     self.assertEqual(cons.kind, constants.CONS_SSH)
-    self.assertEqual(cons.host, instance.primary_node)
+    self.assertEqual(cons.host, node.name)
     self.assertEqual(cons.command[0], pathutils.KVM_CONSOLE_WRAPPER)
     self.assertEqual(cons.command[1], constants.SOCAT_PATH)
 
   def testVnc(self):
     instance = objects.Instance(name="kvm.example.com",
-                                primary_node="node7235",
+                                primary_node="node7235-uuid",
                                 network_port=constants.VNC_BASE_PORT + 10)
+    node = objects.Node(name="node7235", uuid="node7235-uuid")
     hvparams = {
       constants.HV_SERIAL_CONSOLE: False,
       constants.HV_VNC_BIND_ADDRESS: "192.0.2.1",
       constants.HV_KVM_SPICE_BIND: None,
       }
-    cons = self._Test(instance, hvparams)
+    cons = self._Test(instance, node, hvparams)
     self.assertEqual(cons.kind, constants.CONS_VNC)
     self.assertEqual(cons.host, "192.0.2.1")
     self.assertEqual(cons.port, constants.VNC_BASE_PORT + 10)
@@ -223,12 +225,13 @@ class TestConsole(unittest.TestCase):
     instance = objects.Instance(name="kvm.example.com",
                                 primary_node="node7235",
                                 network_port=11000)
+    node = objects.Node(name="node7235", uuid="node7235-uuid")
     hvparams = {
       constants.HV_SERIAL_CONSOLE: False,
       constants.HV_VNC_BIND_ADDRESS: None,
       constants.HV_KVM_SPICE_BIND: "192.0.2.1",
       }
-    cons = self._Test(instance, hvparams)
+    cons = self._Test(instance, node, hvparams)
     self.assertEqual(cons.kind, constants.CONS_SPICE)
     self.assertEqual(cons.host, "192.0.2.1")
     self.assertEqual(cons.port, 11000)
@@ -237,12 +240,13 @@ class TestConsole(unittest.TestCase):
     instance = objects.Instance(name="kvm.example.com",
                                 primary_node="node24325",
                                 network_port=0)
+    node = objects.Node(name="node24325", uuid="node24325-uuid")
     hvparams = {
       constants.HV_SERIAL_CONSOLE: False,
       constants.HV_VNC_BIND_ADDRESS: None,
       constants.HV_KVM_SPICE_BIND: None,
       }
-    cons = self._Test(instance, hvparams)
+    cons = self._Test(instance, node, hvparams)
     self.assertEqual(cons.kind, constants.CONS_MESSAGE)
 
 
index 9d9e441..62ba58e 100755 (executable)
@@ -34,11 +34,13 @@ import testutils
 
 class TestConsole(unittest.TestCase):
   def test(self):
-    instance = objects.Instance(name="lxc.example.com", primary_node="node199")
-    cons = hv_lxc.LXCHypervisor.GetInstanceConsole(instance, {}, {})
+    instance = objects.Instance(name="lxc.example.com",
+                                primary_node="node199-uuid")
+    node = objects.Node(name="node199", uuid="node199-uuid")
+    cons = hv_lxc.LXCHypervisor.GetInstanceConsole(instance, node, {}, {})
     self.assertTrue(cons.Validate())
     self.assertEqual(cons.kind, constants.CONS_SSH)
-    self.assertEqual(cons.host, instance.primary_node)
+    self.assertEqual(cons.host, node.name)
     self.assertEqual(cons.command[-1], instance.name)
 
 
index d3b2027..9c73f75 100755 (executable)
@@ -51,11 +51,12 @@ class TestConsole(unittest.TestCase):
     hvparams = {constants.HV_XEN_CMD: constants.XEN_CMD_XL}
     for cls in [hv_xen.XenPvmHypervisor(), hv_xen.XenHvmHypervisor()]:
       instance = objects.Instance(name="xen.example.com",
-                                  primary_node="node24828")
-      cons = cls.GetInstanceConsole(instance, hvparams, {})
+                                  primary_node="node24828-uuid")
+      node = objects.Node(name="node24828", uuid="node24828-uuid")
+      cons = cls.GetInstanceConsole(instance, node, hvparams, {})
       self.assertTrue(cons.Validate())
       self.assertEqual(cons.kind, constants.CONS_SSH)
-      self.assertEqual(cons.host, instance.primary_node)
+      self.assertEqual(cons.host, node.name)
       self.assertEqual(cons.command[-1], instance.name)
 
 
index 9fc7a05..0c7d117 100755 (executable)
@@ -423,8 +423,8 @@ class TestNodeQuery(unittest.TestCase):
     master_node.mtime = None
     assert master_node.name == master_name
 
-    live_data_name = node_names[4]
-    assert live_data_name != master_name
+    live_data_node = nodes[4]
+    assert live_data_node.name != master_name
 
     fake_live_data = {
       "bootid": "a2504766-498e-4b25-b21e-d23098dc3af4",
@@ -443,31 +443,31 @@ class TestNodeQuery(unittest.TestCase):
     assert (sorted(query._NODE_LIVE_FIELDS.keys()) ==
             sorted(fake_live_data.keys()))
 
-    live_data = dict.fromkeys(node_names, {})
-    live_data[live_data_name] = \
+    live_data = dict.fromkeys([node.uuid for node in nodes], {})
+    live_data[live_data_node.uuid] = \
       dict((query._NODE_LIVE_FIELDS[name][2], value)
            for name, value in fake_live_data.items())
 
-    node_to_primary = dict((name, set()) for name in node_names)
-    node_to_primary[master_name].update(["inst1", "inst2"])
+    node_to_primary = dict((node.uuid, set()) for node in nodes)
+    node_to_primary[master_node.uuid].update(["inst1", "inst2"])
 
-    node_to_secondary = dict((name, set()) for name in node_names)
-    node_to_secondary[live_data_name].update(["instX", "instY", "instZ"])
+    node_to_secondary = dict((node.uuid, set()) for node in nodes)
+    node_to_secondary[live_data_node.uuid].update(["instX", "instY", "instZ"])
 
     ng_uuid = "492b4b74-8670-478a-b98d-4c53a76238e6"
     groups = {
       ng_uuid: objects.NodeGroup(name="ng1", uuid=ng_uuid, ndparams={}),
       }
 
-    oob_not_powered_node = node_names[0]
-    nodes[0].powered = False
-    oob_support = dict((name, False) for name in node_names)
-    oob_support[master_name] = True
-    oob_support[oob_not_powered_node] = True
+    oob_not_powered_node = nodes[0]
+    oob_not_powered_node.powered = False
+    oob_support = dict((node.uuid, False) for node in nodes)
+    oob_support[master_node.uuid] = True
+    oob_support[oob_not_powered_node.uuid] = True
 
     master_node.group = ng_uuid
 
-    nqd = query.NodeQueryData(nodes, live_data, master_name,
+    nqd = query.NodeQueryData(nodes, live_data, master_node.uuid,
                               node_to_primary, node_to_secondary, groups,
                               oob_support, cluster)
     result = q.Query(nqd)
@@ -511,7 +511,7 @@ class TestNodeQuery(unittest.TestCase):
                  row[field_index["powered"]] == (constants.RS_UNAVAIL, None)
                  for row, node in zip(result, nodes))
 
-    live_data_row = result[node_to_row[live_data_name]]
+    live_data_row = result[node_to_row[live_data_node.name]]
 
     for (field, value) in fake_live_data.items():
       self.assertEqual(live_data_row[field_index[field]],
@@ -523,10 +523,11 @@ class TestNodeQuery(unittest.TestCase):
                      (constants.RS_NORMAL, 3))
     self.assertEqual(master_row[field_index["pinst_list"]],
                      (constants.RS_NORMAL,
-                      list(node_to_primary[master_name])))
+                      list(node_to_primary[master_node.uuid])))
     self.assertEqual(live_data_row[field_index["sinst_list"]],
                      (constants.RS_NORMAL,
-                      utils.NiceSort(list(node_to_secondary[live_data_name]))))
+                      utils.NiceSort(list(
+                        node_to_secondary[live_data_node.uuid]))))
 
   def testGetLiveNodeField(self):
     nodes = [
@@ -666,9 +667,10 @@ class TestInstanceQuery(unittest.TestCase):
           },
         })
 
-    offline_nodes = ["nodeoff1", "nodeoff2"]
-    bad_nodes = ["nodebad1", "nodebad2", "nodebad3"] + offline_nodes
-    nodes = ["node%s" % i for i in range(10)] + bad_nodes
+    offline_nodes = ["nodeoff1-uuid", "nodeoff2-uuid"]
+    bad_nodes = ["nodebad1-uuid", "nodebad2-uuid", "nodebad3-uuid"] +\
+                offline_nodes
+    node_uuids = ["node%s-uuid" % i for i in range(10)] + bad_nodes
 
     instances = [
       objects.Instance(name="inst1", hvparams={}, beparams={}, nics=[],
@@ -676,7 +678,7 @@ class TestInstanceQuery(unittest.TestCase):
         ctime=1291244000, mtime=1291244400, serial_no=30,
         admin_state=constants.ADMINST_UP, hypervisor=constants.HT_XEN_PVM,
         os="linux1",
-        primary_node="node1",
+        primary_node="node1-uuid",
         disk_template=constants.DT_PLAIN,
         disks=[],
         disks_active=True,
@@ -686,7 +688,7 @@ class TestInstanceQuery(unittest.TestCase):
         ctime=1291211000, mtime=1291211077, serial_no=1,
         admin_state=constants.ADMINST_UP, hypervisor=constants.HT_XEN_HVM,
         os="deb99",
-        primary_node="node5",
+        primary_node="node5-uuid",
         disk_template=constants.DT_DISKLESS,
         disks=[],
         disks_active=True,
@@ -700,7 +702,7 @@ class TestInstanceQuery(unittest.TestCase):
         ctime=1291011000, mtime=1291013000, serial_no=1923,
         admin_state=constants.ADMINST_DOWN, hypervisor=constants.HT_KVM,
         os="busybox",
-        primary_node="node6",
+        primary_node="node6-uuid",
         disk_template=constants.DT_DRBD8,
         disks=[],
         disks_active=False,
@@ -717,7 +719,7 @@ class TestInstanceQuery(unittest.TestCase):
         ctime=1291244390, mtime=1291244395, serial_no=25,
         admin_state=constants.ADMINST_DOWN, hypervisor=constants.HT_XEN_PVM,
         os="linux1",
-        primary_node="nodeoff2",
+        primary_node="nodeoff2-uuid",
         disk_template=constants.DT_DRBD8,
         disks=[],
         disks_active=True,
@@ -743,7 +745,7 @@ class TestInstanceQuery(unittest.TestCase):
         ctime=1231211000, mtime=1261200000, serial_no=3,
         admin_state=constants.ADMINST_UP, hypervisor=constants.HT_XEN_HVM,
         os="deb99",
-        primary_node="nodebad2",
+        primary_node="nodebad2-uuid",
         disk_template=constants.DT_DISKLESS,
         disks=[],
         disks_active=True,
@@ -757,7 +759,7 @@ class TestInstanceQuery(unittest.TestCase):
         ctime=7513, mtime=11501, serial_no=13390,
         admin_state=constants.ADMINST_DOWN, hypervisor=constants.HT_XEN_HVM,
         os="deb99",
-        primary_node="node7",
+        primary_node="node7-uuid",
         disk_template=constants.DT_DISKLESS,
         disks=[],
         disks_active=False,
@@ -773,7 +775,7 @@ class TestInstanceQuery(unittest.TestCase):
         ctime=None, mtime=None, serial_no=1947,
         admin_state=constants.ADMINST_DOWN, hypervisor=constants.HT_XEN_HVM,
         os="deb99",
-        primary_node="node6",
+        primary_node="node6-uuid",
         disk_template=constants.DT_DISKLESS,
         disks=[],
         disks_active=False,
@@ -784,7 +786,7 @@ class TestInstanceQuery(unittest.TestCase):
         ctime=None, mtime=None, serial_no=19478,
         admin_state=constants.ADMINST_OFFLINE, hypervisor=constants.HT_XEN_HVM,
         os="deb99",
-        primary_node="node6",
+        primary_node="node6-uuid",
         disk_template=constants.DT_DISKLESS,
         disks=[],
         disks_active=False,
@@ -831,9 +833,15 @@ class TestInstanceQuery(unittest.TestCase):
                               user="root",
                               command=["hostname"]).ToDict()
 
+    nodes = dict([(uuid, objects.Node(
+                           name="%s.example.com" % uuid,
+                           uuid=uuid,
+                           group="default-uuid"))
+                  for uuid in node_uuids])
+
     iqd = query.InstanceQueryData(instances, cluster, disk_usage,
                                   offline_nodes, bad_nodes, live_data,
-                                  wrongnode_inst, consinfo, {}, {}, {})
+                                  wrongnode_inst, consinfo, nodes, {}, {})
     result = q.Query(iqd)
     self.assertEqual(len(result), len(instances))
     self.assert_(compat.all(len(row) == len(selected)
@@ -845,7 +853,7 @@ class TestInstanceQuery(unittest.TestCase):
     tested_status = set()
 
     for (inst, row) in zip(instances, result):
-      assert inst.primary_node in nodes
+      assert inst.primary_node in node_uuids
 
       self.assertEqual(row[fieldidx["name"]],
                        (constants.RS_NORMAL, inst.name))
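
In the TestInstanceQuery fixtures above, primary_node now holds a node UUID, and query.InstanceQueryData additionally receives a UUID-to-Node mapping so names can still be reported. A short runnable sketch of that lookup (FakeNode and the UUIDs below are illustrative only):

    # Sketch only: resolving an instance's primary node name through the map.
    class FakeNode(object):
        def __init__(self, name, uuid):
            self.name = name
            self.uuid = uuid

    node_uuids = ["node1-uuid", "node5-uuid"]
    nodes = dict((uuid, FakeNode("%s.example.com" % uuid, uuid))
                 for uuid in node_uuids)
    primary_node_uuid = "node5-uuid"  # what an instance's primary_node now holds
    assert nodes[primary_node_uuid].name == "node5-uuid.example.com"
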
index a916483..1d2235e 100755 (executable)
@@ -341,7 +341,7 @@ class TestSsconfResolver(unittest.TestCase):
     ssc = GetFakeSimpleStoreClass(lambda _: node_addr_list)
     result = rpc._SsconfResolver(True, node_list, NotImplemented,
                                  ssc=ssc, nslookup_fn=NotImplemented)
-    self.assertEqual(result, zip(node_list, addr_list))
+    self.assertEqual(result, zip(node_list, addr_list, node_list))
 
   def testNsLookup(self):
     addr_list = ["192.0.2.%d" % n for n in range(0, 255, 13)]
@@ -351,7 +351,7 @@ class TestSsconfResolver(unittest.TestCase):
     nslookup_fn = lambda name, family=None: node_addr_map.get(name)
     result = rpc._SsconfResolver(True, node_list, NotImplemented,
                                  ssc=ssc, nslookup_fn=nslookup_fn)
-    self.assertEqual(result, zip(node_list, addr_list))
+    self.assertEqual(result, zip(node_list, addr_list, node_list))
 
   def testDisabledSsconfIp(self):
     addr_list = ["192.0.2.%d" % n for n in range(0, 255, 13)]
@@ -361,7 +361,7 @@ class TestSsconfResolver(unittest.TestCase):
     nslookup_fn = lambda name, family=None: node_addr_map.get(name)
     result = rpc._SsconfResolver(False, node_list, NotImplemented,
                                  ssc=ssc, nslookup_fn=nslookup_fn)
-    self.assertEqual(result, zip(node_list, addr_list))
+    self.assertEqual(result, zip(node_list, addr_list, node_list))
 
   def testBothLookups(self):
     addr_list = ["192.0.2.%d" % n for n in range(0, 255, 13)]
@@ -373,7 +373,7 @@ class TestSsconfResolver(unittest.TestCase):
     nslookup_fn = lambda name, family=None: node_addr_map.get(name)
     result = rpc._SsconfResolver(True, node_list, NotImplemented,
                                  ssc=ssc, nslookup_fn=nslookup_fn)
-    self.assertEqual(result, zip(node_list, addr_list))
+    self.assertEqual(result, zip(node_list, addr_list, node_list))
 
   def testAddressLookupIPv6(self):
     addr_list = ["2001:db8::%d" % n for n in range(0, 255, 11)]
@@ -382,7 +382,7 @@ class TestSsconfResolver(unittest.TestCase):
     ssc = GetFakeSimpleStoreClass(lambda _: node_addr_list)
     result = rpc._SsconfResolver(True, node_list, NotImplemented,
                                  ssc=ssc, nslookup_fn=NotImplemented)
-    self.assertEqual(result, zip(node_list, addr_list))
+    self.assertEqual(result, zip(node_list, addr_list, node_list))
 
 
 class TestStaticResolver(unittest.TestCase):
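
As the updated assertions show, each resolver entry now carries a third element that echoes the key the lookup was made with; in these tests that key is the node name itself. A minimal runnable sketch of the expected result shape (the names and addresses are placeholders):

    # Sketch only: (name, address, original lookup key) per resolved node.
    node_list = ["node1.example.com", "node2.example.com"]
    addr_list = ["192.0.2.1", "192.0.2.2"]
    expected = zip(node_list, addr_list, node_list)
    assert list(expected) == [
        ("node1.example.com", "192.0.2.1", "node1.example.com"),
        ("node2.example.com", "192.0.2.2", "node2.example.com"),
    ]
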
@@ -390,7 +390,7 @@ class TestStaticResolver(unittest.TestCase):
     addresses = ["192.0.2.%d" % n for n in range(0, 123, 7)]
     nodes = ["node%s.example.com" % n for n in range(0, 123, 7)]
     res = rpc._StaticResolver(addresses)
-    self.assertEqual(res(nodes, NotImplemented), zip(nodes, addresses))
+    self.assertEqual(res(nodes, NotImplemented), zip(nodes, addresses, nodes))
 
   def testWrongLength(self):
     res = rpc._StaticResolver([])
@@ -399,34 +399,40 @@ class TestStaticResolver(unittest.TestCase):
 
 class TestNodeConfigResolver(unittest.TestCase):
   @staticmethod
-  def _GetSingleOnlineNode(name):
-    assert name == "node90.example.com"
-    return objects.Node(name=name, offline=False, primary_ip="192.0.2.90")
+  def _GetSingleOnlineNode(uuid):
+    assert uuid == "node90-uuid"
+    return objects.Node(name="node90.example.com",
+                        uuid=uuid,
+                        offline=False,
+                        primary_ip="192.0.2.90")
 
   @staticmethod
-  def _GetSingleOfflineNode(name):
-    assert name == "node100.example.com"
-    return objects.Node(name=name, offline=True, primary_ip="192.0.2.100")
+  def _GetSingleOfflineNode(uuid):
+    assert uuid == "node100-uuid"
+    return objects.Node(name="node100.example.com",
+                        uuid=uuid,
+                        offline=True,
+                        primary_ip="192.0.2.100")
 
   def testSingleOnline(self):
     self.assertEqual(rpc._NodeConfigResolver(self._GetSingleOnlineNode,
                                              NotImplemented,
-                                             ["node90.example.com"], None),
-                     [("node90.example.com", "192.0.2.90")])
+                                             ["node90-uuid"], None),
+                     [("node90.example.com", "192.0.2.90", "node90-uuid")])
 
   def testSingleOffline(self):
     self.assertEqual(rpc._NodeConfigResolver(self._GetSingleOfflineNode,
                                              NotImplemented,
-                                             ["node100.example.com"], None),
-                     [("node100.example.com", rpc._OFFLINE)])
+                                             ["node100-uuid"], None),
+                     [("node100.example.com", rpc._OFFLINE, "node100-uuid")])
 
   def testSingleOfflineWithAcceptOffline(self):
     fn = self._GetSingleOfflineNode
-    assert fn("node100.example.com").offline
+    assert fn("node100-uuid").offline
     self.assertEqual(rpc._NodeConfigResolver(fn, NotImplemented,
-                                             ["node100.example.com"],
+                                             ["node100-uuid"],
                                              rpc_defs.ACCEPT_OFFLINE_NODE),
-                     [("node100.example.com", "192.0.2.100")])
+                     [("node100.example.com", "192.0.2.100", "node100-uuid")])
     for i in [False, True, "", "Hello", 0, 1]:
       self.assertRaises(AssertionError, rpc._NodeConfigResolver,
                         fn, NotImplemented, ["node100.example.com"], i)
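
The testSingleOffline* cases above pin down the offline handling: without an option an offline node resolves to the rpc._OFFLINE marker, with rpc_defs.ACCEPT_OFFLINE_NODE its real primary IP is returned, and option values such as booleans or strings are rejected. A simplified, runnable sketch of that behaviour (this is not the resolver's actual code, just what the assertions describe):

    # Sketch only: offline handling as exercised by the assertions above.
    _OFFLINE = object()
    ACCEPT_OFFLINE_NODE = object()

    def resolve_entry(node, opts):
        assert opts in (None, ACCEPT_OFFLINE_NODE)
        if node.offline and opts is not ACCEPT_OFFLINE_NODE:
            return (node.name, _OFFLINE, node.uuid)
        return (node.name, node.primary_ip, node.uuid)

    class FakeNode(object):
        def __init__(self, name, uuid, primary_ip, offline):
            self.name, self.uuid = name, uuid
            self.primary_ip, self.offline = primary_ip, offline

    offline_node = FakeNode("node100.example.com", "node100-uuid",
                            "192.0.2.100", True)
    assert resolve_entry(offline_node, None)[1] is _OFFLINE
    assert resolve_entry(offline_node, ACCEPT_OFFLINE_NODE)[1] == "192.0.2.100"
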
@@ -434,7 +440,8 @@ class TestNodeConfigResolver(unittest.TestCase):
   def testUnknownSingleNode(self):
     self.assertEqual(rpc._NodeConfigResolver(lambda _: None, NotImplemented,
                                              ["node110.example.com"], None),
-                     [("node110.example.com", "node110.example.com")])
+                     [("node110.example.com", "node110.example.com",
+                       "node110.example.com")])
 
   def testMultiEmpty(self):
     self.assertEqual(rpc._NodeConfigResolver(NotImplemented,
@@ -443,10 +450,11 @@ class TestNodeConfigResolver(unittest.TestCase):
                      [])
 
   def testMultiSomeOffline(self):
-    nodes = dict(("node%s.example.com" % i,
+    nodes = dict(("node%s-uuid" % i,
                   objects.Node(name="node%s.example.com" % i,
                                offline=((i % 3) == 0),
-                               primary_ip="192.0.2.%s" % i))
+                               primary_ip="192.0.2.%s" % i,
+                               uuid="node%s-uuid" % i))
                   for i in range(1, 255))
 
     # Resolve no names
@@ -458,15 +466,15 @@ class TestNodeConfigResolver(unittest.TestCase):
     # Offline, online and unknown hosts
     self.assertEqual(rpc._NodeConfigResolver(NotImplemented,
                                              lambda: nodes,
-                                             ["node3.example.com",
-                                              "node92.example.com",
-                                              "node54.example.com",
+                                             ["node3-uuid",
+                                              "node92-uuid",
+                                              "node54-uuid",
                                               "unknown.example.com",],
                                              None), [
-      ("node3.example.com", rpc._OFFLINE),
-      ("node92.example.com", "192.0.2.92"),
-      ("node54.example.com", rpc._OFFLINE),
-      ("unknown.example.com", "unknown.example.com"),
+      ("node3.example.com", rpc._OFFLINE, "node3-uuid"),
+      ("node92.example.com", "192.0.2.92", "node92-uuid"),
+      ("node54.example.com", rpc._OFFLINE, "node54-uuid"),
+      ("unknown.example.com", "unknown.example.com", "unknown.example.com"),
       ])
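
For the unknown host in the list above, the resolver simply echoes the given string in all three positions, presumably leaving address resolution to the connection step. The same pass-through shape as a tiny runnable sketch (the helper name is made up):

    # Sketch only: unknown lookup keys are passed through unchanged.
    def passthrough_entry(lookup_key):
        return (lookup_key, lookup_key, lookup_key)

    assert passthrough_entry("unknown.example.com") == \
        ("unknown.example.com", "unknown.example.com", "unknown.example.com")
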
 
 
@@ -672,7 +680,7 @@ class TestRpcClientBase(unittest.TestCase):
     def _Resolver(expected, hosts, options):
       self.assertEqual(hosts, nodes)
       self.assertEqual(options, expected)
-      return zip(hosts, nodes)
+      return zip(hosts, nodes, hosts)
 
     def _DynamicResolverOptions((arg0, )):
       return sum(arg0)
index 82e286d..6d2312f 100644 (file)
@@ -53,11 +53,23 @@ class FakeConfig:
     return "test.cluster"
 
   def GetMasterNode(self):
+    return "a"
+
+  def GetMasterNodeName(self):
     return netutils.Hostname.GetSysName()
 
   def GetDefaultIAllocator(Self):
     return "testallocator"
 
+  def GetNodeName(self, node_uuid):
+    if node_uuid in self.GetNodeList():
+      return "node_%s.example.com" % (node_uuid,)
+    else:
+      return None
+
+  def GetNodeNames(self, node_uuids):
+    return map(self.GetNodeName, node_uuids)
+
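
FakeConfig now tells the master node's UUID (GetMasterNode) apart from its resolvable name (GetMasterNodeName) and gains UUID-to-name helpers, so UUID-based callers can still obtain printable names from the mock. A compact usage sketch against a similarly invented stand-in (not the mock above; the node list is made up):

    # Sketch only: how UUID-based callers consume the name-mapping helpers.
    class TinyFakeConfig(object):
        def GetNodeList(self):
            return ["a", "b", "c"]

        def GetNodeName(self, node_uuid):
            if node_uuid in self.GetNodeList():
                return "node_%s.example.com" % (node_uuid,)
            return None

        def GetNodeNames(self, node_uuids):
            return map(self.GetNodeName, node_uuids)

    cfg = TinyFakeConfig()
    assert list(cfg.GetNodeNames(["a", "z"])) == ["node_a.example.com", None]
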
 
 class FakeProc:
   """Fake processor object"""