Revision 1c3231aa lib/cmdlib/node.py

--- a/lib/cmdlib/node.py
+++ b/lib/cmdlib/node.py
 from ganeti.cmdlib.common import CheckParamsNotGlobal, \
   MergeAndVerifyHvState, MergeAndVerifyDiskState, \
   IsExclusiveStorageEnabledNode, CheckNodePVs, \
-  RedistributeAncillaryFiles, ExpandNodeName, ShareAll, SupportsOob, \
+  RedistributeAncillaryFiles, ExpandNodeUuidAndName, ShareAll, SupportsOob, \
   CheckInstanceState, INSTANCE_DOWN, GetUpdatedParams, \
   AdjustCandidatePool, CheckIAllocatorOrNode, LoadNodeEvacResult, \
   GetWantedNodes, MapInstanceDisksToNodes, RunPostHook, \
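
The only change in the import block is the switch from ExpandNodeName to ExpandNodeUuidAndName, which sets up the theme of the whole revision: opcodes now carry a node UUID alongside the node name, and the helper resolves whichever of the two was supplied. The call sites later in this diff all follow the pattern (self.op.node_uuid, self.op.node_name) = ExpandNodeUuidAndName(self.cfg, self.op.node_uuid, self.op.node_name). Below is a minimal standalone sketch of that kind of resolver; ToyConfig and its method are illustrative stand-ins, not Ganeti's real ConfigWriter API.

# Illustrative sketch only; the real ExpandNodeUuidAndName lives in
# lib/cmdlib/common.py and resolves against the cluster configuration.
class ToyConfig(object):
  def __init__(self, nodes):
    self._nodes = nodes  # dict: uuid -> name

  def expand_node_uuid_and_name(self, uuid, name):
    """Return a (uuid, name) pair given either identifier."""
    if uuid is not None:
      return (uuid, self._nodes[uuid])
    for cand_uuid, cand_name in self._nodes.items():
      if cand_name == name:
        return (cand_uuid, cand_name)
    raise KeyError("Unknown node %s" % name)

cfg = ToyConfig({"3f5e-01": "node1.example.com"})
assert cfg.expand_node_uuid_and_name(None, "node1.example.com") == \
    ("3f5e-01", "node1.example.com")
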
......
 
   @type lu: L{LogicalUnit}
   @param lu: the LU on behalf of which we make the check
-  @type node: string
+  @type node: L{objects.Node}
   @param node: the node to check
   @type secondary_ip: string
   @param secondary_ip: the ip to check
   @type prereq: boolean
   @param prereq: whether to throw a prerequisite or an execute error
-  @raise errors.OpPrereqError: if the node doesn't have the ip, and prereq=True
+  @raise errors.OpPrereqError: if the node doesn't have the ip,
+  and prereq=True
   @raise errors.OpExecError: if the node doesn't have the ip, and prereq=False
 
   """
-  result = lu.rpc.call_node_has_ip_address(node, secondary_ip)
-  result.Raise("Failure checking secondary ip on node %s" % node,
+  # this can be called with a new node, which has no UUID yet, so perform the
+  # RPC call using its name
+  result = lu.rpc.call_node_has_ip_address(node.name, secondary_ip)
+  result.Raise("Failure checking secondary ip on node %s" % node.name,
               prereq=prereq, ecode=errors.ECODE_ENVIRON)
   if not result.payload:
     msg = ("Node claims it doesn't have the secondary ip you gave (%s),"
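
_CheckNodeHasSecondaryIP now receives the node object instead of a bare name, and deliberately keeps using node.name for the RPC: as the new comment says, a node that is still being added has no UUID yet. A toy sketch of that rule, with ToyNode standing in for objects.Node:

# Toy stand-in for objects.Node; not Ganeti's real class.
class ToyNode(object):
  def __init__(self, name, uuid=None):
    self.name = name
    self.uuid = uuid

def rpc_target(node):
  # the name works even before the node is stored in the configuration,
  # while the UUID only exists afterwards
  return node.name

new_node = ToyNode("node2.example.com")  # no UUID assigned yet
assert rpc_target(new_node) == "node2.example.com"
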
......
                                          family=self.primary_ip_family)
     self.op.node_name = self.hostname.name
 
-    if self.op.readd and self.op.node_name == self.cfg.GetMasterNode():
+    if self.op.readd and self.op.node_name == self.cfg.GetMasterNodeName():
       raise errors.OpPrereqError("Cannot readd the master node",
                                  errors.ECODE_STATE)
 
......
     """Build hooks nodes.
 
     """
-    # Exclude added node
-    pre_nodes = list(set(self.cfg.GetNodeList()) - set([self.op.node_name]))
-    post_nodes = pre_nodes + [self.op.node_name, ]
+    hook_nodes = self.cfg.GetNodeList()
+    new_node_info = self.cfg.GetNodeInfoByName(self.op.node_name)
+    if new_node_info is not None:
+      # Exclude added node
+      hook_nodes = list(set(hook_nodes) - set([new_node_info.uuid]))
 
-    return (pre_nodes, post_nodes)
+    # add the new node as post hook node by name; it does not have an UUID yet
+    return (hook_nodes, hook_nodes, [self.op.node_name, ])
 
   def CheckPrereq(self):
     """Check prerequisites.
......
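
The BuildHooksNodes change above also alters the shape of the return value: instead of (pre_nodes, post_nodes) keyed by name, the LU now returns a three-element tuple of pre-hook node UUIDs, post-hook node UUIDs, and post-hook node names, with the node being added listed by name because it has no UUID yet. A sketch of that shape with toy data (the real code uses self.cfg.GetNodeList() and GetNodeInfoByName()):

def build_hooks_nodes(existing_uuids, new_node_name, new_node_uuid=None):
  hook_nodes = list(existing_uuids)
  if new_node_uuid is not None:
    # a re-added node is already in the configuration: exclude it
    hook_nodes = [u for u in hook_nodes if u != new_node_uuid]
  # (pre-hook node UUIDs, post-hook node UUIDs, post-hook node names)
  return (hook_nodes, hook_nodes, [new_node_name])

assert build_hooks_nodes(["uuid-a", "uuid-b"], "node3.example.com") == \
    (["uuid-a", "uuid-b"], ["uuid-a", "uuid-b"], ["node3.example.com"])
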
     """
     cfg = self.cfg
     hostname = self.hostname
-    node = hostname.name
+    node_name = hostname.name
     primary_ip = self.op.primary_ip = hostname.ip
     if self.op.secondary_ip is None:
       if self.primary_ip_family == netutils.IP6Address.family:
......
       raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
                                  " address" % secondary_ip, errors.ECODE_INVAL)
 
-    node_list = cfg.GetNodeList()
-    if not self.op.readd and node in node_list:
+    existing_node_info = cfg.GetNodeInfoByName(node_name)
+    if not self.op.readd and existing_node_info is not None:
       raise errors.OpPrereqError("Node %s is already in the configuration" %
-                                 node, errors.ECODE_EXISTS)
-    elif self.op.readd and node not in node_list:
-      raise errors.OpPrereqError("Node %s is not in the configuration" % node,
-                                 errors.ECODE_NOENT)
+                                 node_name, errors.ECODE_EXISTS)
+    elif self.op.readd and existing_node_info is None:
+      raise errors.OpPrereqError("Node %s is not in the configuration" %
+                                 node_name, errors.ECODE_NOENT)
 
     self.changed_primary_ip = False
 
-    for existing_node_name, existing_node in cfg.GetMultiNodeInfo(node_list):
-      if self.op.readd and node == existing_node_name:
+    for existing_node in cfg.GetAllNodesInfo().values():
+      if self.op.readd and node_name == existing_node.name:
         if existing_node.secondary_ip != secondary_ip:
           raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                      " address configuration as before",
......
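
In the CheckPrereq hunk above, the membership test against cfg.GetNodeList() is replaced by a single cfg.GetNodeInfoByName() lookup, so the same call both answers whether the node exists and hands back the object whose UUID drives the later checks. A toy version of that lookup, using a plain dict instead of the ConfigWriter:

# Toy node table: uuid -> record; not the real configuration API.
nodes_by_uuid = {
  "uuid-a": {"name": "node1.example.com", "secondary_ip": "192.0.2.1"},
}

def get_node_info_by_name(name):
  """Return the full node record for a name, or None if unknown."""
  for info in nodes_by_uuid.values():
    if info["name"] == name:
      return info
  return None

existing = get_node_info_by_name("node1.example.com")
assert existing is not None and existing["secondary_ip"] == "192.0.2.1"
assert get_node_info_by_name("node9.example.com") is None
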
     # After this 'if' block, None is no longer a valid value for the
     # _capable op attributes
     if self.op.readd:
-      old_node = self.cfg.GetNodeInfo(node)
-      assert old_node is not None, "Can't retrieve locked node %s" % node
+      assert existing_node_info is not None, \
+        "Can't retrieve locked node %s" % node_name
       for attr in self._NFLAGS:
         if getattr(self.op, attr) is None:
-          setattr(self.op, attr, getattr(old_node, attr))
+          setattr(self.op, attr, getattr(existing_node_info, attr))
     else:
       for attr in self._NFLAGS:
         if getattr(self.op, attr) is None:
           setattr(self.op, attr, True)
 
     if self.op.readd and not self.op.vm_capable:
-      pri, sec = cfg.GetNodeInstances(node)
+      pri, sec = cfg.GetNodeInstances(existing_node_info.uuid)
       if pri or sec:
         raise errors.OpPrereqError("Node %s being re-added with vm_capable"
                                    " flag set to false, but it already holds"
-                                   " instances" % node,
+                                   " instances" % node_name,
                                    errors.ECODE_STATE)
 
     # check that the type of the node (single versus dual homed) is the
......
                                    errors.ECODE_ENVIRON)
 
     if self.op.readd:
-      exceptions = [node]
+      exceptions = [existing_node_info.uuid]
     else:
       exceptions = []
 
......
       self.master_candidate = False
 
     if self.op.readd:
-      self.new_node = old_node
+      self.new_node = existing_node_info
     else:
       node_group = cfg.LookupNodeGroup(self.op.group)
-      self.new_node = objects.Node(name=node,
+      self.new_node = objects.Node(name=node_name,
                                    primary_ip=primary_ip,
                                    secondary_ip=secondary_ip,
                                    master_candidate=self.master_candidate,
......
     # TODO: If we need to have multiple DnsOnlyRunner we probably should make
     #       it a property on the base class.
     rpcrunner = rpc.DnsOnlyRunner()
-    result = rpcrunner.call_version([node])[node]
-    result.Raise("Can't get version information from node %s" % node)
+    result = rpcrunner.call_version([node_name])[node_name]
+    result.Raise("Can't get version information from node %s" % node_name)
     if constants.PROTOCOL_VERSION == result.payload:
       logging.info("Communication to node %s fine, sw version %s match",
-                   node, result.payload)
+                   node_name, result.payload)
     else:
       raise errors.OpPrereqError("Version mismatch master version %s,"
                                  " node version %s" %
......
       excl_stor = IsExclusiveStorageEnabledNode(cfg, self.new_node)
       cname = self.cfg.GetClusterName()
       result = rpcrunner.call_node_verify_light(
-          [node], vparams, cname, cfg.GetClusterInfo().hvparams)[node]
+          [node_name], vparams, cname, cfg.GetClusterInfo().hvparams)[node_name]
       (errmsgs, _) = CheckNodePVs(result.payload, excl_stor)
       if errmsgs:
         raise errors.OpPrereqError("Checks on node PVs failed: %s" %
......
 
     """
     new_node = self.new_node
-    node = new_node.name
+    node_name = new_node.name
 
     assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER), \
       "Not owning BGL"
......
     # Add node to our /etc/hosts, and add key to known_hosts
     if self.cfg.GetClusterInfo().modify_etc_hosts:
       master_node = self.cfg.GetMasterNode()
-      result = self.rpc.call_etc_hosts_modify(master_node,
-                                              constants.ETC_HOSTS_ADD,
-                                              self.hostname.name,
-                                              self.hostname.ip)
+      result = self.rpc.call_etc_hosts_modify(
+                 master_node, constants.ETC_HOSTS_ADD, self.hostname.name,
+                 self.hostname.ip)
       result.Raise("Can't update hosts file with new host data")
 
     if new_node.secondary_ip != new_node.primary_ip:
-      _CheckNodeHasSecondaryIP(self, new_node.name, new_node.secondary_ip,
-                               False)
+      _CheckNodeHasSecondaryIP(self, new_node, new_node.secondary_ip, False)
 
-    node_verify_list = [self.cfg.GetMasterNode()]
+    node_verifier_uuids = [self.cfg.GetMasterNode()]
     node_verify_param = {
-      constants.NV_NODELIST: ([node], {}),
+      constants.NV_NODELIST: ([node_name], {}),
       # TODO: do a node-net-test as well?
     }
 
-    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
-                                       self.cfg.GetClusterName(),
-                                       self.cfg.GetClusterInfo().hvparams)
-    for verifier in node_verify_list:
+    result = self.rpc.call_node_verify(
+               node_verifier_uuids, node_verify_param,
+               self.cfg.GetClusterName(),
+               self.cfg.GetClusterInfo().hvparams)
+    for verifier in node_verifier_uuids:
       result[verifier].Raise("Cannot communicate with node %s" % verifier)
       nl_payload = result[verifier].payload[constants.NV_NODELIST]
       if nl_payload:
......
         raise errors.OpExecError("ssh/hostname verification failed")
 
     if self.op.readd:
-      RedistributeAncillaryFiles(self)
       self.context.ReaddNode(new_node)
+      RedistributeAncillaryFiles(self)
       # make sure we redistribute the config
       self.cfg.Update(new_node, feedback_fn)
       # and make sure the new node will not have old files around
       if not new_node.master_candidate:
-        result = self.rpc.call_node_demote_from_mc(new_node.name)
+        result = self.rpc.call_node_demote_from_mc(new_node.uuid)
         result.Warn("Node failed to demote itself from master candidate status",
                     self.LogWarning)
     else:
-      RedistributeAncillaryFiles(self, additional_nodes=[node],
-                                 additional_vm=self.op.vm_capable)
       self.context.AddNode(new_node, self.proc.GetECId())
+      RedistributeAncillaryFiles(self)
 
 
 class LUNodeSetParams(LogicalUnit):
......
   _FLAGS = ["master_candidate", "drained", "offline"]
 
   def CheckArguments(self):
-    self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
+    (self.op.node_uuid, self.op.node_name) = \
+      ExpandNodeUuidAndName(self.cfg, self.op.node_uuid, self.op.node_name)
     all_mods = [self.op.offline, self.op.master_candidate, self.op.drained,
                 self.op.master_capable, self.op.vm_capable,
                 self.op.secondary_ip, self.op.ndparams, self.op.hv_state,
......
 
     """
     return (instance.disk_template in constants.DTS_INT_MIRROR and
-            self.op.node_name in instance.all_nodes)
+            self.op.node_uuid in instance.all_nodes)
 
   def ExpandNames(self):
     if self.lock_all:
......
         }
     else:
       self.needed_locks = {
-        locking.LEVEL_NODE: self.op.node_name,
+        locking.LEVEL_NODE: self.op.node_uuid,
         }
 
     # Since modifying a node can have severe effects on currently running
......
     """Build hooks nodes.
 
     """
-    nl = [self.cfg.GetMasterNode(), self.op.node_name]
+    nl = [self.cfg.GetMasterNode(), self.op.node_uuid]
     return (nl, nl)
 
   def CheckPrereq(self):
......
     This only checks the instance list against the existing names.
 
     """
-    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
-
+    node = self.cfg.GetNodeInfo(self.op.node_uuid)
     if self.lock_instances:
       affected_instances = \
         self.cfg.GetInstancesInfoByFilter(self._InstanceFilter)
......
                                    " secondary IP address have changed since"
                                    " locks were acquired, wanted '%s', have"
                                    " '%s'; retry the operation" %
-                                   (self.op.node_name,
+                                   (node.name,
                                     utils.CommaJoin(wanted_instances),
                                     utils.CommaJoin(owned_instances)),
                                    errors.ECODE_STATE)
......
         self.op.drained is not None or
         self.op.offline is not None):
       # we can't change the master's node flags
-      if self.op.node_name == self.cfg.GetMasterNode():
+      if node.uuid == self.cfg.GetMasterNode():
         raise errors.OpPrereqError("The master role can be changed"
                                    " only via master-failover",
                                    errors.ECODE_INVAL)
......
                                  errors.ECODE_STATE)
 
     if self.op.vm_capable is False:
-      (ipri, isec) = self.cfg.GetNodeInstances(self.op.node_name)
+      (ipri, isec) = self.cfg.GetNodeInstances(node.uuid)
       if ipri or isec:
         raise errors.OpPrereqError("Node %s hosts instances, cannot unset"
                                    " the vm_capable flag" % node.name,
......
       # check if after removing the current node, we're missing master
       # candidates
       (mc_remaining, mc_should, _) = \
-          self.cfg.GetMasterCandidateStats(exceptions=[node.name])
+          self.cfg.GetMasterCandidateStats(exceptions=[node.uuid])
       if mc_remaining < mc_should:
         raise errors.OpPrereqError("Not enough master candidates, please"
                                    " pass auto promote option to allow"
......
 
     # Check for ineffective changes
     for attr in self._FLAGS:
-      if (getattr(self.op, attr) is False and getattr(node, attr) is False):
+      if getattr(self.op, attr) is False and getattr(node, attr) is False:
         self.LogInfo("Ignoring request to unset flag %s, already unset", attr)
         setattr(self.op, attr, None)
 
......
 
     if old_role == self._ROLE_OFFLINE and new_role != old_role:
       # Trying to transition out of offline status
-      result = self.rpc.call_version([node.name])[node.name]
+      result = self.rpc.call_version([node.uuid])[node.uuid]
       if result.fail_msg:
         raise errors.OpPrereqError("Node %s is being de-offlined but fails"
                                    " to report its version: %s" %
......
       master = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
       master_singlehomed = master.secondary_ip == master.primary_ip
       if master_singlehomed and self.op.secondary_ip != node.primary_ip:
-        if self.op.force and node.name == master.name:
+        if self.op.force and node.uuid == master.uuid:
           self.LogWarning("Transitioning from single-homed to multi-homed"
                           " cluster; all nodes will require a secondary IP"
                           " address")
......
                                      " target node to be the master",
                                      errors.ECODE_INVAL)
       elif not master_singlehomed and self.op.secondary_ip == node.primary_ip:
-        if self.op.force and node.name == master.name:
+        if self.op.force and node.uuid == master.uuid:
           self.LogWarning("Transitioning from multi-homed to single-homed"
                           " cluster; secondary IP addresses will have to be"
                           " removed")
......
           CheckInstanceState(self, instance, INSTANCE_DOWN,
                              msg="cannot change secondary ip")
 
-        _CheckNodeHasSecondaryIP(self, node.name, self.op.secondary_ip, True)
-        if master.name != node.name:
+        _CheckNodeHasSecondaryIP(self, node, self.op.secondary_ip, True)
+        if master.uuid != node.uuid:
           # check reachability from master secondary ip to new secondary ip
           if not netutils.TcpPing(self.op.secondary_ip,
                                   constants.DEFAULT_NODED_PORT,
......
                                        errors.ECODE_ENVIRON)
 
     if self.op.ndparams:
-      new_ndparams = GetUpdatedParams(self.node.ndparams, self.op.ndparams)
+      new_ndparams = GetUpdatedParams(node.ndparams, self.op.ndparams)
       utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
       CheckParamsNotGlobal(self.op.ndparams, constants.NDC_GLOBALS, "node",
                            "node", "cluster or group")
......
 
     if self.op.hv_state:
       self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state,
-                                                self.node.hv_state_static)
+                                                node.hv_state_static)
 
     if self.op.disk_state:
       self.new_disk_state = \
-        MergeAndVerifyDiskState(self.op.disk_state,
-                                self.node.disk_state_static)
+        MergeAndVerifyDiskState(self.op.disk_state, node.disk_state_static)
 
   def Exec(self, feedback_fn):
     """Modifies a node.
 
     """
-    node = self.node
+    node = self.cfg.GetNodeInfo(self.op.node_uuid)
     old_role = self.old_role
     new_role = self.new_role
 
......
 
       # we locked all nodes, we adjust the CP before updating this node
       if self.lock_all:
-        AdjustCandidatePool(self, [node.name])
+        AdjustCandidatePool(self, [node.uuid])
 
     if self.op.secondary_ip:
       node.secondary_ip = self.op.secondary_ip
......
   REQ_BGL = False
 
   def CheckArguments(self):
-    self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
-    if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
+    (self.op.node_uuid, self.op.node_name) = \
+      ExpandNodeUuidAndName(self.cfg, self.op.node_uuid, self.op.node_name)
+
+    if self.op.node_uuid == self.cfg.GetMasterNode() and not self.op.force:
       raise errors.OpPrereqError("The node is the master and the force"
                                  " parameter was not set",
                                  errors.ECODE_INVAL)
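
The hunk above is the counterpart to the LUNodeAdd change earlier in this diff: where only a name is available (a node being added), the code compares against self.cfg.GetMasterNodeName(), while identity checks such as this force check compare self.op.node_uuid against self.cfg.GetMasterNode(). In other words, this revision treats GetMasterNode() as returning the master's UUID. A toy configuration making that split explicit (the class is illustrative, not Ganeti's ConfigWriter):

class ToyClusterConfig(object):
  def __init__(self, master_uuid, names):
    self._master_uuid = master_uuid
    self._names = names            # dict: uuid -> name

  def GetMasterNode(self):         # UUID, used for identity checks
    return self._master_uuid

  def GetMasterNodeName(self):     # name, used when only a name is known yet
    return self._names[self._master_uuid]

cfg = ToyClusterConfig("uuid-a", {"uuid-a": "master.example.com"})
assert cfg.GetMasterNode() == "uuid-a"
assert cfg.GetMasterNodeName() == "master.example.com"
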
......
     """
     default_hypervisor = self.cfg.GetHypervisorType()
     hvparams = self.cfg.GetClusterInfo().hvparams[default_hypervisor]
-    result = self.rpc.call_node_powercycle(self.op.node_name,
+    result = self.rpc.call_node_powercycle(self.op.node_uuid,
                                            default_hypervisor,
                                            hvparams)
     result.Raise("Failed to schedule the reboot")
......
   return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]
 
 
-def _GetNodePrimaryInstances(cfg, node_name):
+def _GetNodePrimaryInstances(cfg, node_uuid):
   """Returns primary instances on a node.
 
   """
   return _GetNodeInstancesInner(cfg,
-                                lambda inst: node_name == inst.primary_node)
+                                lambda inst: node_uuid == inst.primary_node)
 
 
-def _GetNodeSecondaryInstances(cfg, node_name):
+def _GetNodeSecondaryInstances(cfg, node_uuid):
   """Returns secondary instances on a node.
 
   """
   return _GetNodeInstancesInner(cfg,
-                                lambda inst: node_name in inst.secondary_nodes)
+                                lambda inst: node_uuid in inst.secondary_nodes)
 
 
-def _GetNodeInstances(cfg, node_name):
+def _GetNodeInstances(cfg, node_uuid):
   """Returns a list of all primary and secondary instances on a node.
 
   """
 
-  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)
+  return _GetNodeInstancesInner(cfg, lambda inst: node_uuid in inst.all_nodes)
 
 
 class LUNodeEvacuate(NoHooksLU):
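
The three module-level helpers above keep their structure and simply filter by UUID, since inst.primary_node, inst.secondary_nodes and inst.all_nodes now hold node UUIDs. A self-contained sketch of the same filtering style, with a namedtuple standing in for objects.Instance:

import collections

Instance = collections.namedtuple("Instance",
                                  ["name", "primary_node", "secondary_nodes"])

def all_nodes(inst):
  return [inst.primary_node] + list(inst.secondary_nodes)

def primary_instances(instances, node_uuid):
  return [i for i in instances if i.primary_node == node_uuid]

def secondary_instances(instances, node_uuid):
  return [i for i in instances if node_uuid in i.secondary_nodes]

def instances_on_node(instances, node_uuid):
  return [i for i in instances if node_uuid in all_nodes(i)]

insts = [Instance("web", "uuid-a", ("uuid-b",)),
         Instance("db", "uuid-b", ("uuid-a",))]
assert [i.name for i in primary_instances(insts, "uuid-a")] == ["web"]
assert [i.name for i in secondary_instances(insts, "uuid-a")] == ["db"]
assert sorted(i.name for i in instances_on_node(insts, "uuid-a")) == \
    ["db", "web"]
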
......
     CheckIAllocatorOrNode(self, "iallocator", "remote_node")
 
   def ExpandNames(self):
-    self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
+    (self.op.node_uuid, self.op.node_name) = \
+      ExpandNodeUuidAndName(self.cfg, self.op.node_uuid, self.op.node_name)
 
     if self.op.remote_node is not None:
-      self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
+      (self.op.remote_node_uuid, self.op.remote_node) = \
+        ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
+                              self.op.remote_node)
       assert self.op.remote_node
 
-      if self.op.remote_node == self.op.node_name:
+      if self.op.node_uuid == self.op.remote_node_uuid:
         raise errors.OpPrereqError("Can not use evacuated node as a new"
                                    " secondary node", errors.ECODE_INVAL)
 
......
     self.lock_nodes = self._DetermineNodes()
 
   def _DetermineNodes(self):
-    """Gets the list of nodes to operate on.
+    """Gets the list of node UUIDs to operate on.
 
     """
     if self.op.remote_node is None:
       # Iallocator will choose any node(s) in the same group
-      group_nodes = self.cfg.GetNodeGroupMembersByNodes([self.op.node_name])
+      group_nodes = self.cfg.GetNodeGroupMembersByNodes([self.op.node_uuid])
     else:
-      group_nodes = frozenset([self.op.remote_node])
+      group_nodes = frozenset([self.op.remote_node_uuid])
 
     # Determine nodes to be locked
-    return set([self.op.node_name]) | group_nodes
+    return set([self.op.node_uuid]) | group_nodes
 
   def _DetermineInstances(self):
     """Builds list of instances to operate on.
......
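
The _DetermineNodes change above keeps the same locking logic but expresses it in UUIDs: the evacuated node plus either its node-group members or the explicitly chosen remote node. A toy version of that lock-set computation, faking group membership with a plain dict instead of cfg.GetNodeGroupMembersByNodes:

group_members = {
  "uuid-a": frozenset(["uuid-a", "uuid-b", "uuid-c"]),
}

def nodes_to_lock(node_uuid, remote_node_uuid=None):
  if remote_node_uuid is None:
    # the allocator may pick any node in the same group
    group_nodes = group_members[node_uuid]
  else:
    group_nodes = frozenset([remote_node_uuid])
  return set([node_uuid]) | group_nodes

assert nodes_to_lock("uuid-a") == set(["uuid-a", "uuid-b", "uuid-c"])
assert nodes_to_lock("uuid-a", "uuid-z") == set(["uuid-a", "uuid-z"])
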
                                  " instances",
                                  errors.ECODE_INVAL)
 
-    return inst_fn(self.cfg, self.op.node_name)
+    return inst_fn(self.cfg, self.op.node_uuid)
 
   def DeclareLocks(self, level):
     if level == locking.LEVEL_INSTANCE:
......
 
     if self.op.remote_node is not None:
       for i in self.instances:
-        if i.primary_node == self.op.remote_node:
+        if i.primary_node == self.op.remote_node_uuid:
           raise errors.OpPrereqError("Node %s is the primary node of"
                                      " instance %s, cannot use it as"
                                      " secondary" %
......
     pass
 
   def ExpandNames(self):
-    self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
+    (self.op.node_uuid, self.op.node_name) = \
+      ExpandNodeUuidAndName(self.cfg, self.op.node_uuid, self.op.node_name)
 
     self.share_locks = ShareAll()
     self.needed_locks = {
-      locking.LEVEL_NODE: [self.op.node_name],
+      locking.LEVEL_NODE: [self.op.node_uuid],
       }
 
   def BuildHooksEnv(self):
......
                                  target_node=self.op.target_node,
                                  allow_runtime_changes=allow_runtime_changes,
                                  ignore_ipolicy=self.op.ignore_ipolicy)]
-      for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name)]
+      for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_uuid)]
 
     # TODO: Run iallocator in this opcode and pass correct placement options to
     # OpInstanceMigrate. Since other jobs can modify the cluster between
......
   REQ_BGL = False
 
   def CheckArguments(self):
-    self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
+    (self.op.node_uuid, self.op.node_name) = \
+      ExpandNodeUuidAndName(self.cfg, self.op.node_uuid, self.op.node_name)
 
     storage_type = self.op.storage_type
 
......
 
   def ExpandNames(self):
     self.needed_locks = {
-      locking.LEVEL_NODE: self.op.node_name,
+      locking.LEVEL_NODE: self.op.node_uuid,
       }
 
   def Exec(self, feedback_fn):
......
 
     """
     st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
-    result = self.rpc.call_storage_modify(self.op.node_name,
+    result = self.rpc.call_storage_modify(self.op.node_uuid,
                                           self.op.storage_type, st_args,
                                           self.op.name, self.op.changes)
     result.Raise("Failed to modify storage unit '%s' on %s" %
......
     lu.share_locks = ShareAll()
 
     if self.names:
-      self.wanted = GetWantedNodes(lu, self.names)
+      (self.wanted, _) = GetWantedNodes(lu, self.names)
     else:
       self.wanted = locking.ALL_SET
 
......
     """
     all_info = lu.cfg.GetAllNodesInfo()
 
-    nodenames = self._GetNames(lu, all_info.keys(), locking.LEVEL_NODE)
+    node_uuids = self._GetNames(lu, all_info.keys(), locking.LEVEL_NODE)
 
     # Gather data as requested
     if query.NQ_LIVE in self.requested_data:
       # filter out non-vm_capable nodes
-      toquery_nodes = [name for name in nodenames if all_info[name].vm_capable]
+      toquery_node_uuids = [node.uuid for node in all_info.values()
+                            if node.vm_capable and node.uuid in node_uuids]
 
-      es_flags = rpc.GetExclusiveStorageForNodeNames(lu.cfg, toquery_nodes)
+      es_flags = rpc.GetExclusiveStorageForNodes(lu.cfg, toquery_node_uuids)
       # FIXME: This currently maps everything to lvm, this should be more
       # flexible
       vg_req = rpc.BuildVgInfoQuery(lu.cfg)
       default_hypervisor = lu.cfg.GetHypervisorType()
       hvparams = lu.cfg.GetClusterInfo().hvparams[default_hypervisor]
       hvspecs = [(default_hypervisor, hvparams)]
-      node_data = lu.rpc.call_node_info(toquery_nodes, vg_req,
+      node_data = lu.rpc.call_node_info(toquery_node_uuids, vg_req,
                                         hvspecs, es_flags)
-      live_data = dict((name, rpc.MakeLegacyNodeInfo(nresult.payload))
-                       for (name, nresult) in node_data.items()
+      live_data = dict((uuid, rpc.MakeLegacyNodeInfo(nresult.payload))
+                       for (uuid, nresult) in node_data.items()
                        if not nresult.fail_msg and nresult.payload)
     else:
       live_data = None
 
     if query.NQ_INST in self.requested_data:
-      node_to_primary = dict([(name, set()) for name in nodenames])
-      node_to_secondary = dict([(name, set()) for name in nodenames])
+      node_to_primary = dict([(uuid, set()) for uuid in node_uuids])
+      node_to_secondary = dict([(uuid, set()) for uuid in node_uuids])
 
       inst_data = lu.cfg.GetAllInstancesInfo()
 
......
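
In the query hunk above, everything is keyed by UUID end to end: the list of vm_capable nodes to query, the dict returned by call_node_info, and the live_data mapping built from it, with failed or empty results filtered out. A toy of that dictionary-building step, with a namedtuple standing in for Ganeti's RPC result wrapper:

import collections

RpcResult = collections.namedtuple("RpcResult", ["fail_msg", "payload"])

node_data = {
  "uuid-a": RpcResult(fail_msg=None, payload={"memory_free": 2048}),
  "uuid-b": RpcResult(fail_msg="node down", payload=None),
}

live_data = dict((uuid, res.payload)
                 for (uuid, res) in node_data.items()
                 if not res.fail_msg and res.payload)

assert live_data == {"uuid-a": {"memory_free": 2048}}
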
       node_to_secondary = None
 
     if query.NQ_OOB in self.requested_data:
-      oob_support = dict((name, bool(SupportsOob(lu.cfg, node)))
-                         for name, node in all_info.iteritems())
+      oob_support = dict((uuid, bool(SupportsOob(lu.cfg, node)))
+                         for uuid, node in all_info.iteritems())
     else:
       oob_support = None
 
......
     else:
       groups = {}
 
-    return query.NodeQueryData([all_info[name] for name in nodenames],
+    return query.NodeQueryData([all_info[uuid] for uuid in node_uuids],
                                live_data, lu.cfg.GetMasterNode(),
                                node_to_primary, node_to_secondary, groups,
                                oob_support, lu.cfg.GetClusterInfo())
......
 
     if self.op.nodes:
       self.needed_locks = {
-        locking.LEVEL_NODE: GetWantedNodes(self, self.op.nodes),
+        locking.LEVEL_NODE: GetWantedNodes(self, self.op.nodes)[0],
         }
     else:
       self.needed_locks = {
......
     """Computes the list of nodes and their attributes.
 
     """
-    nodenames = self.owned_locks(locking.LEVEL_NODE)
-    volumes = self.rpc.call_node_volumes(nodenames)
+    node_uuids = self.owned_locks(locking.LEVEL_NODE)
+    volumes = self.rpc.call_node_volumes(node_uuids)
 
     ilist = self.cfg.GetAllInstancesInfo()
     vol2inst = MapInstanceDisksToNodes(ilist.values())
 
     output = []
-    for node in nodenames:
-      nresult = volumes[node]
+    for node_uuid in node_uuids:
+      nresult = volumes[node_uuid]
       if nresult.offline:
         continue
       msg = nresult.fail_msg
       if msg:
-        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
+        self.LogWarning("Can't compute volume data on node %s: %s",
+                        self.cfg.GetNodeName(node_uuid), msg)
         continue
 
       node_vols = sorted(nresult.payload,
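
The loop above iterates over UUIDs for all internal bookkeeping and only translates back to a name where a human-readable string is needed, here via self.cfg.GetNodeName() in the warning message. A toy of that translation step, with a plain dict standing in for the configuration:

uuid_to_name = {"uuid-a": "node1.example.com", "uuid-b": "node2.example.com"}

def get_node_name(node_uuid):
  # fall back to the UUID itself if the mapping is unknown
  return uuid_to_name.get(node_uuid, node_uuid)

messages = []
for node_uuid in ["uuid-a", "uuid-b"]:
  # keys stay UUIDs; only the text shown to the user carries the name
  messages.append("checked volumes on node %s" % get_node_name(node_uuid))

assert messages[0] == "checked volumes on node node1.example.com"
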
......
         node_output = []
         for field in self.op.output_fields:
           if field == "node":
-            val = node
+            val = self.cfg.GetNodeName(node_uuid)
           elif field == "phys":
             val = vol["dev"]
           elif field == "vg":
......
           elif field == "size":
             val = int(float(vol["size"]))
           elif field == "instance":
-            val = vol2inst.get((node, vol["vg"] + "/" + vol["name"]), "-")
+            val = vol2inst.get((node_uuid, vol["vg"] + "/" + vol["name"]), "-")
           else:
             raise errors.ParameterError(field)
           node_output.append(str(val))
......
 
     if self.op.nodes:
       self.needed_locks = {
-        locking.LEVEL_NODE: GetWantedNodes(self, self.op.nodes),
+        locking.LEVEL_NODE: GetWantedNodes(self, self.op.nodes)[0],
         }
     else:
       self.needed_locks = {
......
     """Computes the list of nodes and their attributes.
 
     """
-    self.nodes = self.owned_locks(locking.LEVEL_NODE)
+    self.node_uuids = self.owned_locks(locking.LEVEL_NODE)
 
     # Always get name to sort by
     if constants.SF_NAME in self.op.output_fields:
......
     name_idx = field_idx[constants.SF_NAME]
 
     st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
-    data = self.rpc.call_storage_list(self.nodes,
+    data = self.rpc.call_storage_list(self.node_uuids,
                                       self.op.storage_type, st_args,
                                       self.op.name, fields)
 
     result = []
 
-    for node in utils.NiceSort(self.nodes):
-      nresult = data[node]
+    for node_uuid in utils.NiceSort(self.node_uuids):
+      node_name = self.cfg.GetNodeName(node_uuid)
+      nresult = data[node_uuid]
       if nresult.offline:
         continue
 
       msg = nresult.fail_msg
       if msg:
-        self.LogWarning("Can't get storage data from node %s: %s", node, msg)
+        self.LogWarning("Can't get storage data from node %s: %s",
+                        node_name, msg)
         continue
 
       rows = dict([(row[name_idx], row) for row in nresult.payload])
......
 
         for field in self.op.output_fields:
           if field == constants.SF_NODE:
-            val = node
+            val = node_name
           elif field == constants.SF_TYPE:
             val = self.op.storage_type
           elif field in field_idx:
......
     """
     all_nodes = self.cfg.GetNodeList()
     try:
-      all_nodes.remove(self.op.node_name)
+      all_nodes.remove(self.op.node_uuid)
     except ValueError:
       pass
     return (all_nodes, all_nodes)
......
     Any errors are signaled by raising errors.OpPrereqError.
 
     """
-    self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
-    node = self.cfg.GetNodeInfo(self.op.node_name)
+    (self.op.node_uuid, self.op.node_name) = \
+      ExpandNodeUuidAndName(self.cfg, self.op.node_uuid, self.op.node_name)
+    node = self.cfg.GetNodeInfo(self.op.node_uuid)
     assert node is not None
 
     masternode = self.cfg.GetMasterNode()
-    if node.name == masternode:
+    if node.uuid == masternode:
       raise errors.OpPrereqError("Node is the master node, failover to another"
                                  " node is required", errors.ECODE_INVAL)
 
     for instance_name, instance in self.cfg.GetAllInstancesInfo().items():
-      if node.name in instance.all_nodes:
+      if node.uuid in instance.all_nodes:
         raise errors.OpPrereqError("Instance %s is still running on the node,"
                                    " please remove first" % instance_name,
                                    errors.ECODE_INVAL)
......
       "Not owning BGL"
 
     # Promote nodes to master candidate as needed
-    AdjustCandidatePool(self, exceptions=[node.name])
-    self.context.RemoveNode(node.name)
+    AdjustCandidatePool(self, exceptions=[node.uuid])
+    self.context.RemoveNode(node)
 
     # Run post hooks on the node before it's removed
     RunPostHook(self, node.name)
 
+    # we have to call this by name rather than by UUID, as the node is no longer
+    # in the config
     result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
     msg = result.fail_msg
     if msg:
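
The removal path above now hands UUIDs (or the whole node object) to the configuration and context helpers, but the final call_node_leave_cluster RPC is still addressed by name: as the new comment explains, the node has already been dropped from the configuration, so its UUID can no longer be resolved. A toy sketch of that ordering constraint:

# Toy stand-ins: 'config' for the cluster configuration, 'rpc_calls' for the
# RPC layer.
config = {"uuid-a": "node1.example.com"}
rpc_calls = []

def remove_node(node_uuid):
  node_name = config.pop(node_uuid)   # the uuid -> name mapping is gone now
  # from here on, only the name can be used to reach the host
  rpc_calls.append(("node_leave_cluster", node_name))

remove_node("uuid-a")
assert rpc_calls == [("node_leave_cluster", "node1.example.com")]
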
......
 
     # Remove node from our /etc/hosts
     if self.cfg.GetClusterInfo().modify_etc_hosts:
-      master_node = self.cfg.GetMasterNode()
-      result = self.rpc.call_etc_hosts_modify(master_node,
+      master_node_uuid = self.cfg.GetMasterNode()
+      result = self.rpc.call_etc_hosts_modify(master_node_uuid,
                                               constants.ETC_HOSTS_REMOVE,
                                               node.name, None)
       result.Raise("Can't update hosts file with new host data")
......
   REQ_BGL = False
 
   def CheckArguments(self):
-    self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
+    (self.op.node_uuid, self.op.node_name) = \
+      ExpandNodeUuidAndName(self.cfg, self.op.node_uuid, self.op.node_name)
 
     storage_type = self.op.storage_type
 
......
 
   def ExpandNames(self):
     self.needed_locks = {
-      locking.LEVEL_NODE: [self.op.node_name],
+      locking.LEVEL_NODE: [self.op.node_uuid],
       }
 
-  def _CheckFaultyDisks(self, instance, node_name):
+  def _CheckFaultyDisks(self, instance, node_uuid):
     """Ensure faulty disks abort the opcode or at least warn."""
     try:
       if FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
-                                 node_name, True):
+                                 node_uuid, True):
         raise errors.OpPrereqError("Instance '%s' has faulty disks on"
-                                   " node '%s'" % (instance.name, node_name),
+                                   " node '%s'" %
+                                   (instance.name,
+                                    self.cfg.GetNodeName(node_uuid)),
                                    errors.ECODE_STATE)
     except errors.OpPrereqError, err:
       if self.op.ignore_consistency:
......
 
     """
     # Check whether any instance on this node has faulty disks
-    for inst in _GetNodeInstances(self.cfg, self.op.node_name):
+    for inst in _GetNodeInstances(self.cfg, self.op.node_uuid):
       if not inst.disks_active:
         continue
       check_nodes = set(inst.all_nodes)
-      check_nodes.discard(self.op.node_name)
-      for inst_node_name in check_nodes:
-        self._CheckFaultyDisks(inst, inst_node_name)
+      check_nodes.discard(self.op.node_uuid)
+      for inst_node_uuid in check_nodes:
+        self._CheckFaultyDisks(inst, inst_node_uuid)
 
   def Exec(self, feedback_fn):
     feedback_fn("Repairing storage unit '%s' on %s ..." %
                 (self.op.name, self.op.node_name))
 
     st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
-    result = self.rpc.call_storage_execute(self.op.node_name,
+    result = self.rpc.call_storage_execute(self.op.node_uuid,
                                            self.op.storage_type, st_args,
                                            self.op.name,
                                            constants.SO_FIX_CONSISTENCY)
