Revision 1c3231aa lib/cmdlib/misc.py

--- a/lib/cmdlib/misc.py
+++ b/lib/cmdlib/misc.py

     """
     if self.op.node_names:
-      self.op.node_names = GetWantedNodes(self, self.op.node_names)
-      lock_names = self.op.node_names
+      (self.op.node_uuids, self.op.node_names) = \
+        GetWantedNodes(self, self.op.node_names)
+      lock_node_uuids = self.op.node_uuids
     else:
-      lock_names = locking.ALL_SET
+      lock_node_uuids = locking.ALL_SET

     self.needed_locks = {
-      locking.LEVEL_NODE: lock_names,
+      locking.LEVEL_NODE: lock_node_uuids,
       }

     self.share_locks[locking.LEVEL_NODE_ALLOC] = 1
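
The substance of this hunk is that GetWantedNodes now resolves the
user-supplied names into a (uuids, names) pair, so the node lock level is
keyed by stable node UUIDs instead of names. A minimal sketch of the
contract the new call site implies; this helper is a hypothetical
simplification, not the real implementation:

  # Hypothetical, simplified sketch of what the call site above assumes:
  # resolve each name once, return both UUIDs (for locking and RPC) and
  # canonical names (for user-facing messages).
  def GetWantedNodes(lu, short_node_names):
    node_uuids = []
    node_names = []
    for name in short_node_names:
      node = lu.cfg.GetNodeInfoByName(name)  # assumed: None when unknown
      if node is None:
        raise errors.OpPrereqError("Node %s not found" % name,
                                   errors.ECODE_NOENT)
      node_uuids.append(node.uuid)
      node_names.append(node.name)
    return (node_uuids, node_names)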
......

     """
     self.nodes = []
-    self.master_node = self.cfg.GetMasterNode()
+    self.master_node_uuid = self.cfg.GetMasterNode()
+    master_node_obj = self.cfg.GetNodeInfo(self.master_node_uuid)

     assert self.op.power_delay >= 0.0

-    if self.op.node_names:
+    if self.op.node_uuids:
       if (self.op.command in self._SKIP_MASTER and
-          self.master_node in self.op.node_names):
-        master_node_obj = self.cfg.GetNodeInfo(self.master_node)
+          master_node_obj.uuid in self.op.node_uuids):
         master_oob_handler = SupportsOob(self.cfg, master_node_obj)

         if master_oob_handler:
           additional_text = ("run '%s %s %s' if you want to operate on the"
                              " master regardless") % (master_oob_handler,
                                                       self.op.command,
-                                                      self.master_node)
+                                                      master_node_obj.name)
         else:
           additional_text = "it does not support out-of-band operations"

         raise errors.OpPrereqError(("Operating on the master node %s is not"
                                     " allowed for %s; %s") %
-                                   (self.master_node, self.op.command,
+                                   (master_node_obj.name, self.op.command,
                                     additional_text), errors.ECODE_INVAL)
     else:
-      self.op.node_names = self.cfg.GetNodeList()
+      self.op.node_uuids = self.cfg.GetNodeList()
       if self.op.command in self._SKIP_MASTER:
-        self.op.node_names.remove(self.master_node)
+        self.op.node_uuids.remove(master_node_obj.uuid)

     if self.op.command in self._SKIP_MASTER:
-      assert self.master_node not in self.op.node_names
+      assert master_node_obj.uuid not in self.op.node_uuids

-    for (node_name, node) in self.cfg.GetMultiNodeInfo(self.op.node_names):
+    for node_uuid in self.op.node_uuids:
+      node = self.cfg.GetNodeInfo(node_uuid)
       if node is None:
-        raise errors.OpPrereqError("Node %s not found" % node_name,
+        raise errors.OpPrereqError("Node %s not found" % node_uuid,
                                    errors.ECODE_NOENT)
-      else:
-        self.nodes.append(node)
+
+      self.nodes.append(node)

       if (not self.op.ignore_status and
           (self.op.command == constants.OOB_POWER_OFF and not node.offline)):
         raise errors.OpPrereqError(("Cannot power off node %s because it is"
-                                    " not marked offline") % node_name,
+                                    " not marked offline") % node.name,
                                    errors.ECODE_STATE)

   def Exec(self, feedback_fn):
     """Execute OOB and return result if we expect any.

     """
-    master_node = self.master_node
     ret = []

     for idx, node in enumerate(utils.NiceSort(self.nodes,
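
A pattern worth noting across this hunk: all bookkeeping (locks, asserts,
dictionary keys) moves to UUIDs, and a name is recovered only at the point
where a message is built for the user. The three lookup directions the
revision relies on, with semantics inferred from the call sites in this
diff:

  # cfg is the LU's config accessor; behavior inferred from the call sites.
  node = cfg.GetNodeInfo(node_uuid)        # UUID -> node object, or None
  name = cfg.GetNodeName(node_uuid)        # UUID -> name, for messages
  uuid = cfg.GetNodeInfoByName(name).uuid  # name -> UUID, for user input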
......

       logging.info("Executing out-of-band command '%s' using '%s' on %s",
                    self.op.command, oob_program, node.name)
-      result = self.rpc.call_run_oob(master_node, oob_program,
+      result = self.rpc.call_run_oob(self.master_node_uuid, oob_program,
                                      self.op.command, node.name,
                                      self.op.timeout)

......

     # The following variables interact with _QueryBase._GetNames
     if self.names:
-      self.wanted = self.names
+      self.wanted = [lu.cfg.GetNodeInfoByName(name).uuid for name in self.names]
     else:
       self.wanted = locking.ALL_SET

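One reviewer-style caveat on this one-liner: it assumes every entry in
self.names resolves, so an unknown name would surface as an AttributeError
on None rather than a proper OpPrereqError. A more defensive variant,
purely illustrative:

  # Illustrative alternative: fail with an explicit error for unknown
  # names instead of raising AttributeError on None.uuid.
  wanted = []
  for name in self.names:
    node = lu.cfg.GetNodeInfoByName(name)
    if node is None:
      raise errors.OpPrereqError("Node %s not found" % name,
                                 errors.ECODE_NOENT)
    wanted.append(node.uuid)
  self.wanted = wanted
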
......
   def _DiagnoseByProvider(rlist):
     """Remaps a per-node return list into a per-provider per-node dictionary

-    @param rlist: a map with node names as keys and ExtStorage objects as values
+    @param rlist: a map with node uuids as keys and ExtStorage objects as values

     @rtype: dict
     @return: a dictionary with extstorage providers as keys and as
-        value another map, with nodes as keys and tuples of
+        value another map, with node uuids as keys and tuples of
         (path, status, diagnose, parameters) as values, eg::

-          {"provider1": {"node1": [(/usr/lib/..., True, "", [])]
-                         "node2": [(/srv/..., False, "missing file")]
-                         "node3": [(/srv/..., True, "", [])]
+          {"provider1": {"node_uuid1": [(/usr/lib/..., True, "", [])]
+                         "node_uuid2": [(/srv/..., False, "missing file")]
+                         "node_uuid3": [(/srv/..., True, "", [])]
           }

     """
......
     # we build here the list of nodes that didn't fail the RPC (at RPC
     # level), so that nodes with a non-responding node daemon don't
     # make all OSes invalid
-    good_nodes = [node_name for node_name in rlist
-                  if not rlist[node_name].fail_msg]
-    for node_name, nr in rlist.items():
+    good_nodes = [node_uuid for node_uuid in rlist
+                  if not rlist[node_uuid].fail_msg]
+    for node_uuid, nr in rlist.items():
       if nr.fail_msg or not nr.payload:
         continue
       for (name, path, status, diagnose, params) in nr.payload:
......
           # build a list of nodes for this os containing empty lists
           # for each node in node_list
           all_es[name] = {}
-          for nname in good_nodes:
-            all_es[name][nname] = []
+          for nuuid in good_nodes:
+            all_es[name][nuuid] = []
         # convert params from [name, help] to (name, help)
         params = [tuple(v) for v in params]
-        all_es[name][node_name].append((path, status, diagnose, params))
+        all_es[name][node_uuid].append((path, status, diagnose, params))
     return all_es

   def _GetQueryData(self, lu):
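
To make the remapping concrete, here is a small self-contained example of
what _DiagnoseByProvider consumes and produces after this change;
FakeResult is a stand-in for the real RPC result objects, of which only
fail_msg and payload are used here:

  class FakeResult:
    def __init__(self, payload, fail_msg=None):
      self.payload = payload
      self.fail_msg = fail_msg

  rlist = {
    "uuid-1": FakeResult([("prov1", "/usr/lib/...", True, "", [["a", "h"]])]),
    "uuid-2": FakeResult(None, fail_msg="node daemon not responding"),
    }
  # With the code above, _DiagnoseByProvider(rlist) yields:
  #   {"prov1": {"uuid-1": [("/usr/lib/...", True, "", [("a", "h")])]}}
  # uuid-2 has fail_msg set, so it is excluded from good_nodes: a dead
  # node daemon does not invalidate the provider on nodes that answered.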
......
                            if level != locking.LEVEL_CLUSTER) or
                 self.do_locking or self.use_locking)

-    valid_nodes = [node.name
+    valid_nodes = [node.uuid
                    for node in lu.cfg.GetAllNodesInfo().values()
                    if not node.offline and node.vm_capable]
     pol = self._DiagnoseByProvider(lu.rpc.call_extstorage_diagnose(valid_nodes))
......

   def ExpandNames(self):
     if self.op.nodes:
-      self.op.nodes = GetWantedNodes(self, self.op.nodes)
+      (self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, self.op.nodes)

     self.needed_locks = {
-      locking.LEVEL_NODE: self.op.nodes,
+      locking.LEVEL_NODE: self.op.node_uuids,
       }
     self.share_locks = {
       locking.LEVEL_NODE: not self.op.use_locking,
......
     owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

     # Check if correct locks are held
-    assert set(self.op.nodes).issubset(owned_nodes)
+    assert set(self.op.node_uuids).issubset(owned_nodes)

-    rpcres = self.rpc.call_restricted_command(self.op.nodes, self.op.command)
+    rpcres = self.rpc.call_restricted_command(self.op.node_uuids,
+                                              self.op.command)

     result = []

-    for node_name in self.op.nodes:
-      nres = rpcres[node_name]
+    for node_uuid in self.op.node_uuids:
+      nres = rpcres[node_uuid]
       if nres.fail_msg:
         msg = ("Command '%s' on node '%s' failed: %s" %
-               (self.op.command, node_name, nres.fail_msg))
+               (self.op.command, self.cfg.GetNodeName(node_uuid),
+                nres.fail_msg))
         result.append((False, msg))
       else:
         result.append((True, nres.payload))
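
As elsewhere in this revision, the RPC result is assumed to be a map keyed
by the node UUIDs that were passed in, with a name looked up only to build
the human-readable error. A sketch of how a caller might consume the
(success, data) pairs assembled above; illustrative only, feedback_fn
being the usual LU feedback callback:

  # Illustrative consumer of the (success, data) pairs built above.
  for (success, data) in result:
    if success:
      feedback_fn("command output: %s" % (data,))
    else:
      feedback_fn("command failed: %s" % data)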
