Revision 1c3231aa lib/cmdlib/instance_query.py

--- a/lib/cmdlib/instance_query.py
+++ b/lib/cmdlib/instance_query.py

     instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)

-    instance_list = [all_info[name] for name in instance_names]
-    nodes = frozenset(itertools.chain(*(inst.all_nodes
-                                        for inst in instance_list)))
+    instance_list = [all_info[node] for node in instance_names]
+    node_uuids = frozenset(itertools.chain(*(inst.all_nodes
+                                             for inst in instance_list)))
     hv_list = list(set([inst.hypervisor for inst in instance_list]))
-    bad_nodes = []
-    offline_nodes = []
+    bad_node_uuids = []
+    offline_node_uuids = []
     wrongnode_inst = set()

     # Gather data as requested
     if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
       live_data = {}
-      node_data = lu.rpc.call_all_instances_info(nodes, hv_list,
+      node_data = lu.rpc.call_all_instances_info(node_uuids, hv_list,
                                                  cluster.hvparams)
-      for name in nodes:
-        result = node_data[name]
+      for node_uuid in node_uuids:
+        result = node_data[node_uuid]
         if result.offline:
           # offline nodes will be in both lists
           assert result.fail_msg
-          offline_nodes.append(name)
+          offline_node_uuids.append(node_uuid)
         if result.fail_msg:
-          bad_nodes.append(name)
+          bad_node_uuids.append(node_uuid)
         elif result.payload:
           for inst in result.payload:
             if inst in all_info:
-              if all_info[inst].primary_node == name:
+              if all_info[inst].primary_node == node_uuid:
                 live_data.update(result.payload)
               else:
                 wrongnode_inst.add(inst)
...
               # orphan instance; we don't list it here as we don't
               # handle this case yet in the output of instance listing
               logging.warning("Orphan instance '%s' found on node %s",
-                              inst, name)
+                              inst, lu.cfg.GetNodeName(node_uuid))
               # else no instance is alive
     else:
       live_data = {}
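The hunks above switch the per-node bookkeeping from node names to node UUIDs: the results of call_all_instances_info are keyed by UUID, and a name is looked up (via lu.cfg.GetNodeName) only when a human-readable message is produced. A minimal standalone sketch of that pattern, with invented UUIDs and names rather than Ganeti's config and RPC objects:

# Illustration only: toy stand-ins for the cluster config and RPC results.
node_names_by_uuid = {
  "node-uuid-1": "node1.example.com",
  "node-uuid-2": "node2.example.com",
}

# Per-node results keyed by UUID, mirroring node_data in the hunk above.
node_data = {
  "node-uuid-1": {"inst1": {"state": "running"}},
  "node-uuid-2": None,  # pretend this node did not answer
}

bad_node_uuids = []
for node_uuid, payload in node_data.items():
  if payload is None:
    bad_node_uuids.append(node_uuid)
    # Resolve the UUID to a name only for the message, much like
    # lu.cfg.GetNodeName() in the orphan-instance warning above.
    print("no data from node %s" % node_names_by_uuid[node_uuid])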
...
       for inst in instance_list:
         if inst.name in live_data:
           # Instance is running
-          consinfo[inst.name] = GetInstanceConsole(cluster, inst)
+          consinfo[inst.name] = \
+            GetInstanceConsole(cluster, inst,
+                               lu.cfg.GetNodeInfo(inst.primary_node))
         else:
           consinfo[inst.name] = None
       assert set(consinfo.keys()) == set(instance_names)
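Because instance.primary_node now holds a UUID, GetInstanceConsole is handed the resolved node object (lu.cfg.GetNodeInfo(...)) rather than being left to work from a name. A standalone sketch of that resolve-then-pass pattern; the class and helpers below are invented for illustration, not Ganeti's API:

# Illustration only: fake node objects and a fake console helper.
class FakeNode(object):
  def __init__(self, uuid, name):
    self.uuid = uuid
    self.name = name

nodes_by_uuid = {"node-uuid-1": FakeNode("node-uuid-1", "node1.example.com")}

def get_node_info(uuid):
  # Plays the role of lu.cfg.GetNodeInfo() in the hunk above.
  return nodes_by_uuid[uuid]

def get_instance_console(inst_name, primary_node):
  # The callee receives a node object and can use its name directly.
  return {"instance": inst_name, "host": primary_node.name}

primary_node_uuid = "node-uuid-1"  # what instance.primary_node now stores
print(get_instance_console("inst1", get_node_info(primary_node_uuid)))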
...
       consinfo = None

     if query.IQ_NODES in self.requested_data:
-      node_names = set(itertools.chain(*map(operator.attrgetter("all_nodes"),
-                                            instance_list)))
-      nodes = dict(lu.cfg.GetMultiNodeInfo(node_names))
+      nodes = dict(lu.cfg.GetMultiNodeInfo(node_uuids))
       groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
                     for uuid in set(map(operator.attrgetter("group"),
                                         nodes.values())))
...
       networks = None

     return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
-                                   disk_usage, offline_nodes, bad_nodes,
-                                   live_data, wrongnode_inst, consinfo,
-                                   nodes, groups, networks)
+                                   disk_usage, offline_node_uuids,
+                                   bad_node_uuids, live_data, wrongnode_inst,
+                                   consinfo, nodes, groups, networks)


 class LUInstanceQuery(NoHooksLU):
...
     """
     owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
     owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
-    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
+    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
     owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))

     if self.wanted_names is None:
...
     instances = dict(self.cfg.GetMultiInstanceInfo(self.wanted_names))

     if self.op.use_locking:
-      CheckInstancesNodeGroups(self.cfg, instances, owned_groups, owned_nodes,
-                               None)
+      CheckInstancesNodeGroups(self.cfg, instances, owned_groups,
+                               owned_node_uuids, None)
     else:
       assert not (owned_instances or owned_groups or
-                  owned_nodes or owned_networks)
+                  owned_node_uuids or owned_networks)

     self.wanted_instances = instances.values()

-  def _ComputeBlockdevStatus(self, node, instance, dev):
+  def _ComputeBlockdevStatus(self, node_uuid, instance, dev):
     """Returns the status of a block device

     """
-    if self.op.static or not node:
+    if self.op.static or not node_uuid:
       return None

-    self.cfg.SetDiskID(dev, node)
+    self.cfg.SetDiskID(dev, node_uuid)

-    result = self.rpc.call_blockdev_find(node, dev)
+    result = self.rpc.call_blockdev_find(node_uuid, dev)
     if result.offline:
       return None
...
             status.sync_percent, status.estimated_time,
             status.is_degraded, status.ldisk_status)

-  def _ComputeDiskStatus(self, instance, snode, dev):
+  def _ComputeDiskStatus(self, instance, node_uuid2name_fn, dev):
     """Compute block device status.

     """
     (anno_dev,) = AnnotateDiskParams(instance, [dev], self.cfg)

-    return self._ComputeDiskStatusInner(instance, snode, anno_dev)
+    return self._ComputeDiskStatusInner(instance, None, node_uuid2name_fn,
+                                        anno_dev)

-  def _ComputeDiskStatusInner(self, instance, snode, dev):
+  def _ComputeDiskStatusInner(self, instance, snode_uuid, node_uuid2name_fn,
+                              dev):
     """Compute block device status.

     @attention: The device has to be annotated already.

     """
+    drbd_info = None
     if dev.dev_type in constants.LDS_DRBD:
       # we change the snode then (otherwise we use the one passed in)
       if dev.logical_id[0] == instance.primary_node:
-        snode = dev.logical_id[1]
+        snode_uuid = dev.logical_id[1]
       else:
-        snode = dev.logical_id[0]
+        snode_uuid = dev.logical_id[0]
+      drbd_info = {
+        "primary_node": node_uuid2name_fn(instance.primary_node),
+        "primary_minor": dev.logical_id[3],
+        "secondary_node": node_uuid2name_fn(snode_uuid),
+        "secondary_minor": dev.logical_id[4],
+        "port": dev.logical_id[2],
+        "secret": dev.logical_id[5],
+      }

     dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                               instance, dev)
-    dev_sstatus = self._ComputeBlockdevStatus(snode, instance, dev)
+    dev_sstatus = self._ComputeBlockdevStatus(snode_uuid, instance, dev)

     if dev.children:
       dev_children = map(compat.partial(self._ComputeDiskStatusInner,
-                                        instance, snode),
+                                        instance, snode_uuid,
+                                        node_uuid2name_fn),
                          dev.children)
     else:
       dev_children = []
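The new drbd_info dictionary exposes the pieces of a DRBD disk's logical_id under named keys, translating the node UUIDs in slots 0 and 1 to names via node_uuid2name_fn. A standalone sketch of that unpacking with a fabricated logical_id tuple; the slot meanings are taken from the indices used in the hunk above:

# Illustration only: a made-up logical_id and UUID-to-name mapping.
# Indices as used above: [0]/[1] node UUIDs, [2] port, [3]/[4] minors,
# [5] shared secret.
logical_id = ("node-uuid-1", "node-uuid-2", 11000, 0, 1, "s3cr3t")
primary_node_uuid = "node-uuid-1"
node_uuid2name_fn = {"node-uuid-1": "node1.example.com",
                     "node-uuid-2": "node2.example.com"}.get

# The secondary is whichever end of the DRBD pair is not the primary.
if logical_id[0] == primary_node_uuid:
  snode_uuid = logical_id[1]
else:
  snode_uuid = logical_id[0]

drbd_info = {
  "primary_node": node_uuid2name_fn(primary_node_uuid),
  "primary_minor": logical_id[3],
  "secondary_node": node_uuid2name_fn(snode_uuid),
  "secondary_minor": logical_id[4],
  "port": logical_id[2],
  "secret": logical_id[5],
}
print(drbd_info)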
...
       "iv_name": dev.iv_name,
       "dev_type": dev.dev_type,
       "logical_id": dev.logical_id,
+      "drbd_info": drbd_info,
       "physical_id": dev.physical_id,
       "pstatus": dev_pstatus,
       "sstatus": dev_sstatus,
...

     cluster = self.cfg.GetClusterInfo()

-    node_names = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
-    nodes = dict(self.cfg.GetMultiNodeInfo(node_names))
+    node_uuids = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
+    nodes = dict(self.cfg.GetMultiNodeInfo(node_uuids))

     groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
                                                  for node in nodes.values()))

-    group2name_fn = lambda uuid: groups[uuid].name
     for instance in self.wanted_instances:
       pnode = nodes[instance.primary_node]
...
         remote_info = self.rpc.call_instance_info(
             instance.primary_node, instance.name, instance.hypervisor,
             cluster.hvparams[instance.hypervisor])
-        remote_info.Raise("Error checking node %s" % instance.primary_node)
+        remote_info.Raise("Error checking node %s" % pnode.name)
         remote_info = remote_info.payload
         if remote_info and "state" in remote_info:
           remote_state = "up"
...
           else:
             remote_state = instance.admin_state

-      disks = map(compat.partial(self._ComputeDiskStatus, instance, None),
+      group2name_fn = lambda uuid: groups[uuid].name
+      node_uuid2name_fn = lambda uuid: nodes[uuid].name
+
+      disks = map(compat.partial(self._ComputeDiskStatus, instance,
+                                 node_uuid2name_fn),
                   instance.disks)

-      snodes_group_uuids = [nodes[snode_name].group
-                            for snode_name in instance.secondary_nodes]
+      snodes_group_uuids = [nodes[snode_uuid].group
+                            for snode_uuid in instance.secondary_nodes]

       result[instance.name] = {
         "name": instance.name,
         "config_state": instance.admin_state,
         "run_state": remote_state,
-        "pnode": instance.primary_node,
+        "pnode": pnode.name,
         "pnode_group_uuid": pnode.group,
         "pnode_group_name": group2name_fn(pnode.group),
-        "snodes": instance.secondary_nodes,
+        "snodes": map(node_uuid2name_fn, instance.secondary_nodes),
         "snodes_group_uuids": snodes_group_uuids,
         "snodes_group_names": map(group2name_fn, snodes_group_uuids),
         "os": instance.os,
