Revision 78519c10

This revision reworks the node_info RPC: it now takes lists of volume group
and hypervisor names instead of single values, and returns a (bootid,
vg_info, hv_info) tuple instead of one flat dictionary. lib/cmdlib.py gains a
_MakeLegacyNodeInfo helper that converts the new format for callers that
still expect the old flat layout.

b/lib/backend.py
@@ -522,44 +522,71 @@
   raise errors.QuitGanetiException(True, "Shutdown scheduled")
 
 
-def GetNodeInfo(vgname, hypervisor_type):
+def _GetVgInfo(name):
+  """Retrieves information about a LVM volume group.
+
+  """
+  # TODO: GetVGInfo supports returning information for multiple VGs at once
+  vginfo = bdev.LogicalVolume.GetVGInfo([name])
+  if vginfo:
+    vg_free = int(round(vginfo[0][0], 0))
+    vg_size = int(round(vginfo[0][1], 0))
+  else:
+    vg_free = None
+    vg_size = None
+
+  return {
+    "name": name,
+    "free": vg_free,
+    "size": vg_size,
+    }
+
+
+def _GetHvInfo(name):
+  """Retrieves node information from a hypervisor.
+
+  The information returned depends on the hypervisor. Common items:
+
+    - vg_size is the size of the configured volume group in MiB
+    - vg_free is the free size of the volume group in MiB
+    - memory_dom0 is the memory allocated for domain0 in MiB
+    - memory_free is the currently available (free) ram in MiB
+    - memory_total is the total number of ram in MiB
+    - hv_version: the hypervisor version, if available
+
+  """
+  return hypervisor.GetHypervisor(name).GetNodeInfo()
+
+
+def _GetNamedNodeInfo(names, fn):
+  """Calls C{fn} for all names in C{names} and returns a dictionary.
+
+  @rtype: None or dict
+
+  """
+  if names is None:
+    return None
+  else:
+    return dict((name, fn(name)) for name in names)
+
+
+def GetNodeInfo(vg_names, hv_names):
   """Gives back a hash with different information about the node.
 
-  @type vgname: C{string}
-  @param vgname: the name of the volume group to ask for disk space information
-  @type hypervisor_type: C{str}
-  @param hypervisor_type: the name of the hypervisor to ask for
-      memory information
-  @rtype: C{dict}
-  @return: dictionary with the following keys:
-      - vg_size is the size of the configured volume group in MiB
-      - vg_free is the free size of the volume group in MiB
-      - memory_dom0 is the memory allocated for domain0 in MiB
-      - memory_free is the currently available (free) ram in MiB
-      - memory_total is the total number of ram in MiB
-      - hv_version: the hypervisor version, if available
-
-  """
-  outputarray = {}
-
-  if vgname is not None:
-    vginfo = bdev.LogicalVolume.GetVGInfo([vgname])
-    vg_free = vg_size = None
-    if vginfo:
-      vg_free = int(round(vginfo[0][0], 0))
-      vg_size = int(round(vginfo[0][1], 0))
-    outputarray["vg_size"] = vg_size
-    outputarray["vg_free"] = vg_free
-
-  if hypervisor_type is not None:
-    hyper = hypervisor.GetHypervisor(hypervisor_type)
-    hyp_info = hyper.GetNodeInfo()
-    if hyp_info is not None:
-      outputarray.update(hyp_info)
-
-  outputarray["bootid"] = utils.ReadFile(_BOOT_ID_PATH, size=128).rstrip("\n")
-
-  return outputarray
+  @type vg_names: list of string
+  @param vg_names: Names of the volume groups to ask for disk space information
+  @type hv_names: list of string
+  @param hv_names: Names of the hypervisors to ask for node information
+  @rtype: tuple; (string, None/dict, None/dict)
+  @return: Tuple containing boot ID, volume group information and hypervisor
+    information
+
+  """
+  bootid = utils.ReadFile(_BOOT_ID_PATH, size=128).rstrip("\n")
+  vg_info = _GetNamedNodeInfo(vg_names, _GetVgInfo)
+  hv_info = _GetNamedNodeInfo(hv_names, _GetHvInfo)
+
+  return (bootid, vg_info, hv_info)
 
 
 def VerifyNode(what, cluster_name):
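
The net effect in lib/backend.py: GetNodeInfo no longer builds one flat
dictionary but returns a (bootid, vg_info, hv_info) tuple, where vg_info and
hv_info are either None (when the corresponding names argument was None) or a
collection with one info dict per requested name. A minimal standalone sketch
of that contract; all names and values are invented for illustration.
(_GetNamedNodeInfo itself builds a name-keyed dictionary; the sketch models
the per-name results as a list, which is the shape the single-element
unpacking in lib/cmdlib.py below consumes.)

def demo_get_node_info(vg_names, hv_names):
  # Sketch of the new backend.GetNodeInfo() result shape (invented values).
  def named_info(names, fn):
    # Mirrors _GetNamedNodeInfo: None passes through, otherwise one
    # result per requested name.
    if names is None:
      return None
    return [fn(name) for name in names]

  bootid = "0d02b43f-6d85-4d36-a814-3b294b47b9c6"  # invented boot ID
  vg_info = named_info(vg_names, lambda name: {"name": name,
                                               "free": 31744,
                                               "size": 204800})
  hv_info = named_info(hv_names, lambda name: {"memory_total": 16384,
                                               "memory_free": 7680,
                                               "memory_dom0": 1024,
                                               "cpu_total": 8})
  return (bootid, vg_info, hv_info)

(bootid, vg_info, hv_info) = demo_get_node_info(["xenvg"], ["xen-pvm"])
assert vg_info[0]["free"] <= vg_info[0]["size"]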
b/lib/cmdlib.py
@@ -573,6 +573,20 @@
   return dict.fromkeys(locking.LEVELS, 1)
 
 
+def _MakeLegacyNodeInfo(data):
+  """Formats the data returned by L{rpc.RpcRunner.call_node_info}.
+
+  Converts the data into a single dictionary. This is fine for most use cases,
+  but some require information from more than one volume group or hypervisor.
+
+  """
+  (bootid, (vg_info, ), (hv_info, )) = data
+
+  return utils.JoinDisjointDicts(utils.JoinDisjointDicts(vg_info, hv_info), {
+    "bootid": bootid,
+    })
+
+
 def _CheckInstanceNodeGroups(cfg, instance_name, owned_groups):
   """Checks if the owned node groups are still correct for an instance.
 
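
_MakeLegacyNodeInfo is the compatibility shim: it expects exactly one volume
group and one hypervisor in the payload and flattens them back into the
single dictionary older call sites consume. A standalone sketch of the same
merge, with utils.JoinDisjointDicts emulated inline and invented sample
values:

def join_disjoint(a, b):
  # Minimal stand-in for utils.JoinDisjointDicts: merge two dicts whose
  # key sets must not overlap.
  overlap = set(a) & set(b)
  assert not overlap, "duplicate keys: %r" % (overlap, )
  merged = dict(a)
  merged.update(b)
  return merged

data = ("0d02b43f-6d85-4d36-a814-3b294b47b9c6",
        [{"vg_size": 204800, "vg_free": 31744}],
        [{"memory_free": 7680, "cpu_total": 8}])

# Same destructuring as _MakeLegacyNodeInfo: single-element sequences only.
(bootid, (vg_info, ), (hv_info, )) = data
legacy = join_disjoint(join_disjoint(vg_info, hv_info), {"bootid": bootid})
assert legacy["vg_free"] == 31744 and legacy["bootid"] == bootid

The (vg_info, ) pattern raises ValueError as soon as more than one VG or
hypervisor was queried, which is why the docstring notes that some use cases
need information from more than one of each.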
@@ -4591,9 +4605,9 @@
       # filter out non-vm_capable nodes
       toquery_nodes = [name for name in nodenames if all_info[name].vm_capable]
 
-      node_data = lu.rpc.call_node_info(toquery_nodes, lu.cfg.GetVGName(),
-                                        lu.cfg.GetHypervisorType())
-      live_data = dict((name, nresult.payload)
+      node_data = lu.rpc.call_node_info(toquery_nodes, [lu.cfg.GetVGName()],
+                                        [lu.cfg.GetHypervisorType()])
+      live_data = dict((name, _MakeLegacyNodeInfo(nresult.payload))
                        for (name, nresult) in node_data.items()
                        if not nresult.fail_msg and nresult.payload)
     else:
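
Every call site changes the same way: the scalar VG name and hypervisor type
become single-element lists, so the same RPC can later query several of each.
A before/after sketch with a stub standing in for rpc.call_node_info (node
and VG names invented):

def call_node_info_stub(node_names, vg_names, hv_names):
  # Stand-in for lu.rpc.call_node_info: one payload per queried node.
  def payload():
    vg = None if vg_names is None else [{"name": n} for n in vg_names]
    hv = None if hv_names is None else [{"memory_free": 7680}
                                        for _ in hv_names]
    return ("boot-id", vg, hv)
  return dict((name, payload()) for name in node_names)

# Old style (pre-change):  call_node_info(nodes, "xenvg", "xen-pvm")
# New style (this change): call_node_info(nodes, ["xenvg"], ["xen-pvm"])
result = call_node_info_stub(["node1.example.com"], ["xenvg"], ["xen-pvm"])
(_, vg_info, hv_info) = result["node1.example.com"]
assert vg_info[0]["name"] == "xenvg"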
@@ -6012,10 +6026,12 @@
       we cannot check the node
 
   """
-  nodeinfo = lu.rpc.call_node_info([node], None, hypervisor_name)
+  nodeinfo = lu.rpc.call_node_info([node], None, [hypervisor_name])
   nodeinfo[node].Raise("Can't get data from node %s" % node,
                        prereq=True, ecode=errors.ECODE_ENVIRON)
-  free_mem = nodeinfo[node].payload.get("memory_free", None)
+  (_, _, (hv_info, )) = nodeinfo[node].payload
+
+  free_mem = hv_info.get("memory_free", None)
   if not isinstance(free_mem, int):
     raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                                " was '%s'" % (node, free_mem),
@@ -6070,12 +6086,13 @@
       or we cannot check the node
 
   """
-  nodeinfo = lu.rpc.call_node_info(nodenames, vg, None)
+  nodeinfo = lu.rpc.call_node_info(nodenames, [vg], None)
   for node in nodenames:
     info = nodeinfo[node]
     info.Raise("Cannot get current information from node %s" % node,
                prereq=True, ecode=errors.ECODE_ENVIRON)
-    vg_free = info.payload.get("vg_free", None)
+    (_, (vg_info, ), _) = info.payload
+    vg_free = vg_info.get("vg_free", None)
     if not isinstance(vg_free, int):
       raise errors.OpPrereqError("Can't compute free disk space on node"
                                  " %s for vg %s, result was '%s'" %
@@ -6105,12 +6122,13 @@
       or we cannot check the node
 
   """
-  nodeinfo = lu.rpc.call_node_info(nodenames, None, hypervisor_name)
+  nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name])
   for node in nodenames:
     info = nodeinfo[node]
     info.Raise("Cannot get current information from node %s" % node,
                prereq=True, ecode=errors.ECODE_ENVIRON)
-    num_cpus = info.payload.get("cpu_total", None)
+    (_, _, (hv_info, )) = info.payload
+    num_cpus = hv_info.get("cpu_total", None)
     if not isinstance(num_cpus, int):
       raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                  " on node %s, result was '%s'" %
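
The three prereq helpers above (free memory, free disk space on a volume
group, physical CPU count) now share one pattern: take the three-element
payload, positionally unpack the single requested VG or hypervisor dict, then
type-check the value before comparing. A condensed standalone sketch; the
error type is simplified to ValueError where the real code raises
errors.OpPrereqError:

def check_hv_value(payload, key, needed):
  # Shared pattern of the three checks: unpack, validate type, compare.
  (_, _, (hv_info, )) = payload  # hypervisor info is the third slot
  value = hv_info.get(key, None)
  if not isinstance(value, int):
    raise ValueError("node did not report %r (got %r)" % (key, value))
  if needed > value:
    raise ValueError("need %d, node only has %d" % (needed, value))
  return value

payload = ("boot-id", None, [{"memory_free": 7680, "cpu_total": 8}])
assert check_hv_value(payload, "memory_free", 4096) == 7680

For the disk check the volume group dict sits in the second slot instead,
hence the (_, (vg_info, ), _) destructuring in that helper.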
@@ -7678,14 +7696,17 @@
 
     # Check for hypervisor version mismatch and warn the user.
     nodeinfo = self.rpc.call_node_info([source_node, target_node],
-                                       None, self.instance.hypervisor)
-    src_info = nodeinfo[source_node]
-    dst_info = nodeinfo[target_node]
-
-    if ((constants.HV_NODEINFO_KEY_VERSION in src_info.payload) and
-        (constants.HV_NODEINFO_KEY_VERSION in dst_info.payload)):
-      src_version = src_info.payload[constants.HV_NODEINFO_KEY_VERSION]
-      dst_version = dst_info.payload[constants.HV_NODEINFO_KEY_VERSION]
+                                       None, [self.instance.hypervisor])
+    for ninfo in nodeinfo.items():
+      ninfo.Raise("Unable to retrieve node information from node '%s'" %
+                  ninfo.node)
+    (_, _, (src_info, )) = nodeinfo[source_node].payload
+    (_, _, (dst_info, )) = nodeinfo[target_node].payload
+
+    if ((constants.HV_NODEINFO_KEY_VERSION in src_info) and
+        (constants.HV_NODEINFO_KEY_VERSION in dst_info)):
+      src_version = src_info[constants.HV_NODEINFO_KEY_VERSION]
+      dst_version = dst_info[constants.HV_NODEINFO_KEY_VERSION]
     if src_version != dst_version:
       self.feedback_fn("* warning: hypervisor version mismatch between"
                        " source (%s) and target (%s) node" %
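
The migration prereq now fails early when node information cannot be
retrieved, then compares hypervisor versions taken from the unpacked
per-hypervisor dicts rather than from the raw RPC payload. A standalone
sketch of the version comparison; the constant is stood in by a plain string
and the version values are invented:

HV_VERSION_KEY = "hv_version"  # stand-in for constants.HV_NODEINFO_KEY_VERSION

def warn_on_hv_mismatch(src_info, dst_info, feedback_fn):
  # src_info/dst_info are the per-hypervisor dicts unpacked from the
  # payload, no longer the raw RPC result objects.
  if HV_VERSION_KEY in src_info and HV_VERSION_KEY in dst_info:
    src_version = src_info[HV_VERSION_KEY]
    dst_version = dst_info[HV_VERSION_KEY]
    if src_version != dst_version:
      feedback_fn("* warning: hypervisor version mismatch between"
                  " source (%s) and target (%s) node" %
                  (src_version, dst_version))

warnings = []
warn_on_hv_mismatch({"hv_version": "4.1.2"}, {"hv_version": "4.2.0"},
                    warnings.append)
assert len(warnings) == 1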
@@ -11377,35 +11398,39 @@
       instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                   instance.hypervisor)
       nodeinfo = self.rpc.call_node_info(mem_check_list, None,
-                                         instance.hypervisor)
+                                         [instance.hypervisor])
       pninfo = nodeinfo[pnode]
       msg = pninfo.fail_msg
       if msg:
         # Assume the primary node is unreachable and go ahead
         self.warn.append("Can't get info from primary node %s: %s" %
                          (pnode, msg))
-      elif not isinstance(pninfo.payload.get("memory_free", None), int):
-        self.warn.append("Node data from primary node %s doesn't contain"
-                         " free memory information" % pnode)
-      elif instance_info.fail_msg:
-        self.warn.append("Can't get instance runtime information: %s" %
-                        instance_info.fail_msg)
       else:
-        if instance_info.payload:
-          current_mem = int(instance_info.payload["memory"])
+        (_, _, (pnhvinfo, )) = pninfo.payload
+        if not isinstance(pnhvinfo.get("memory_free", None), int):
+          self.warn.append("Node data from primary node %s doesn't contain"
+                           " free memory information" % pnode)
+        elif instance_info.fail_msg:
+          self.warn.append("Can't get instance runtime information: %s" %
+                          instance_info.fail_msg)
         else:
-          # Assume instance not running
-          # (there is a slight race condition here, but it's not very probable,
-          # and we have no other way to check)
-          current_mem = 0
-        #TODO(dynmem): do the appropriate check involving MINMEM
-        miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
-                    pninfo.payload["memory_free"])
-        if miss_mem > 0:
-          raise errors.OpPrereqError("This change will prevent the instance"
-                                     " from starting, due to %d MB of memory"
-                                     " missing on its primary node" % miss_mem,
-                                     errors.ECODE_NORES)
+          if instance_info.payload:
+            current_mem = int(instance_info.payload["memory"])
+          else:
+            # Assume instance not running
+            # (there is a slight race condition here, but it's not very
+            # probable, and we have no other way to check)
+            # TODO: Describe race condition
+            current_mem = 0
+          #TODO(dynmem): do the appropriate check involving MINMEM
+          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
+                      pninfo.payload["memory_free"])
+          if miss_mem > 0:
+            raise errors.OpPrereqError("This change will prevent the instance"
+                                       " from starting, due to %d MB of memory"
+                                       " missing on its primary node" %
+                                       miss_mem,
+                                       errors.ECODE_NORES)
 
       if be_new[constants.BE_AUTO_BALANCE]:
         for node, nres in nodeinfo.items():
@@ -11413,12 +11438,13 @@
             continue
           nres.Raise("Can't get info from secondary node %s" % node,
                      prereq=True, ecode=errors.ECODE_STATE)
-          if not isinstance(nres.payload.get("memory_free", None), int):
+          (_, _, (nhvinfo, )) = nres.payload
+          if not isinstance(nhvinfo.get("memory_free", None), int):
             raise errors.OpPrereqError("Secondary node %s didn't return free"
                                        " memory information" % node,
                                        errors.ECODE_STATE)
           #TODO(dynmem): do the appropriate check involving MINMEM
-          elif be_new[constants.BE_MAXMEM] > nres.payload["memory_free"]:
+          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
             raise errors.OpPrereqError("This change will prevent the instance"
                                        " from failover to its secondary node"
                                        " %s, due to not enough memory" % node,
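
The surrounding memory arithmetic is untouched by the refactoring; only where
memory_free comes from changes. For reference, the shortfall computed for the
primary node is BE_MAXMEM minus the memory the instance currently holds minus
what the node reports free. A worked example with invented numbers:

be_maxmem = 8192     # requested BE_MAXMEM for the instance, in MiB
current_mem = 2048   # memory the running instance already occupies, in MiB
memory_free = 5120   # "memory_free" reported by the primary node, in MiB

miss_mem = be_maxmem - current_mem - memory_free
assert miss_mem == 1024  # 8192 - 2048 - 5120: 1024 MiB short, so the
                         # change would be rejected with ECODE_NORES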
@@ -13491,8 +13517,8 @@
     else:
      hypervisor_name = cluster_info.enabled_hypervisors[0]
 
-    node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
-                                        hypervisor_name)
+    node_data = self.rpc.call_node_info(node_list, [cfg.GetVGName()],
+                                        [hypervisor_name])
     node_iinfo = \
       self.rpc.call_all_instances_info(node_list,
                                        cluster_info.enabled_hypervisors)
@@ -13565,7 +13591,7 @@
         nresult.Raise("Can't get data for node %s" % nname)
         node_iinfo[nname].Raise("Can't get node instance info from node %s" %
                                 nname)
-        remote_info = nresult.payload
+        remote_info = _MakeLegacyNodeInfo(nresult.payload)
 
         for attr in ["memory_total", "memory_free", "memory_dom0",
                      "vg_size", "vg_free", "cpu_total"]:
b/lib/rpc_defs.py
@@ -412,10 +412,10 @@
     ("address", None, "IP address"),
     ], None, "Checks if a node has the given IP address"),
   ("node_info", MULTI, TMO_URGENT, [
-    ("vg_name", None,
-     "Name of the volume group to ask for disk space information"),
-    ("hypervisor_type", None,
-     "Name of the hypervisor to ask for memory information"),
+    ("vg_names", None,
+     "Names of the volume groups to ask for disk space information"),
+    ("hv_names", None,
+     "Names of the hypervisors to ask for node information"),
     ], None, "Return node information"),
   ("node_verify", MULTI, TMO_NORMAL, [
     ("checkdict", None, None),
b/lib/server/noded.py
@@ -666,8 +666,8 @@
     """Query node information.
 
     """
-    vgname, hypervisor_type = params
-    return backend.GetNodeInfo(vgname, hypervisor_type)
+    (vg_names, hv_names) = params
+    return backend.GetNodeInfo(vg_names, hv_names)
 
   @staticmethod
   def perspective_etc_hosts_modify(params):
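
End to end, the wire format stays a positional parameter list: the client
sends [vg_names, hv_names], perspective_node_info unpacks the pair and
forwards it to backend.GetNodeInfo, and the three-element tuple travels back
as the payload. A compressed standalone round-trip sketch with both sides
stubbed (all values invented):

def backend_get_node_info(vg_names, hv_names):
  # Stub of backend.GetNodeInfo returning the new three-element tuple.
  def named(names, fn):
    return None if names is None else [fn(n) for n in names]
  return ("boot-id",
          named(vg_names, lambda n: {"name": n, "size": 204800,
                                     "free": 31744}),
          named(hv_names, lambda n: {"memory_free": 7680, "cpu_total": 8}))

def perspective_node_info(params):
  # Mirrors the updated handler in lib/server/noded.py.
  (vg_names, hv_names) = params
  return backend_get_node_info(vg_names, hv_names)

payload = perspective_node_info([["xenvg"], ["xen-pvm"]])
(bootid, (vg_info, ), (hv_info, )) = payload
assert vg_info["name"] == "xenvg" and hv_info["cpu_total"] == 8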
