Revision e69d05fd lib/cmdlib.py

--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -586,8 +586,11 @@
                           (node, node_result['node-net-test'][node]))
 
     hyp_result = node_result.get('hypervisor', None)
-    if hyp_result is not None:
-      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
+    if isinstance(hyp_result, dict):
+      for hv_name, hv_result in hyp_result.iteritems():
+        if hv_result is not None:
+          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
+                      (hv_name, hv_result))
     return bad
 
   def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
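This first hunk is the consumer side of the change: a node's verify payload no longer carries a single 'hypervisor' value but a dict mapping each enabled hypervisor name to its failure message, with None meaning success. A minimal, self-contained sketch of handling that shape (the hypervisor names and failure message below are invented):

    def feedback(msg):
      print(msg)

    def report_hypervisor_failures(node_result, feedback_fn):
      # 'hypervisor' now maps each enabled hypervisor name to its failure
      # message, or None when that hypervisor verified cleanly.
      hyp_result = node_result.get('hypervisor', None)
      if isinstance(hyp_result, dict):
        for hv_name, hv_result in sorted(hyp_result.items()):
          if hv_result is not None:
            feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
                        (hv_name, hv_result))

    # Hypothetical payload: 'xen-hvm' failed, 'xen-pvm' passed.
    report_hypervisor_failures(
        {'hypervisor': {'xen-pvm': None, 'xen-hvm': 'daemon not running'}},
        feedback)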
@@ -721,6 +724,7 @@
       feedback_fn("  - ERROR: %s" % msg)
 
     vg_name = self.cfg.GetVGName()
+    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
     nodelist = utils.NiceSort(self.cfg.GetNodeList())
     nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
     instancelist = utils.NiceSort(self.cfg.GetInstanceList())
@@ -739,19 +743,20 @@
 
     feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
     all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
-    all_instanceinfo = rpc.call_instance_list(nodelist)
+    all_instanceinfo = rpc.call_instance_list(nodelist, hypervisors)
     all_vglist = rpc.call_vg_list(nodelist)
     node_verify_param = {
       'filelist': file_names,
       'nodelist': nodelist,
-      'hypervisor': None,
+      'hypervisor': hypervisors,
       'node-net-test': [(node.name, node.primary_ip, node.secondary_ip)
                         for node in nodeinfo]
       }
     all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param,
                                       self.cfg.GetClusterName())
     all_rversion = rpc.call_version(nodelist)
-    all_ninfo = rpc.call_node_info(nodelist, self.cfg.GetVGName())
+    all_ninfo = rpc.call_node_info(nodelist, self.cfg.GetVGName(),
+                                   self.cfg.GetHypervisorType())
 
     for node in nodelist:
       feedback_fn("* Verifying node %s" % node)
@@ -1470,7 +1475,8 @@
 
     if self.dynamic_fields.intersection(self.op.output_fields):
       live_data = {}
-      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
+      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName(),
+                                     self.cfg.GetHypervisorType())
       for name in nodenames:
         nodeinfo = node_data.get(name, None)
         if nodeinfo:
@@ -1808,7 +1814,7 @@
                        (fname, to_node))
 
     to_copy = []
-    if self.cfg.GetHypervisorType() == constants.HT_XEN_HVM31:
+    if constants.HT_XEN_HVM31 in self.cfg.GetClusterInfo().enabled_hypervisors:
       to_copy.append(constants.VNC_PASSWORD_FILE)
     for fname in to_copy:
       result = rpc.call_upload_file([node], fname)
@@ -1852,6 +1858,7 @@
       "master": self.cfg.GetMasterNode(),
       "architecture": (platform.architecture()[0], platform.machine()),
       "hypervisor_type": self.cfg.GetHypervisorType(),
+      "enabled_hypervisors": self.cfg.GetClusterInfo().enabled_hypervisors,
       }
 
     return result
@@ -2047,7 +2054,8 @@
   _ShutdownInstanceDisks.
 
   """
-  ins_l = rpc.call_instance_list([instance.primary_node])
+  ins_l = rpc.call_instance_list([instance.primary_node],
+                                 [instance.hypervisor])
   ins_l = ins_l[instance.primary_node]
   if not type(ins_l) is list:
     raise errors.OpExecError("Can't contact node '%s'" %
@@ -2081,7 +2089,7 @@
   return result
 
 
-def _CheckNodeFreeMemory(cfg, node, reason, requested):
+def _CheckNodeFreeMemory(cfg, node, reason, requested, hypervisor):
   """Checks if a node has enough free memory.
 
   This function check if a given node has the needed amount of free
@@ -2089,14 +2097,21 @@
   information from the node, this function raise an OpPrereqError
   exception.
 
-  Args:
-    - cfg: a ConfigWriter instance
-    - node: the node name
-    - reason: string to use in the error message
-    - requested: the amount of memory in MiB
+  @type cfg: C{config.ConfigWriter}
+  @param cfg: the ConfigWriter instance from which we get configuration data
+  @type node: C{str}
+  @param node: the node to check
+  @type reason: C{str}
+  @param reason: string to use in the error message
+  @type requested: C{int}
+  @param requested: the amount of memory in MiB to check for
+  @type hypervisor: C{str}
+  @param hypervisor: the hypervisor to ask for memory stats
+  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
+      we cannot check the node
 
   """
-  nodeinfo = rpc.call_node_info([node], cfg.GetVGName())
+  nodeinfo = rpc.call_node_info([node], cfg.GetVGName(), hypervisor)
   if not nodeinfo or not isinstance(nodeinfo, dict):
     raise errors.OpPrereqError("Could not contact node %s for resource"
                              " information" % (node,))
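_CheckNodeFreeMemory now has to name the hypervisor whose memory stats it wants, since each hypervisor accounts for free memory separately. A condensed, runnable sketch of the check it performs, with the RPC stubbed out; the stub and its 'memory_free' reply shape are assumptions for illustration, not the real rpc module:

    class OpPrereqError(Exception):
      pass

    def stub_call_node_info(nodes, vg_name, hypervisor):
      # Invented reply: per-node dict with a 'memory_free' field in MiB.
      return dict((n, {'memory_free': 2048}) for n in nodes)

    def check_node_free_memory(node, reason, requested, hypervisor):
      nodeinfo = stub_call_node_info([node], 'xenvg', hypervisor)
      info = nodeinfo.get(node)
      if not isinstance(info, dict):
        raise OpPrereqError("Could not contact node %s" % node)
      free_mem = info.get('memory_free')
      if not isinstance(free_mem, int):
        raise OpPrereqError("Can't compute free memory on node %s" % node)
      if requested > free_mem:
        raise OpPrereqError("Not enough memory on node %s for %s:"
                            " needed %d MiB, available %d MiB" %
                            (node, reason, requested, free_mem))

    check_node_free_memory('node1', "starting instance inst1", 512, 'xen-pvm')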
@@ -2158,7 +2173,7 @@
 
     _CheckNodeFreeMemory(self.cfg, instance.primary_node,
                          "starting instance %s" % instance.name,
-                         instance.memory)
+                         instance.memory, instance.hypervisor)
 
   def Exec(self, feedback_fn):
     """Start the instance.
@@ -2357,7 +2372,8 @@
     if instance.status != "down":
       raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                  self.op.instance_name)
-    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
+    remote_info = rpc.call_instance_info(instance.primary_node, instance.name,
+                                         instance.hypervisor)
     if remote_info:
       raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                  (self.op.instance_name,
@@ -2434,7 +2450,8 @@
     if instance.status != "down":
       raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                  self.op.instance_name)
-    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
+    remote_info = rpc.call_instance_info(instance.primary_node, instance.name,
+                                         instance.hypervisor)
     if remote_info:
       raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                  (self.op.instance_name,
@@ -2590,7 +2607,7 @@
       "hvm_boot_order", "hvm_acpi", "hvm_pae",
       "hvm_cdrom_image_path", "hvm_nic_type",
       "hvm_disk_type", "vnc_bind_address",
-      "serial_no",
+      "serial_no", "hypervisor",
       ])
     _CheckOutputFields(static=self.static_fields,
                        dynamic=self.dynamic_fields,
@@ -2642,11 +2659,12 @@
     # begin data gathering
 
     nodes = frozenset([inst.primary_node for inst in instance_list])
+    hv_list = list(set([inst.hypervisor for inst in instance_list]))
 
     bad_nodes = []
     if self.dynamic_fields.intersection(self.op.output_fields):
       live_data = {}
-      node_data = rpc.call_all_instances_info(nodes)
+      node_data = rpc.call_all_instances_info(nodes, hv_list)
       for name in nodes:
         result = node_data[name]
         if result:
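Since each instance now records its own hypervisor, the live-data query must cover every hypervisor actually in use, which the hv_list expression above derives by deduplicating with a set. For example (toy instance objects, only the attribute matters):

    class Inst(object):
      def __init__(self, hypervisor):
        self.hypervisor = hypervisor

    instance_list = [Inst('xen-pvm'), Inst('xen-hvm'), Inst('xen-pvm')]
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
    print(sorted(hv_list))   # ['xen-hvm', 'xen-pvm'] -- each hypervisor once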
@@ -2734,6 +2752,8 @@
             val = "default"
           else:
             val = "-"
+        elif field == "hypervisor":
+          val = instance.hypervisor
         else:
           raise errors.ParameterError(field)
         iout.append(val)
@@ -2795,7 +2815,8 @@
     target_node = secondary_nodes[0]
     # check memory requirements on the secondary node
     _CheckNodeFreeMemory(self.cfg, target_node, "failing over instance %s" %
-                         instance.name, instance.memory)
+                         instance.name, instance.memory,
+                         instance.hypervisor)
 
     # check bridge existance
     brlist = [nic.bridge for nic in instance.nics]
@@ -3150,7 +3171,7 @@
     for attr in ["kernel_path", "initrd_path", "pnode", "snode",
                  "iallocator", "hvm_boot_order", "hvm_acpi", "hvm_pae",
                  "hvm_cdrom_image_path", "hvm_nic_type", "hvm_disk_type",
-                 "vnc_bind_address"]:
+                 "vnc_bind_address", "hypervisor"]:
       if not hasattr(self.op, attr):
         setattr(self.op, attr, None)
 
@@ -3327,6 +3348,19 @@
       raise errors.OpPrereqError("Cluster does not support lvm-based"
                                  " instances")
 
+    # cheap checks (from the config only)
+
+    if self.op.hypervisor is None:
+      self.op.hypervisor = self.cfg.GetHypervisorType()
+
+    enabled_hvs = self.cfg.GetClusterInfo().enabled_hypervisors
+    if self.op.hypervisor not in enabled_hvs:
+      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
+                                 " cluster (%s)" % (self.op.hypervisor,
+                                  ",".join(enabled_hvs)))
+
+    # costly checks (from nodes)
+
     if self.op.mode == constants.INSTANCE_IMPORT:
       src_node = self.op.src_node
       src_path = self.op.src_path
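The creation path now splits its prerequisite checks into cheap config-only ones (default the requested hypervisor to the cluster-wide type, then reject anything not in the enabled list) before the costly per-node ones. The same guard in isolation, as a sketch with invented names:

    class OpPrereqError(Exception):
      pass

    def resolve_hypervisor(requested, default_hv, enabled_hvs):
      # Fall back to the cluster default, then validate against enabled ones.
      hv = requested if requested is not None else default_hv
      if hv not in enabled_hvs:
        raise OpPrereqError("Selected hypervisor (%s) not enabled in the"
                            " cluster (%s)" % (hv, ",".join(enabled_hvs)))
      return hv

    print(resolve_hypervisor(None, 'xen-pvm', ['xen-pvm', 'xen-hvm']))  # xen-pvm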
@@ -3401,7 +3435,8 @@
     # Check lv size requirements
     if req_size is not None:
       nodenames = [pnode.name] + self.secondaries
-      nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
+      nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName(),
+                                    self.op.hypervisor)
       for node in nodenames:
         info = nodeinfo.get(node, None)
         if not info:
@@ -3435,7 +3470,7 @@
     if self.op.start:
       _CheckNodeFreeMemory(self.cfg, self.pnode.name,
                            "creating instance %s" % self.op.instance_name,
-                           self.op.mem_size)
+                           self.op.mem_size, self.op.hypervisor)
 
     # hvm_cdrom_image_path verification
     if self.op.hvm_cdrom_image_path is not None:
@@ -3458,7 +3493,7 @@
                                    self.op.vnc_bind_address)
 
     # Xen HVM device type checks
-    if self.cfg.GetHypervisorType() == constants.HT_XEN_HVM31:
+    if self.op.hypervisor == constants.HT_XEN_HVM31:
       if self.op.hvm_nic_type not in constants.HT_HVM_VALID_NIC_TYPES:
         raise errors.OpPrereqError("Invalid NIC type %s specified for Xen HVM"
                                    " hypervisor" % self.op.hvm_nic_type)
@@ -3487,7 +3522,7 @@
     if self.inst_ip is not None:
       nic.ip = self.inst_ip
 
-    ht_kind = self.cfg.GetHypervisorType()
+    ht_kind = self.op.hypervisor
     if ht_kind in constants.HTS_REQ_PORT:
       network_port = self.cfg.AllocatePort()
     else:
@@ -3533,6 +3568,7 @@
                             vnc_bind_address=self.op.vnc_bind_address,
                             hvm_nic_type=self.op.hvm_nic_type,
                             hvm_disk_type=self.op.hvm_disk_type,
+                            hypervisor=self.op.hypervisor,
                             )
 
     feedback_fn("* creating instance disks...")
@@ -3632,7 +3668,8 @@
     instance = self.instance
     node = instance.primary_node
 
-    node_insts = rpc.call_instance_list([node])[node]
+    node_insts = rpc.call_instance_list([node],
+                                        [instance.hypervisor])[node]
     if node_insts is False:
       raise errors.OpExecError("Can't connect to node %s." % node)
 
@@ -3641,7 +3678,7 @@
 
     logger.Debug("connecting to console of %s on %s" % (instance.name, node))
 
-    hyper = hypervisor.GetHypervisor(self.cfg)
+    hyper = hypervisor.GetHypervisor(instance.hypervisor)
     console_cmd = hyper.GetShellCommandForConsole(instance)
 
     # build ssh cmdline
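hypervisor.GetHypervisor is now keyed by the instance's own hypervisor name instead of the cluster config, so each console request dispatches to the right backend. A plausible shape for such a lookup is a name-to-class registry; this sketch is illustrative only, not Ganeti's actual implementation:

    class FakeXenPvm(object):
      def GetShellCommandForConsole(self, instance):
        return "console command for PVM instance %s" % instance

    class FakeXenHvm(object):
      def GetShellCommandForConsole(self, instance):
        return "console command for HVM instance %s" % instance

    _HV_MAP = {'xen-pvm': FakeXenPvm, 'xen-hvm': FakeXenHvm}

    def get_hypervisor(name):
      # Instantiate the handler registered under this hypervisor name.
      return _HV_MAP[name]()

    print(get_hypervisor('xen-pvm').GetShellCommandForConsole('inst1'))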
@@ -4243,7 +4280,8 @@
                                  (self.op.disk, instance.name))
 
     nodenames = [instance.primary_node] + list(instance.secondary_nodes)
-    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
+    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName(),
+                                  instance.hypervisor)
     for node in nodenames:
       info = nodeinfo.get(node, None)
       if not info:
@@ -4366,7 +4404,8 @@
     result = {}
     for instance in self.wanted_instances:
       remote_info = rpc.call_instance_info(instance.primary_node,
-                                                instance.name)
+                                           instance.name,
+                                           instance.hypervisor)
       if remote_info and "state" in remote_info:
         remote_state = "up"
       else:
@@ -4390,9 +4429,10 @@
         "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
         "disks": disks,
         "vcpus": instance.vcpus,
+        "hypervisor": instance.hypervisor,
         }
 
-      htkind = self.cfg.GetHypervisorType()
+      htkind = instance.hypervisor
       if htkind == constants.HT_XEN_PVM30:
         idict["kernel_path"] = instance.kernel_path
         idict["initrd_path"] = instance.initrd_path
@@ -4589,8 +4629,10 @@
       pnode = self.instance.primary_node
       nodelist = [pnode]
       nodelist.extend(instance.secondary_nodes)
-      instance_info = rpc.call_instance_info(pnode, instance.name)
-      nodeinfo = rpc.call_node_info(nodelist, self.cfg.GetVGName())
+      instance_info = rpc.call_instance_info(pnode, instance.name,
+                                             instance.hypervisor)
+      nodeinfo = rpc.call_node_info(nodelist, self.cfg.GetVGName(),
+                                    instance.hypervisor)
 
       if pnode not in nodeinfo or not isinstance(nodeinfo[pnode], dict):
         # Assume the primary node is unreachable and go ahead
@@ -4617,7 +4659,7 @@
                            " node %s" % node)
 
     # Xen HVM device type checks
-    if self.cfg.GetHypervisorType() == constants.HT_XEN_HVM31:
+    if instance.hypervisor == constants.HT_XEN_HVM31:
       if self.op.hvm_nic_type is not None:
         if self.op.hvm_nic_type not in constants.HT_HVM_VALID_NIC_TYPES:
           raise errors.OpPrereqError("Invalid NIC type %s specified for Xen"
@@ -5180,12 +5222,13 @@
 
     """
     cfg = self.cfg
+    cluster_info = cfg.GetClusterInfo()
     # cluster data
     data = {
       "version": 1,
       "cluster_name": self.cfg.GetClusterName(),
-      "cluster_tags": list(cfg.GetClusterInfo().GetTags()),
-      "hypervisor_type": self.cfg.GetHypervisorType(),
+      "cluster_tags": list(cluster_info.GetTags()),
+      "enable_hypervisors": list(cluster_info.enabled_hypervisors),
       # we don't have job IDs
       }
 
@@ -5194,7 +5237,10 @@
     # node data
     node_results = {}
     node_list = cfg.GetNodeList()
-    node_data = rpc.call_node_info(node_list, cfg.GetVGName())
+    # FIXME: here we have only one hypervisor information, but
+    # instance can belong to different hypervisors
+    node_data = rpc.call_node_info(node_list, cfg.GetVGName(),
+                                   cfg.GetHypervisorType())
    for nname in node_list:
      ninfo = cfg.GetNodeInfo(nname)
      if nname not in node_data or not isinstance(node_data[nname], dict):
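The FIXME added here is candid about a leftover limitation: the allocator export still fetches node info for the single cluster-wide hypervisor type, even though instances may now span several. One illustrative way to close that gap would be to query once per enabled hypervisor and keep the views side by side; the stubbed RPC below and its reply shape are invented:

    def stub_call_node_info(nodes, vg_name, hypervisor):
      return dict((n, {'hv': hypervisor, 'memory_free': 1024}) for n in nodes)

    def node_info_all_hvs(node_list, vg_name, enabled_hvs):
      # One query per hypervisor; callers can then pick the view they need.
      return dict((hv, stub_call_node_info(node_list, vg_name, hv))
                  for hv in enabled_hvs)

    data = node_info_all_hvs(['node1'], 'xenvg', ['xen-pvm', 'xen-hvm'])
    print(sorted(data.keys()))   # ['xen-hvm', 'xen-pvm']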
@@ -5250,6 +5296,7 @@
         "nics": nic_data,
         "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
         "disk_template": iinfo.disk_template,
+        "hypervisor": iinfo.hypervisor,
         }
       instance_data[iinfo.name] = pir
 
