Makefile: Fix list of directories
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index c91c2d1..753120c 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -2093,7 +2093,7 @@ class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
       msg = ("hypervisor %s parameters syntax check (source %s): %%s" %
              (item, hv_name))
       try:
-        hv_class = hypervisor.GetHypervisor(hv_name)
+        hv_class = hypervisor.GetHypervisorClass(hv_name)
         utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
         hv_class.CheckParameterSyntax(hv_params)
       except errors.GenericError, err:
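
The callers above only need class-level checks such as CheckParameterSyntax,
so the lookup is switched from GetHypervisor to GetHypervisorClass and no
hypervisor instance is built. The two helpers in ganeti.hypervisor presumably
relate roughly as below; the lookup-table name is invented, only the two
function names appear in this patch:

  # Sketch of the relationship between the two lookup helpers (assumption;
  # the map below is a stand-in for the real per-hypervisor class registry).
  _HV_CLASS_MAP = {}  # hypervisor name -> hypervisor class, filled elsewhere

  def GetHypervisorClass(ht_kind):
    # Returns the class itself; classmethods such as CheckParameterSyntax
    # can then be called without constructing an instance.
    return _HV_CLASS_MAP[ht_kind]

  def GetHypervisor(ht_kind):
    # Returns a fresh instance, only needed when per-node state is involved.
    return GetHypervisorClass(ht_kind)()
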
@@ -3167,7 +3167,12 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
                                  len(s) == 2 for s in statuses)
                       for inst, nnames in instdisk.items()
                       for nname, statuses in nnames.items())
-    assert set(instdisk) == set(instanceinfo), "instdisk consistency failure"
+    if __debug__:
+      instdisk_keys = set(instdisk)
+      instanceinfo_keys = set(instanceinfo)
+      assert instdisk_keys == instanceinfo_keys, \
+        ("instdisk keys (%s) do not match instanceinfo keys (%s)" %
+         (instdisk_keys, instanceinfo_keys))
 
     return instdisk
 
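
Wrapping the whole block in an if __debug__ guard matters here because the
two key sets are now computed in separate statements: a bare assert is
stripped under "python -O", but statements hoisted out of it would keep
running. A minimal standalone illustration (not ganeti code):

  # Work hoisted out of an assert keeps running under "python -O" unless it
  # is guarded explicitly, hence the surrounding "if __debug__".
  def check_consistency(instdisk, instanceinfo):
    if __debug__:
      instdisk_keys = set(instdisk)
      instanceinfo_keys = set(instanceinfo)
      assert instdisk_keys == instanceinfo_keys, \
        ("instdisk keys (%s) do not match instanceinfo keys (%s)" %
         (instdisk_keys, instanceinfo_keys))

  check_consistency({"inst1": {}}, {"inst1": object()})  # same keys, no error
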
@@ -4259,7 +4264,10 @@ class LUClusterSetParams(LogicalUnit):
           self.new_os_hvp[os_name] = hvs
         else:
           for hv_name, hv_dict in hvs.items():
-            if hv_name not in self.new_os_hvp[os_name]:
+            if hv_dict is None:
+              # Delete if it exists
+              self.new_os_hvp[os_name].pop(hv_name, None)
+            elif hv_name not in self.new_os_hvp[os_name]:
               self.new_os_hvp[os_name][hv_name] = hv_dict
             else:
               self.new_os_hvp[os_name][hv_name].update(hv_dict)
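
The new branch gives None a meaning when merging per-OS hypervisor
parameters: instead of being stored verbatim, it removes an existing
override. A standalone sketch of the merge semantics; the function and
sample data below are illustrative, not part of the LU:

  # Per-OS hypervisor parameter merge: None deletes an existing override,
  # an unknown name adds one, an existing name is updated in place.
  def merge_hv_overrides(os_entry, changes):
    for hv_name, hv_dict in changes.items():
      if hv_dict is None:
        os_entry.pop(hv_name, None)         # delete if it exists
      elif hv_name not in os_entry:
        os_entry[hv_name] = hv_dict         # new override
      else:
        os_entry[hv_name].update(hv_dict)   # merge into existing override

  entry = {"kvm": {"kernel_path": "/boot/vmlinuz"}}
  merge_hv_overrides(entry, {"kvm": None, "xen-pvm": {"use_bootloader": True}})
  assert entry == {"xen-pvm": {"use_bootloader": True}}
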
@@ -4305,7 +4313,7 @@ class LUClusterSetParams(LogicalUnit):
             (self.op.enabled_hypervisors and
              hv_name in self.op.enabled_hypervisors)):
           # either this is a new hypervisor, or its parameters have changed
-          hv_class = hypervisor.GetHypervisor(hv_name)
+          hv_class = hypervisor.GetHypervisorClass(hv_name)
           utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
           hv_class.CheckParameterSyntax(hv_params)
           _CheckHVParams(self, node_list, hv_name, hv_params)
@@ -4319,7 +4327,7 @@ class LUClusterSetParams(LogicalUnit):
           # we need to fill in the new os_hvp on top of the actual hv_p
           cluster_defaults = self.new_hvparams.get(hv_name, {})
           new_osp = objects.FillDict(cluster_defaults, hv_params)
-          hv_class = hypervisor.GetHypervisor(hv_name)
+          hv_class = hypervisor.GetHypervisorClass(hv_name)
           hv_class.CheckParameterSyntax(new_osp)
           _CheckHVParams(self, node_list, hv_name, new_osp)
 
@@ -4541,12 +4549,14 @@ def _ComputeAncillaryFiles(cluster, redist):
   files_vm = set(
     filename
     for hv_name in cluster.enabled_hypervisors
-    for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles()[0])
+    for filename in
+      hypervisor.GetHypervisorClass(hv_name).GetAncillaryFiles()[0])
 
   files_opt |= set(
     filename
     for hv_name in cluster.enabled_hypervisors
-    for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles()[1])
+    for filename in
+      hypervisor.GetHypervisorClass(hv_name).GetAncillaryFiles()[1])
 
   # Filenames in each category must be unique
   all_files_set = files_all | files_mc | files_vm
@@ -5153,6 +5163,159 @@ class LUOsDiagnose(NoHooksLU):
     return self.oq.OldStyleQuery(self)
 
 
+class _ExtStorageQuery(_QueryBase):
+  FIELDS = query.EXTSTORAGE_FIELDS
+
+  def ExpandNames(self, lu):
+    # Lock all nodes in shared mode
+    # Temporary removal of locks, should be reverted later
+    # TODO: reintroduce locks when they are lighter-weight
+    lu.needed_locks = {}
+    #self.share_locks[locking.LEVEL_NODE] = 1
+    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+
+    # The following variables interact with _QueryBase._GetNames
+    if self.names:
+      self.wanted = self.names
+    else:
+      self.wanted = locking.ALL_SET
+
+    self.do_locking = self.use_locking
+
+  def DeclareLocks(self, lu, level):
+    pass
+
+  @staticmethod
+  def _DiagnoseByProvider(rlist):
+    """Remaps a per-node return list into an a per-provider per-node dictionary
+
+    @param rlist: a map with node names as keys and ExtStorage objects as values
+
+    @rtype: dict
+    @return: a dictionary with extstorage providers as keys and as
+        value another map, with nodes as keys and tuples of
+        (path, status, diagnose, parameters) as values, eg::
+
+          {"provider1": {"node1": [(/usr/lib/..., True, "", [])]
+                         "node2": [(/srv/..., False, "missing file")]
+                         "node3": [(/srv/..., True, "", [])]
+          }
+
+    """
+    all_es = {}
+    # we build here the list of nodes that didn't fail the RPC (at RPC
+    # level), so that nodes with a non-responding node daemon don't
+    # make all providers invalid
+    good_nodes = [node_name for node_name in rlist
+                  if not rlist[node_name].fail_msg]
+    for node_name, nr in rlist.items():
+      if nr.fail_msg or not nr.payload:
+        continue
+      for (name, path, status, diagnose, params) in nr.payload:
+        if name not in all_es:
+          # build a list of nodes for this provider containing empty lists
+          # for each node in good_nodes
+          all_es[name] = {}
+          for nname in good_nodes:
+            all_es[name][nname] = []
+        # convert params from [name, help] to (name, help)
+        params = [tuple(v) for v in params]
+        all_es[name][node_name].append((path, status, diagnose, params))
+    return all_es
+
+  def _GetQueryData(self, lu):
+    """Computes the list of nodes and their attributes.
+
+    """
+    # Locking is not used
+    assert not (compat.any(lu.glm.is_owned(level)
+                           for level in locking.LEVELS
+                           if level != locking.LEVEL_CLUSTER) or
+                self.do_locking or self.use_locking)
+
+    valid_nodes = [node.name
+                   for node in lu.cfg.GetAllNodesInfo().values()
+                   if not node.offline and node.vm_capable]
+    pol = self._DiagnoseByProvider(lu.rpc.call_extstorage_diagnose(valid_nodes))
+
+    data = {}
+
+    nodegroup_list = lu.cfg.GetNodeGroupList()
+
+    for (es_name, es_data) in pol.items():
+      # For every provider compute the nodegroup validity.
+      # To do this we need to check the validity of each node in es_data
+      # and then construct the corresponding nodegroup dict:
+      #      { nodegroup1: status
+      #        nodegroup2: status
+      #      }
+      ndgrp_data = {}
+      for nodegroup in nodegroup_list:
+        ndgrp = lu.cfg.GetNodeGroup(nodegroup)
+
+        nodegroup_nodes = ndgrp.members
+        nodegroup_name = ndgrp.name
+        node_statuses = []
+
+        for node in nodegroup_nodes:
+          if node in valid_nodes:
+            if es_data[node] != []:
+              node_status = es_data[node][0][1]
+              node_statuses.append(node_status)
+            else:
+              node_statuses.append(False)
+
+        if False in node_statuses:
+          ndgrp_data[nodegroup_name] = False
+        else:
+          ndgrp_data[nodegroup_name] = True
+
+      # Compute the provider's parameters
+      parameters = set()
+      for idx, esl in enumerate(es_data.values()):
+        valid = bool(esl and esl[0][1])
+        if not valid:
+          break
+
+        node_params = esl[0][3]
+        if idx == 0:
+          # First entry
+          parameters.update(node_params)
+        else:
+          # Filter out inconsistent values
+          parameters.intersection_update(node_params)
+
+      params = list(parameters)
+
+      # Now fill all the info for this provider
+      info = query.ExtStorageInfo(name=es_name, node_status=es_data,
+                                  nodegroup_status=ndgrp_data,
+                                  parameters=params)
+
+      data[es_name] = info
+
+    # Prepare data in requested order
+    return [data[name] for name in self._GetNames(lu, pol.keys(), None)
+            if name in data]
+
+
+class LUExtStorageDiagnose(NoHooksLU):
+  """Logical unit for ExtStorage diagnose/query.
+
+  """
+  REQ_BGL = False
+
+  def CheckArguments(self):
+    self.eq = _ExtStorageQuery(qlang.MakeSimpleFilter("name", self.op.names),
+                               self.op.output_fields, False)
+
+  def ExpandNames(self):
+    self.eq.ExpandNames(self)
+
+  def Exec(self, feedback_fn):
+    return self.eq.OldStyleQuery(self)
+
+
 class LUNodeRemove(LogicalUnit):
   """Logical unit for removing a node.
 
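
The query path first remaps the RPC results per provider
(_DiagnoseByProvider) and then folds the per-node statuses into a
per-nodegroup verdict in _GetQueryData. A standalone sketch of that folding
step; all data below is invented and only the shape of es_data matches the
code above:

  # A nodegroup is considered valid for a provider only if every valid
  # member node reported a successful status.
  def nodegroup_status(es_data, valid_nodes, groups):
    # es_data: node -> [(path, status, diagnose, params), ...]
    # groups: nodegroup name -> list of member node names
    result = {}
    for grp_name, members in groups.items():
      statuses = [bool(es_data[node] and es_data[node][0][1])
                  for node in members if node in valid_nodes]
      result[grp_name] = False not in statuses
    return result

  es_data = {"node1": [("/srv/ganeti/extstorage/prov1", True, "", [])],
             "node2": [("/srv/ganeti/extstorage/prov1", False, "broken", [])],
             "node3": []}
  groups = {"group1": ["node1"], "group2": ["node2", "node3"]}
  result = nodegroup_status(es_data, ["node1", "node2", "node3"], groups)
  assert result == {"group1": True, "group2": False}
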
@@ -5283,8 +5446,9 @@ class _NodeQuery(_QueryBase):
       # filter out non-vm_capable nodes
       toquery_nodes = [name for name in nodenames if all_info[name].vm_capable]
 
+      es_flags = rpc.GetExclusiveStorageForNodeNames(lu.cfg, toquery_nodes)
       node_data = lu.rpc.call_node_info(toquery_nodes, [lu.cfg.GetVGName()],
-                                        [lu.cfg.GetHypervisorType()])
+                                        [lu.cfg.GetHypervisorType()], es_flags)
       live_data = dict((name, rpc.MakeLegacyNodeInfo(nresult.payload))
                        for (name, nresult) in node_data.items()
                        if not nresult.fail_msg and nresult.payload)
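
call_node_info now takes a fourth argument carrying exclusive-storage
information: the node query builds one flag per queried node through
rpc.GetExclusiveStorageForNodeNames, while callers that do not care about
storage simply pass False or None (see the hunks below). A small standalone
stand-in for the flag-map idea; the mapping shape is an assumption and the
fake configuration is invented:

  # Schematic stand-in (not ganeti code): build a per-node exclusive-storage
  # flag map to ship alongside a node info request.
  def build_es_flags(cfg, node_names):
    return dict((name, bool(cfg.get(name, {}).get("exclusive_storage")))
                for name in node_names)

  fake_cfg = {"node1": {"exclusive_storage": True}, "node2": {}}
  flags = build_es_flags(fake_cfg, ["node1", "node2"])
  assert flags == {"node1": True, "node2": False}
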
@@ -6822,7 +6986,7 @@ def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
       we cannot check the node
 
   """
-  nodeinfo = lu.rpc.call_node_info([node], None, [hypervisor_name])
+  nodeinfo = lu.rpc.call_node_info([node], None, [hypervisor_name], False)
   nodeinfo[node].Raise("Can't get data from node %s" % node,
                        prereq=True, ecode=errors.ECODE_ENVIRON)
   (_, _, (hv_info, )) = nodeinfo[node].payload
@@ -6883,7 +7047,8 @@ def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
       or we cannot check the node
 
   """
-  nodeinfo = lu.rpc.call_node_info(nodenames, [vg], None)
+  es_flags = rpc.GetExclusiveStorageForNodeNames(lu.cfg, nodenames)
+  nodeinfo = lu.rpc.call_node_info(nodenames, [vg], None, es_flags)
   for node in nodenames:
     info = nodeinfo[node]
     info.Raise("Cannot get current information from node %s" % node,
@@ -6919,7 +7084,7 @@ def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
       or we cannot check the node
 
   """
-  nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name])
+  nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
   for node in nodenames:
     info = nodeinfo[node]
     info.Raise("Cannot get current information from node %s" % node,
@@ -6997,7 +7162,7 @@ class LUInstanceStartup(LogicalUnit):
       utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
       filled_hvp = cluster.FillHV(instance)
       filled_hvp.update(self.op.hvparams)
-      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
+      hv_type = hypervisor.GetHypervisorClass(instance.hypervisor)
       hv_type.CheckParameterSyntax(filled_hvp)
       _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
 
@@ -7375,7 +7540,8 @@ class LUInstanceRecreateDisks(LogicalUnit):
                                         disks=[{constants.IDISK_SIZE: d.size,
                                                 constants.IDISK_MODE: d.mode}
                                                 for d in self.instance.disks],
-                                        hypervisor=self.instance.hypervisor)
+                                        hypervisor=self.instance.hypervisor,
+                                        node_whitelist=None)
     ial = iallocator.IAllocator(self.cfg, self.rpc, req)
 
     ial.Run(self.op.iallocator)
@@ -8735,7 +8901,7 @@ class TLMigrateInstance(Tasklet):
 
     # Check for hypervisor version mismatch and warn the user.
     nodeinfo = self.rpc.call_node_info([source_node, target_node],
-                                       None, [self.instance.hypervisor])
+                                       None, [self.instance.hypervisor], False)
     for ninfo in nodeinfo.values():
       ninfo.Raise("Unable to retrieve node information from node '%s'" %
                   ninfo.node)
@@ -10335,7 +10501,7 @@ class LUInstanceCreate(LogicalUnit):
     utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
     filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                       self.op.hvparams)
-    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
+    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
     hv_type.CheckParameterSyntax(filled_hvp)
     self.hv_full = filled_hvp
     # check that we don't specify global parameters on an instance
@@ -10488,6 +10654,16 @@ class LUInstanceCreate(LogicalUnit):
                         " from the first disk's node group will be"
                         " used")
 
+    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
+      nodes = [pnode]
+      if self.op.disk_template in constants.DTS_INT_MIRROR:
+        nodes.append(snode)
+      has_es = lambda n: _IsExclusiveStorageEnabledNode(self.cfg, n)
+      if compat.any(map(has_es, nodes)):
+        raise errors.OpPrereqError("Disk template %s not supported with"
+                                   " exclusive storage" % self.op.disk_template,
+                                   errors.ECODE_STATE)
+
     nodenames = [pnode.name] + self.secondaries
 
     # Verify instance specs
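
The new prerequisite refuses to create an instance whose disk template is
not in DTS_EXCL_STORAGE on nodes that have exclusive storage enabled. A
standalone sketch of the check; the node stand-in and the supported-template
set are placeholders (_IsExclusiveStorageEnabledNode reads the real node
parameters, constants.DTS_EXCL_STORAGE holds the real set):

  class FakeNode(object):
    def __init__(self, exclusive_storage):
      self.exclusive_storage = exclusive_storage

  def check_template_vs_exclusive_storage(template, nodes, supported):
    # Reject templates outside the supported set as soon as any involved
    # node runs with exclusive storage enabled.
    if template in supported:
      return
    if any(n.exclusive_storage for n in nodes):
      raise ValueError("Disk template %s not supported with exclusive"
                       " storage" % template)

  check_template_vs_exclusive_storage("drbd",
                                      [FakeNode(False), FakeNode(False)],
                                      frozenset(["plain"]))  # accepted
  # With FakeNode(True) in the list, the same call would raise ValueError.
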
@@ -11095,7 +11271,7 @@ def _GetInstanceConsole(cluster, instance):
   @rtype: dict
 
   """
-  hyper = hypervisor.GetHypervisor(instance.hypervisor)
+  hyper = hypervisor.GetHypervisorClass(instance.hypervisor)
   # beparams and hvparams are passed separately, to avoid editing the
   # instance and then saving the defaults in the instance itself.
   hvparams = cluster.FillHV(instance)
@@ -12409,14 +12585,22 @@ class LUInstanceGrowDisk(LogicalUnit):
                                    utils.FormatUnit(self.delta, "h"),
                                    errors.ECODE_INVAL)
 
-    if instance.disk_template not in (constants.DT_FILE,
-                                      constants.DT_SHARED_FILE,
-                                      constants.DT_RBD,
-                                      constants.DT_EXT):
+    self._CheckDiskSpace(nodenames, self.disk.ComputeGrowth(self.delta))
+
+  def _CheckDiskSpace(self, nodenames, req_vgspace):
+    template = self.instance.disk_template
+    if template not in constants.DTS_NO_FREE_SPACE_CHECK:
       # TODO: check the free disk space for file, when that feature will be
       # supported
-      _CheckNodesFreeDiskPerVG(self, nodenames,
-                               self.disk.ComputeGrowth(self.delta))
+      nodes = map(self.cfg.GetNodeInfo, nodenames)
+      es_nodes = filter(lambda n: _IsExclusiveStorageEnabledNode(self.cfg, n),
+                        nodes)
+      if es_nodes:
+        # With exclusive storage we need something smarter than just looking
+        # at free space; for now, let's simply abort the operation.
+        raise errors.OpPrereqError("Cannot grow disks when exclusive_storage"
+                                   " is enabled", errors.ECODE_STATE)
+      _CheckNodesFreeDiskPerVG(self, nodenames, req_vgspace)
 
   def Exec(self, feedback_fn):
     """Execute disk grow.
@@ -13240,7 +13424,65 @@ class LUInstanceSetParams(LogicalUnit):
     private.params = new_params
     private.filled = new_filled_params
 
-  def CheckPrereq(self): # pylint: disable=R0914
+  def _PreCheckDiskTemplate(self, pnode_info):
+    """CheckPrereq checks related to a new disk template."""
+    # Arguments are passed to avoid configuration lookups
+    instance = self.instance
+    pnode = instance.primary_node
+    cluster = self.cluster
+    if instance.disk_template == self.op.disk_template:
+      raise errors.OpPrereqError("Instance already has disk template %s" %
+                                 instance.disk_template, errors.ECODE_INVAL)
+
+    if (instance.disk_template,
+        self.op.disk_template) not in self._DISK_CONVERSIONS:
+      raise errors.OpPrereqError("Unsupported disk template conversion from"
+                                 " %s to %s" % (instance.disk_template,
+                                                self.op.disk_template),
+                                 errors.ECODE_INVAL)
+    _CheckInstanceState(self, instance, INSTANCE_DOWN,
+                        msg="cannot change disk template")
+    if self.op.disk_template in constants.DTS_INT_MIRROR:
+      if self.op.remote_node == pnode:
+        raise errors.OpPrereqError("Given new secondary node %s is the same"
+                                   " as the primary node of the instance" %
+                                   self.op.remote_node, errors.ECODE_STATE)
+      _CheckNodeOnline(self, self.op.remote_node)
+      _CheckNodeNotDrained(self, self.op.remote_node)
+      # FIXME: here we assume that the old instance type is DT_PLAIN
+      assert instance.disk_template == constants.DT_PLAIN
+      disks = [{constants.IDISK_SIZE: d.size,
+                constants.IDISK_VG: d.logical_id[0]}
+               for d in instance.disks]
+      required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
+      _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
+
+      snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
+      snode_group = self.cfg.GetNodeGroup(snode_info.group)
+      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
+                                                              snode_group)
+      _CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info,
+                              ignore=self.op.ignore_ipolicy)
+      if pnode_info.group != snode_info.group:
+        self.LogWarning("The primary and secondary nodes are in two"
+                        " different node groups; the disk parameters"
+                        " from the first disk's node group will be"
+                        " used")
+
+    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
+      # Make sure none of the nodes require exclusive storage
+      nodes = [pnode_info]
+      if self.op.disk_template in constants.DTS_INT_MIRROR:
+        assert snode_info
+        nodes.append(snode_info)
+      has_es = lambda n: _IsExclusiveStorageEnabledNode(self.cfg, n)
+      if compat.any(map(has_es, nodes)):
+        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
+                  " storage is enabled" % (instance.disk_template,
+                                           self.op.disk_template))
+        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
+
+  def CheckPrereq(self):
     """Check prerequisites.
 
     This only checks the instance list against the existing names.
@@ -13318,44 +13560,7 @@ class LUInstanceSetParams(LogicalUnit):
       "Can't modify disk template and apply disk changes at the same time"
 
     if self.op.disk_template:
-      if instance.disk_template == self.op.disk_template:
-        raise errors.OpPrereqError("Instance already has disk template %s" %
-                                   instance.disk_template, errors.ECODE_INVAL)
-
-      if (instance.disk_template,
-          self.op.disk_template) not in self._DISK_CONVERSIONS:
-        raise errors.OpPrereqError("Unsupported disk template conversion from"
-                                   " %s to %s" % (instance.disk_template,
-                                                  self.op.disk_template),
-                                   errors.ECODE_INVAL)
-      _CheckInstanceState(self, instance, INSTANCE_DOWN,
-                          msg="cannot change disk template")
-      if self.op.disk_template in constants.DTS_INT_MIRROR:
-        if self.op.remote_node == pnode:
-          raise errors.OpPrereqError("Given new secondary node %s is the same"
-                                     " as the primary node of the instance" %
-                                     self.op.remote_node, errors.ECODE_STATE)
-        _CheckNodeOnline(self, self.op.remote_node)
-        _CheckNodeNotDrained(self, self.op.remote_node)
-        # FIXME: here we assume that the old instance type is DT_PLAIN
-        assert instance.disk_template == constants.DT_PLAIN
-        disks = [{constants.IDISK_SIZE: d.size,
-                  constants.IDISK_VG: d.logical_id[0]}
-                 for d in instance.disks]
-        required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
-        _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
-
-        snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
-        snode_group = self.cfg.GetNodeGroup(snode_info.group)
-        ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
-                                                                snode_group)
-        _CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info,
-                                ignore=self.op.ignore_ipolicy)
-        if pnode_info.group != snode_info.group:
-          self.LogWarning("The primary and secondary nodes are in two"
-                          " different node groups; the disk parameters"
-                          " from the first disk's node group will be"
-                          " used")
+      self._PreCheckDiskTemplate(pnode_info)
 
     # hvparams processing
     if self.op.hvparams:
@@ -13365,7 +13570,7 @@ class LUInstanceSetParams(LogicalUnit):
       hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
 
       # local check
-      hypervisor.GetHypervisor(hv_type).CheckParameterSyntax(hv_new)
+      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
       _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
       self.hv_proposed = self.hv_new = hv_new # the new actual values
       self.hv_inst = i_hvdict # the new dict (without defaults)
@@ -13435,7 +13640,7 @@ class LUInstanceSetParams(LogicalUnit):
       instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                   instance.hypervisor)
       nodeinfo = self.rpc.call_node_info(mem_check_list, None,
-                                         [instance.hypervisor])
+                                         [instance.hypervisor], False)
       pninfo = nodeinfo[pnode]
       msg = pninfo.fail_msg
       if msg:
@@ -16325,7 +16530,7 @@ class LUNetworkQuery(NoHooksLU):
 
   def CheckArguments(self):
     self.nq = _NetworkQuery(qlang.MakeSimpleFilter("name", self.op.names),
-                            self.op.output_fields, False)
+                            self.op.output_fields, self.op.use_locking)
 
   def ExpandNames(self):
     self.nq.ExpandNames(self)
@@ -16553,6 +16758,7 @@ _QUERY_IMPL = {
   constants.QR_GROUP: _GroupQuery,
   constants.QR_NETWORK: _NetworkQuery,
   constants.QR_OS: _OsQuery,
+  constants.QR_EXTSTORAGE: _ExtStorageQuery,
   constants.QR_EXPORT: _ExportQuery,
   }
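
Registering QR_EXTSTORAGE here is what makes the new provider query
reachable through the generic query dispatch in this file; a lookup along
the lines below resolves the resource name to _ExtStorageQuery. The helper
mirrors the one already present in cmdlib.py, and the client-side OpQuery
shown in the comment is an assumption:

  from ganeti import errors

  # Resolve a query resource name to its implementation class.
  def _GetQueryImplementation(name):
    try:
      return _QUERY_IMPL[name]
    except KeyError:
      raise errors.OpPrereqError("Unknown query resource '%s'" % name,
                                 errors.ECODE_INVAL)

  # A client-side request for the new resource would then look roughly like:
  #   op = opcodes.OpQuery(what=constants.QR_EXTSTORAGE,
  #                        fields=["name", "node_status", "nodegroup_status",
  #                                "parameters"])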