mcpu: Use new timeout class for timeout
lib/cmdlib.py
index 3ae94b6..a072282 100644
@@ -722,6 +722,26 @@ def _CheckInstanceBridgesExist(lu, instance, node=None):
   _CheckNicsBridgesExist(lu, instance.nics, node)
 
 
+def _CheckOSVariant(os, name):
+  """Check whether an OS name conforms to the os variants specification.
+
+  @type os: L{objects.OS}
+  @param os: OS object to check
+  @type name: string
+  @param name: OS name passed by the user, to check for validity
+
+  """
+  if not os.supported_variants:
+    return
+  try:
+    variant = name.split("+", 1)[1]
+  except IndexError:
+    raise errors.OpPrereqError("OS name must include a variant")
+
+  if variant not in os.supported_variants:
+    raise errors.OpPrereqError("Unsupported OS variant")
+
+
 def _GetNodeInstancesInner(cfg, fn):
   return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]
 
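The convention encoded above is "base+variant": everything after the first
"+" in the user-supplied name is the variant. A minimal sketch of the
behaviour, using a hypothetical stand-in for objects.OS that carries only
the one attribute the helper reads:

  # FakeOS and the names below are illustrative, not part of the patch.
  class FakeOS:
    def __init__(self, supported_variants):
      self.supported_variants = supported_variants

  deb = FakeOS(["lenny", "squeeze"])
  _CheckOSVariant(deb, "debian+lenny")     # ok: variant is supported
  _CheckOSVariant(FakeOS([]), "debian")    # ok: OS declares no variants
  #_CheckOSVariant(deb, "debian")          # raises: variant required
  #_CheckOSVariant(deb, "debian+etch")     # raises: unsupported variant
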
@@ -1915,8 +1935,7 @@ class LUSetClusterParams(LogicalUnit):
       invalid_hvs = set(self.hv_list) - constants.HYPER_TYPES
       if invalid_hvs:
         raise errors.OpPrereqError("Enabled hypervisors contains invalid"
-                                   " entries: %s" %
-                                   utils.CommaJoin(invalid_hvs))
+                                   " entries: %s" % " ,".join(invalid_hvs))
     else:
       self.hv_list = cluster.enabled_hypervisors
 
@@ -2145,7 +2164,9 @@ class LUDiagnoseOS(NoHooksLU):
   _OP_REQP = ["output_fields", "names"]
   REQ_BGL = False
   _FIELDS_STATIC = utils.FieldSet()
-  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")
+  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status", "variants")
+  # Fields that need calculation of global os validity
+  _FIELDS_NEEDVALID = frozenset(["valid", "variants"])
 
   def ExpandNames(self):
     if self.op.names:
@@ -2193,14 +2214,14 @@ class LUDiagnoseOS(NoHooksLU):
     for node_name, nr in rlist.items():
       if nr.fail_msg or not nr.payload:
         continue
-      for name, path, status, diagnose in nr.payload:
+      for name, path, status, diagnose, variants in nr.payload:
         if name not in all_os:
           # build a list of nodes for this os containing empty lists
           # for each node in node_list
           all_os[name] = {}
           for nname in good_nodes:
             all_os[name][nname] = []
-        all_os[name][node_name].append((path, status, diagnose))
+        all_os[name][node_name].append((path, status, diagnose, variants))
     return all_os
 
   def Exec(self, feedback_fn):
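With the extra "variants" field in the RPC payload, the structure built by
_DiagnoseByOS maps OS name to node name to a list of (path, status,
diagnose, variants) tuples. Roughly (node names and paths illustrative):

  # Sketch of the _DiagnoseByOS result shape after this change.
  all_os = {
    "debian": {
      "node1.example.com": [("/srv/ganeti/os/debian", True, "",
                             ["lenny", "squeeze"])],
      "node2.example.com": [("/srv/ganeti/os/debian", True, "",
                             ["lenny"])],
    },
  }
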
@@ -2211,18 +2232,38 @@ class LUDiagnoseOS(NoHooksLU):
     node_data = self.rpc.call_os_diagnose(valid_nodes)
     pol = self._DiagnoseByOS(valid_nodes, node_data)
     output = []
+    calc_valid = self._FIELDS_NEEDVALID.intersection(self.op.output_fields)
+    calc_variants = "variants" in self.op.output_fields
+
     for os_name, os_data in pol.items():
       row = []
+      if calc_valid:
+        valid = True
+        variants = None
+        for osl in os_data.values():
+          valid = valid and osl and osl[0][1]
+          if not valid:
+            variants = None
+            break
+          if calc_variants:
+            node_variants = osl[0][3]
+            if variants is None:
+              variants = node_variants
+            else:
+              variants = [v for v in variants if v in node_variants]
+
       for field in self.op.output_fields:
         if field == "name":
           val = os_name
         elif field == "valid":
-          val = utils.all([osl and osl[0][1] for osl in os_data.values()])
+          val = valid
         elif field == "node_status":
           # this is just a copy of the dict
           val = {}
           for node_name, nos_list in os_data.items():
             val[node_name] = nos_list
+        elif field == "variants":
+          val = variants
         else:
           raise errors.ParameterError(field)
         row.append(val)
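The loop added above computes the variants usable cluster-wide: the
intersection of every node's variant list for the first entry of each OS,
short-circuiting to None as soon as any node reports the OS invalid. The
same logic as a standalone sketch (not part of the patch):

  def _GlobalVariants(os_data):
    """Sketch of the intersection logic in LUDiagnoseOS.Exec above."""
    variants = None
    for osl in os_data.values():
      if not (osl and osl[0][1]):
        # OS missing or invalid on this node: no global variants
        return None
      node_variants = osl[0][3]
      if variants is None:
        variants = node_variants
      else:
        variants = [v for v in variants if v in node_variants]
    return variants
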
@@ -2355,7 +2396,6 @@ class LUQueryNodes(NoHooksLU):
       # if we don't request only static fields, we need to lock the nodes
       self.needed_locks[locking.LEVEL_NODE] = self.wanted
 
-
   def CheckPrereq(self):
     """Check prerequisites.
 
@@ -2979,6 +3019,7 @@ class LUSetNodeParams(LogicalUnit):
 
     # Boolean value that tells us whether we're offlining or draining the node
     offline_or_drain = self.op.offline == True or self.op.drained == True
+    deoffline_or_drain = self.op.offline == False or self.op.drained == False
 
     if (node.master_candidate and
         (self.op.master_candidate == False or offline_or_drain)):
@@ -3002,6 +3043,13 @@ class LUSetNodeParams(LogicalUnit):
       raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
                                  " to master_candidate" % node.name)
 
+    # If we're being deofflined/drained, we'll MC ourself if needed
+    if (deoffline_or_drain and not offline_or_drain and not
+        self.op.master_candidate == True):
+      self.op.master_candidate = _DecideSelfPromotion(self)
+      if self.op.master_candidate:
+        self.LogInfo("Autopromoting node to master candidate")
+
     return
 
   def Exec(self, feedback_fn):
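_DecideSelfPromotion is not shown in this diff; judging from its use here,
it compares the current number of master candidates against the cluster's
candidate pool size. A hedged sketch of what such a helper might look like
(the config accessors named below are assumptions, not taken from this
patch):

  def _DecideSelfPromotion(lu, exceptions=None):
    """Sketch: should this node promote itself to master candidate?"""
    cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
    mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
    # this node would raise the candidate count by one
    mc_should = min(mc_should + 1, cp_size)
    return mc_now < mc_should
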
@@ -3550,6 +3598,13 @@ class LURebootInstance(LogicalUnit):
   _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
   REQ_BGL = False
 
+  def CheckArguments(self):
+    """Check the arguments.
+
+    """
+    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
+                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
+
   def ExpandNames(self):
     if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                    constants.INSTANCE_REBOOT_HARD,
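The CheckArguments pattern introduced here recurs below in
LUShutdownInstance, LURemoveInstance, LUFailoverInstance, LUMoveInstance
and LUExportInstance: getattr falls back to the cluster default when the
opcode does not carry the field, so older clients that never set a timeout
keep working. In isolation (the numeric default is illustrative):

  class Op(object):
    pass

  DEFAULT_SHUTDOWN_TIMEOUT = 120  # stand-in for the real constant

  op = Op()
  # opcode without the attribute: the default applies
  assert getattr(op, "shutdown_timeout", DEFAULT_SHUTDOWN_TIMEOUT) == 120
  # opcode carrying an explicit timeout: it wins
  op.shutdown_timeout = 30
  assert getattr(op, "shutdown_timeout", DEFAULT_SHUTDOWN_TIMEOUT) == 30
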
@@ -3569,6 +3624,7 @@ class LURebootInstance(LogicalUnit):
     env = {
       "IGNORE_SECONDARIES": self.op.ignore_secondaries,
       "REBOOT_TYPE": self.op.reboot_type,
+      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
       }
     env.update(_BuildInstanceHookEnvByObject(self, self.instance))
     nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
@@ -3604,10 +3660,12 @@ class LURebootInstance(LogicalUnit):
       for disk in instance.disks:
         self.cfg.SetDiskID(disk, node_current)
       result = self.rpc.call_instance_reboot(node_current, instance,
-                                             reboot_type)
+                                             reboot_type,
+                                             self.shutdown_timeout)
       result.Raise("Could not reboot instance")
     else:
-      result = self.rpc.call_instance_shutdown(node_current, instance)
+      result = self.rpc.call_instance_shutdown(node_current, instance,
+                                               self.shutdown_timeout)
       result.Raise("Could not shutdown instance for full reboot")
       _ShutdownInstanceDisks(self, instance)
       _StartInstanceDisks(self, instance, ignore_secondaries)
@@ -3630,6 +3688,13 @@ class LUShutdownInstance(LogicalUnit):
   _OP_REQP = ["instance_name"]
   REQ_BGL = False
 
+  def CheckArguments(self):
+    """Check the arguments.
+
+    """
+    self.timeout = getattr(self.op, "timeout",
+                           constants.DEFAULT_SHUTDOWN_TIMEOUT)
+
   def ExpandNames(self):
     self._ExpandAndLockInstance()
 
@@ -3640,6 +3705,7 @@ class LUShutdownInstance(LogicalUnit):
 
     """
     env = _BuildInstanceHookEnvByObject(self, self.instance)
+    env["TIMEOUT"] = self.timeout
     nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
     return env, nl, nl
 
@@ -3660,8 +3726,9 @@ class LUShutdownInstance(LogicalUnit):
     """
     instance = self.instance
     node_current = instance.primary_node
+    timeout = self.timeout
     self.cfg.MarkInstanceDown(instance.name)
-    result = self.rpc.call_instance_shutdown(node_current, instance)
+    result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
     msg = result.fail_msg
     if msg:
       self.proc.LogWarning("Could not shutdown instance: %s" % msg)
@@ -3719,6 +3786,7 @@ class LUReinstallInstance(LogicalUnit):
                                   instance.primary_node))
 
     self.op.os_type = getattr(self.op, "os_type", None)
+    self.op.force_variant = getattr(self.op, "force_variant", False)
     if self.op.os_type is not None:
       # OS verification
       pnode = self.cfg.GetNodeInfo(
@@ -3729,6 +3797,8 @@ class LUReinstallInstance(LogicalUnit):
       result = self.rpc.call_os_get(pnode.name, self.op.os_type)
       result.Raise("OS '%s' not in supported OS list for primary node %s" %
                    (self.op.os_type, pnode.name), prereq=True)
+      if not self.op.force_variant:
+        _CheckOSVariant(result.payload, self.op.os_type)
 
     self.instance = instance
 
@@ -3948,6 +4018,13 @@ class LURemoveInstance(LogicalUnit):
   _OP_REQP = ["instance_name", "ignore_failures"]
   REQ_BGL = False
 
+  def CheckArguments(self):
+    """Check the arguments.
+
+    """
+    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
+                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
+
   def ExpandNames(self):
     self._ExpandAndLockInstance()
     self.needed_locks[locking.LEVEL_NODE] = []
@@ -3964,6 +4041,7 @@ class LURemoveInstance(LogicalUnit):
 
     """
     env = _BuildInstanceHookEnvByObject(self, self.instance)
+    env["SHUTDOWN_TIMEOUT"] = self.shutdown_timeout
     nl = [self.cfg.GetMasterNode()]
     return env, nl, nl
 
@@ -3985,7 +4063,8 @@ class LURemoveInstance(LogicalUnit):
     logging.info("Shutting down instance %s on node %s",
                  instance.name, instance.primary_node)
 
-    result = self.rpc.call_instance_shutdown(instance.primary_node, instance)
+    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
+                                             self.shutdown_timeout)
     msg = result.fail_msg
     if msg:
       if self.op.ignore_failures:
@@ -4296,6 +4375,13 @@ class LUFailoverInstance(LogicalUnit):
   _OP_REQP = ["instance_name", "ignore_consistency"]
   REQ_BGL = False
 
+  def CheckArguments(self):
+    """Check the arguments.
+
+    """
+    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
+                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
+
   def ExpandNames(self):
     self._ExpandAndLockInstance()
     self.needed_locks[locking.LEVEL_NODE] = []
@@ -4313,6 +4399,7 @@ class LUFailoverInstance(LogicalUnit):
     """
     env = {
       "IGNORE_CONSISTENCY": self.op.ignore_consistency,
+      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
       }
     env.update(_BuildInstanceHookEnvByObject(self, self.instance))
     nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
@@ -4377,7 +4464,8 @@ class LUFailoverInstance(LogicalUnit):
     logging.info("Shutting down instance %s on node %s",
                  instance.name, source_node)
 
-    result = self.rpc.call_instance_shutdown(source_node, instance)
+    result = self.rpc.call_instance_shutdown(source_node, instance,
+                                             self.shutdown_timeout)
     msg = result.fail_msg
     if msg:
       if self.op.ignore_consistency:
@@ -4469,6 +4557,13 @@ class LUMoveInstance(LogicalUnit):
   _OP_REQP = ["instance_name", "target_node"]
   REQ_BGL = False
 
+  def CheckArguments(self):
+    """Check the arguments.
+
+    """
+    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
+                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
+
   def ExpandNames(self):
     self._ExpandAndLockInstance()
     target_node = self.cfg.ExpandNodeName(self.op.target_node)
@@ -4491,6 +4586,7 @@ class LUMoveInstance(LogicalUnit):
     """
     env = {
       "TARGET_NODE": self.op.target_node,
+      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
       }
     env.update(_BuildInstanceHookEnvByObject(self, self.instance))
     nl = [self.cfg.GetMasterNode()] + [self.instance.primary_node,
@@ -4554,7 +4650,8 @@ class LUMoveInstance(LogicalUnit):
     self.LogInfo("Shutting down instance %s on source node %s",
                  instance.name, source_node)
 
-    result = self.rpc.call_instance_shutdown(source_node, instance)
+    result = self.rpc.call_instance_shutdown(source_node, instance,
+                                             self.shutdown_timeout)
     msg = result.fail_msg
     if msg:
       if self.op.ignore_consistency:
@@ -5551,9 +5648,15 @@ class LUCreateInstance(LogicalUnit):
           self.op.src_path = src_path = \
             os.path.join(constants.EXPORT_DIR, src_path)
 
+      # On import force_variant must be True, because if we forced it at
+      # initial install, our only chance when importing it back is that it
+      # works again!
+      self.op.force_variant = True
+
     else: # INSTANCE_CREATE
       if getattr(self.op, "os_type", None) is None:
         raise errors.OpPrereqError("No guest OS specified")
+      self.op.force_variant = getattr(self.op, "force_variant", False)
 
   def _RunAllocator(self):
     """Run the allocator based on input opcode.
@@ -5783,6 +5886,8 @@ class LUCreateInstance(LogicalUnit):
     result = self.rpc.call_os_get(pnode.name, self.op.os_type)
     result.Raise("OS '%s' not in supported os list for primary node %s" %
                  (self.op.os_type, pnode.name), prereq=True)
+    if not self.op.force_variant:
+      _CheckOSVariant(result.payload, self.op.os_type)
 
     _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
 
@@ -7619,6 +7724,13 @@ class LUExportInstance(LogicalUnit):
   _OP_REQP = ["instance_name", "target_node", "shutdown"]
   REQ_BGL = False
 
+  def CheckArguments(self):
+    """Check the arguments.
+
+    """
+    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
+                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
+
   def ExpandNames(self):
     self._ExpandAndLockInstance()
     # FIXME: lock only instance primary and destination node
@@ -7644,6 +7756,7 @@ class LUExportInstance(LogicalUnit):
     env = {
       "EXPORT_NODE": self.op.target_node,
       "EXPORT_DO_SHUTDOWN": self.op.shutdown,
+      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
       }
     env.update(_BuildInstanceHookEnvByObject(self, self.instance))
     nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
@@ -7688,7 +7801,8 @@ class LUExportInstance(LogicalUnit):
     if self.op.shutdown:
       # shutdown the instance, but not the disks
       feedback_fn("Shutting down instance %s" % instance.name)
-      result = self.rpc.call_instance_shutdown(src_node, instance)
+      result = self.rpc.call_instance_shutdown(src_node, instance,
+                                               self.shutdown_timeout)
       result.Raise("Could not shutdown instance %s on"
                    " node %s" % (instance.name, src_node))