Refactor ispecs in ipolicy structures
diff --git a/lib/cli.py b/lib/cli.py
index f46a0a0..7857e18 100644
@@ -1,7 +1,7 @@
 #
 #
 
-# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -81,7 +81,9 @@ __all__ = [
   "DST_NODE_OPT",
   "EARLY_RELEASE_OPT",
   "ENABLED_HV_OPT",
+  "ENABLED_STORAGE_TYPES_OPT",
   "ERROR_CODES_OPT",
+  "FAILURE_ONLY_OPT",
   "FIELDS_OPT",
   "FILESTORE_DIR_OPT",
   "FILESTORE_DRIVER_OPT",
@@ -116,7 +118,6 @@ __all__ = [
   "NET_OPT",
   "NETWORK_OPT",
   "NETWORK6_OPT",
-  "NETWORK_TYPE_OPT",
   "NEW_CLUSTER_CERT_OPT",
   "NEW_CLUSTER_DOMAIN_SECRET_OPT",
   "NEW_CONFD_HMAC_KEY_OPT",
@@ -165,6 +166,7 @@ __all__ = [
   "PRIORITY_OPT",
   "RAPI_CERT_OPT",
   "READD_OPT",
+  "REASON_OPT",
   "REBOOT_TYPE_OPT",
   "REMOVE_INSTANCE_OPT",
   "REMOVE_RESERVED_IPS_OPT",
@@ -232,6 +234,8 @@ __all__ = [
   "FormatError",
   "FormatQueryResult",
   "FormatParameterDict",
+  "FormatParamsDictInfo",
+  "PrintGenericInfo",
   "GenerateTable",
   "AskUser",
   "FormatTimestamp",
@@ -261,6 +265,7 @@ __all__ = [
   "ArgNetwork",
   "ArgNode",
   "ArgOs",
+  "ArgExtStorage",
   "ArgSuggest",
   "ArgUnknown",
   "OPT_COMPL_INST_ADD_NODES",
@@ -271,6 +276,7 @@ __all__ = [
   "OPT_COMPL_ONE_NODEGROUP",
   "OPT_COMPL_ONE_NETWORK",
   "OPT_COMPL_ONE_OS",
+  "OPT_COMPL_ONE_EXTSTORAGE",
   "cli_option",
   "SplitNodeOption",
   "CalculateOSNames",
@@ -314,6 +320,17 @@ TISPECS_CLUSTER_TYPES = {
   constants.ISPECS_STD: constants.VTYPE_INT,
   }
 
+#: User-friendly names for query2 field types
+_QFT_NAMES = {
+  constants.QFT_UNKNOWN: "Unknown",
+  constants.QFT_TEXT: "Text",
+  constants.QFT_BOOL: "Boolean",
+  constants.QFT_NUMBER: "Number",
+  constants.QFT_UNIT: "Storage size",
+  constants.QFT_TIMESTAMP: "Timestamp",
+  constants.QFT_OTHER: "Custom",
+  }
+
 
 class _Argument:
   def __init__(self, min=0, max=None): # pylint: disable=W0622
@@ -373,6 +390,7 @@ class ArgNetwork(_Argument):
 
   """
 
+
 class ArgGroup(_Argument):
   """Node group argument.
 
@@ -409,6 +427,12 @@ class ArgOs(_Argument):
   """
 
 
+class ArgExtStorage(_Argument):
+  """ExtStorage argument.
+
+  """
+
+
 ARGS_NONE = []
 ARGS_MANY_INSTANCES = [ArgInstance()]
 ARGS_MANY_NETWORKS = [ArgNetwork()]
@@ -435,6 +459,7 @@ def _ExtractTagsObject(opts, args):
     retval = kind, None
   elif kind in (constants.TAG_NODEGROUP,
                 constants.TAG_NODE,
+                constants.TAG_NETWORK,
                 constants.TAG_INSTANCE):
     if not args:
       raise errors.OpPrereqError("no arguments passed to the command",
@@ -658,16 +683,18 @@ def check_maybefloat(option, opt, value): # pylint: disable=W0613
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
+ OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
- OPT_COMPL_ONE_NODEGROUP) = range(100, 108)
+ OPT_COMPL_ONE_NODEGROUP) = range(100, 109)
 
-OPT_COMPL_ALL = frozenset([
+OPT_COMPL_ALL = compat.UniqueFrozenset([
   OPT_COMPL_MANY_NODES,
   OPT_COMPL_ONE_NODE,
   OPT_COMPL_ONE_INSTANCE,
   OPT_COMPL_ONE_OS,
+  OPT_COMPL_ONE_EXTSTORAGE,
   OPT_COMPL_ONE_IALLOCATOR,
   OPT_COMPL_ONE_NETWORK,
   OPT_COMPL_INST_ADD_NODES,
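
The switch from frozenset to compat.UniqueFrozenset is a safety net for the completion constants above: they are unpacked from a range, so an accidental duplicate in the list would silently collapse in a plain frozenset, while UniqueFrozenset refuses to build. A hypothetical stand-in, only to illustrate the idea (the real helper lives in lib/compat.py):

  # Hypothetical stand-in for compat.UniqueFrozenset; construction fails on
  # duplicate values instead of silently dropping them like frozenset() does.
  class UniqueFrozenset(frozenset):
    def __new__(cls, iterable):
      items = list(iterable)
      result = super(UniqueFrozenset, cls).__new__(cls, items)
      if len(result) != len(items):
        raise ValueError("Duplicate values: %r" % (items, ))
      return result
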
@@ -1132,6 +1159,12 @@ ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                             help="Comma-separated list of hypervisors",
                             type="string", default=None)
 
+ENABLED_STORAGE_TYPES_OPT = cli_option("--enabled-storage-types",
+                                       dest="enabled_storage_types",
+                                       help="Comma-separated list of "
+                                            "storage methods",
+                                       type="string", default=None)
+
 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                             type="keyval", default={},
                             help="NIC parameters")
@@ -1361,6 +1394,15 @@ SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
                               action="store_true",
                               help="Show machine name for every line in output")
 
+FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
+                              action="store_true",
+                              help=("Hide successful results and show failures"
+                                    " only (determined by the exit code)"))
+
+REASON_OPT = cli_option("--reason", default=None,
+                        help="The reason for executing a VM-state-changing"
+                             " operation")
+
 
 def _PriorityOptionCb(option, _, value, parser):
   """Callback for processing C{--priority} option.
@@ -1498,10 +1540,6 @@ REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
                                      help="Comma-delimited list of"
                                      " reserved IPs to remove")
 
-NETWORK_TYPE_OPT = cli_option("--network-type",
-                              action="store", default=None, dest="network_type",
-                              help="Network type: private, public, None")
-
 NETWORK6_OPT = cli_option("--network6",
                           action="store", default=None, dest="network6",
                           help="IP network in CIDR notation")
@@ -2187,7 +2225,15 @@ def GetClient(query=False):
       connected to the query socket instead of the masterd socket
 
   """
-  if query and constants.ENABLE_SPLIT_QUERY:
+  override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
+  if override_socket:
+    if override_socket == constants.LUXI_OVERRIDE_MASTER:
+      address = pathutils.MASTER_SOCKET
+    elif override_socket == constants.LUXI_OVERRIDE_QUERY:
+      address = pathutils.QUERY_SOCKET
+    else:
+      address = override_socket
+  elif query and constants.ENABLE_SPLIT_QUERY:
     address = pathutils.QUERY_SOCKET
   else:
     address = None
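
With this change the CLI can be pointed at a specific LUXI socket through the environment, and that override takes precedence over the split-query logic. A minimal sketch of the selection order implemented above, assuming the constants and pathutils entries referenced in the hunk:

  import os

  def _ResolveLuxiAddress(query, env=os.environ):
    # Sketch only: mirrors the priority used in GetClient.
    override = env.get(constants.LUXI_OVERRIDE, "")
    if override == constants.LUXI_OVERRIDE_MASTER:
      return pathutils.MASTER_SOCKET
    elif override == constants.LUXI_OVERRIDE_QUERY:
      return pathutils.QUERY_SOCKET
    elif override:
      return override  # any other value is taken as a socket path
    elif query and constants.ENABLE_SPLIT_QUERY:
      return pathutils.QUERY_SOCKET
    return None  # None falls back to the default (masterd) socket
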
@@ -2273,8 +2319,14 @@ def FormatError(err):
   elif isinstance(err, errors.ParameterError):
     obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
   elif isinstance(err, luxi.NoMasterError):
-    obuf.write("Cannot communicate with the master daemon.\nIs it running"
-               " and listening for connections?")
+    if err.args[0] == pathutils.MASTER_SOCKET:
+      daemon = "the master daemon"
+    elif err.args[0] == pathutils.QUERY_SOCKET:
+      daemon = "the config daemon"
+    else:
+      daemon = "socket '%s'" % str(err.args[0])
+    obuf.write("Cannot communicate with %s.\nIs the process running"
+               " and listening for connections?" % daemon)
   elif isinstance(err, luxi.TimeoutError):
     obuf.write("Timeout while talking to the master daemon. Jobs might have"
                " been submitted and will continue to run even if the call"
@@ -3056,6 +3108,21 @@ def GenericList(resource, fields, names, unit, separator, header, cl=None,
   return constants.EXIT_SUCCESS
 
 
+def _FieldDescValues(fdef):
+  """Helper function for L{GenericListFields} to get query field description.
+
+  @type fdef: L{objects.QueryFieldDefinition}
+  @rtype: list
+
+  """
+  return [
+    fdef.name,
+    _QFT_NAMES.get(fdef.kind, fdef.kind),
+    fdef.title,
+    fdef.doc,
+    ]
+
+
 def GenericListFields(resource, fields, separator, header, cl=None):
   """Generic implementation for listing fields for a resource.
 
@@ -3080,11 +3147,12 @@ def GenericListFields(resource, fields, separator, header, cl=None):
 
   columns = [
     TableColumn("Name", str, False),
+    TableColumn("Type", str, False),
     TableColumn("Title", str, False),
     TableColumn("Description", str, False),
     ]
 
-  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]
+  rows = map(_FieldDescValues, response.fields)
 
   for line in FormatTable(rows, columns, header, separator):
     ToStdout(line)
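
Each row of the field listing now carries a human-readable type, produced by _FieldDescValues from the field definition: the kind is translated through _QFT_NAMES (falling back to the raw kind for unknown values) and slotted in between the name and the title. Purely as an illustration, with made-up attribute values:

  # Made-up field definition, following the objects.QueryFieldDefinition
  # slots, just to show the row produced by _FieldDescValues.
  fdef = objects.QueryFieldDefinition(name="oper_ram",
                                      title="Memory",
                                      kind=constants.QFT_UNIT,
                                      doc="Actual memory usage")

  _FieldDescValues(fdef)
  # -> ["oper_ram", "Storage size", "Memory", "Actual memory usage"]
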
@@ -3544,6 +3612,27 @@ def FormatParameterDict(buf, param_dict, actual, level=1):
       buf.write(" %s\n" % val)
 
 
+def FormatParamsDictInfo(param_dict, actual):
+  """Formats a parameter dictionary.
+
+  @type param_dict: dict
+  @param param_dict: the own parameters
+  @type actual: dict
+  @param actual: the current parameter set (including defaults)
+  @rtype: dict
+  @return: dictionary where the value of each parameter is either a fully
+      formatted string or a dictionary containing formatted strings
+
+  """
+  ret = {}
+  for (key, data) in actual.items():
+    if isinstance(data, dict) and data:
+      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
+    else:
+      ret[key] = str(param_dict.get(key, "default (%s)" % data))
+  return ret
+
+
 def ConfirmOperation(names, list_type, text, extra=""):
   """Ask the user to confirm an operation on a list of list_type.
 
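
FormatParamsDictInfo walks the filled parameter set and reports, per key, either the explicitly set value or a "default (...)" marker derived from the filled value, recursing into nested parameter dictionaries. A small illustration with made-up values:

  # Made-up parameters, only to show the shape of the result
  own = {"kernel_path": "/vmlinuz-custom"}
  filled = {"kernel_path": "/vmlinuz-custom", "root_path": "/dev/vda1"}

  FormatParamsDictInfo(own, filled)
  # -> {"kernel_path": "/vmlinuz-custom", "root_path": "default (/dev/vda1)"}
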
@@ -3596,24 +3685,9 @@ def _MaybeParseUnit(elements):
   return parsed
 
 
-def CreateIPolicyFromOpts(ispecs_mem_size=None,
-                          ispecs_cpu_count=None,
-                          ispecs_disk_count=None,
-                          ispecs_disk_size=None,
-                          ispecs_nic_count=None,
-                          ipolicy_disk_templates=None,
-                          ipolicy_vcpu_ratio=None,
-                          ipolicy_spindle_ratio=None,
-                          group_ipolicy=False,
-                          allowed_values=None,
-                          fill_all=False):
-  """Creation of instance policy based on command line options.
-
-  @param fill_all: whether for cluster policies we should ensure that
-    all values are filled
-
-
-  """
+def _InitIspecsFromOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
+                        ispecs_disk_count, ispecs_disk_size, ispecs_nic_count,
+                        group_ipolicy, allowed_values):
   try:
     if ispecs_mem_size:
       ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
@@ -3626,7 +3700,7 @@ def CreateIPolicyFromOpts(ispecs_mem_size=None,
                                errors.ECODE_INVAL)
 
   # prepare ipolicy dict
-  ipolicy_transposed = {
+  ispecs_transposed = {
     constants.ISPEC_MEM_SIZE: ispecs_mem_size,
     constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
     constants.ISPEC_DISK_COUNT: ispecs_disk_count,
@@ -3639,27 +3713,49 @@ def CreateIPolicyFromOpts(ispecs_mem_size=None,
     forced_type = TISPECS_GROUP_TYPES
   else:
     forced_type = TISPECS_CLUSTER_TYPES
-
-  for specs in ipolicy_transposed.values():
+  for specs in ispecs_transposed.values():
     utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)
 
   # then transpose
-  ipolicy_out = objects.MakeEmptyIPolicy()
-  for name, specs in ipolicy_transposed.iteritems():
+  ispecs = {
+    constants.ISPECS_MIN: {},
+    constants.ISPECS_MAX: {},
+    constants.ISPECS_STD: {},
+    }
+  for (name, specs) in ispecs_transposed.iteritems():
     assert name in constants.ISPECS_PARAMETERS
     for key, val in specs.items(): # {min: .. ,max: .., std: ..}
-      ipolicy_out[key][name] = val
+      assert key in ispecs
+      ispecs[key][name] = val
+  for key in constants.ISPECS_MINMAX_KEYS:
+    ipolicy[constants.ISPECS_MINMAX][key] = ispecs[key]
+  ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]
+
+
+def CreateIPolicyFromOpts(ispecs_mem_size=None,
+                          ispecs_cpu_count=None,
+                          ispecs_disk_count=None,
+                          ispecs_disk_size=None,
+                          ispecs_nic_count=None,
+                          ipolicy_disk_templates=None,
+                          ipolicy_vcpu_ratio=None,
+                          ipolicy_spindle_ratio=None,
+                          group_ipolicy=False,
+                          allowed_values=None,
+                          fill_all=False):
+  """Creation of instance policy based on command line options.
+
+  @param fill_all: whether for cluster policies we should ensure that
+    all values are filled
+
+
+  """
+
+  ipolicy_out = objects.MakeEmptyIPolicy()
+  _InitIspecsFromOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
+                      ispecs_disk_count, ispecs_disk_size, ispecs_nic_count,
+                      group_ipolicy, allowed_values)
 
-  # no filldict for non-dicts
-  if not group_ipolicy and fill_all:
-    if ipolicy_disk_templates is None:
-      ipolicy_disk_templates = constants.DISK_TEMPLATES
-    if ipolicy_vcpu_ratio is None:
-      ipolicy_vcpu_ratio = \
-        constants.IPOLICY_DEFAULTS[constants.IPOLICY_VCPU_RATIO]
-    if ipolicy_spindle_ratio is None:
-      ipolicy_spindle_ratio = \
-        constants.IPOLICY_DEFAULTS[constants.IPOLICY_SPINDLE_RATIO]
   if ipolicy_disk_templates is not None:
     ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
   if ipolicy_vcpu_ratio is not None:
@@ -3669,4 +3765,97 @@ def CreateIPolicyFromOpts(ispecs_mem_size=None,
 
   assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)
 
+  if not group_ipolicy and fill_all:
+    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)
+
   return ipolicy_out
+
+
+def _SerializeGenericInfo(buf, data, level, afterkey=False):
+  """Formatting core of L{PrintGenericInfo}.
+
+  @param buf: (string) stream to accumulate the result into
+  @param data: data to format
+  @type level: int
+  @param level: depth in the data hierarchy, used for indenting
+  @type afterkey: bool
+  @param afterkey: True when we are in the middle of a line after a key (used
+      to properly add newlines or indentation)
+
+  """
+  baseind = "  "
+  if isinstance(data, dict):
+    if not data:
+      buf.write("\n")
+    else:
+      if afterkey:
+        buf.write("\n")
+        doindent = True
+      else:
+        doindent = False
+      for key in sorted(data):
+        if doindent:
+          buf.write(baseind * level)
+        else:
+          doindent = True
+        buf.write(key)
+        buf.write(": ")
+        _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
+  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
+    # list of tuples (an ordered dictionary)
+    if afterkey:
+      buf.write("\n")
+      doindent = True
+    else:
+      doindent = False
+    for (key, val) in data:
+      if doindent:
+        buf.write(baseind * level)
+      else:
+        doindent = True
+      buf.write(key)
+      buf.write(": ")
+      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
+  elif isinstance(data, list):
+    if not data:
+      buf.write("\n")
+    else:
+      if afterkey:
+        buf.write("\n")
+        doindent = True
+      else:
+        doindent = False
+      for item in data:
+        if doindent:
+          buf.write(baseind * level)
+        else:
+          doindent = True
+        buf.write("-")
+        buf.write(baseind[1:])
+        _SerializeGenericInfo(buf, item, level + 1)
+  else:
+    # This branch should be only taken for strings, but it's practically
+    # impossible to guarantee that no other types are produced somewhere
+    buf.write(str(data))
+    buf.write("\n")
+
+
+def PrintGenericInfo(data):
+  """Print information formatted according to the hierarchy.
+
+  The output is a valid YAML string.
+
+  @param data: the data to print. It's a hierarchical structure whose elements
+      can be:
+        - dictionaries, where keys are strings and values are of any of the
+          types listed here
+        - lists of pairs (key, value), where key is a string and value is of
+          any of the types listed here; it's a way to encode ordered
+          dictionaries
+        - lists of any of the types listed here
+        - strings
+
+  """
+  buf = StringIO()
+  _SerializeGenericInfo(buf, data, 0)
+  ToStdout(buf.getvalue().rstrip("\n"))
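
Taken together, _SerializeGenericInfo and PrintGenericInfo turn a nested structure of dictionaries, (key, value) pair lists, plain lists and strings into an indented, YAML-like listing on stdout. For example, with made-up data:

  # Made-up data, only to illustrate the output layout
  PrintGenericInfo({
    "Instances": ["inst1.example.com", "inst2.example.com"],
    "Parameters": [("vcpus", "2"), ("memory", "512M")],
    })
  # Prints:
  # Instances:
  #   - inst1.example.com
  #   - inst2.example.com
  # Parameters:
  #   vcpus: 2
  #   memory: 512M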