Check if instances use disk templates that get disabled
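
The most visible addition below is the --enabled-storage-types option, handled in both InitCluster and SetClusterParams. As a rough sketch of the normalization both call sites perform (the default list here is a placeholder; the real one is constants.DEFAULT_ENABLED_STORAGE_TYPES, whose contents are not part of this diff):

_DEFAULT_ENABLED_STORAGE_TYPES = ["lvm"]  # placeholder value, for illustration

def _ParseStorageTypes(opt_value, at_init):
  """Split the comma-separated --enabled-storage-types value.

  At "gnt-cluster init" a missing option falls back to the defaults; at
  "gnt-cluster modify" it stays None, which means "leave unchanged".
  """
  if opt_value is not None:
    return opt_value.split(",")
  if at_init:
    return list(_DEFAULT_ENABLED_STORAGE_TYPES)
  return None

assert _ParseStorageTypes("file,lvm", at_init=False) == ["file", "lvm"]
assert _ParseStorageTypes(None, at_init=True) == ["lvm"]
assert _ParseStorageTypes(None, at_init=False) is None
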
diff --git a/lib/client/gnt_cluster.py b/lib/client/gnt_cluster.py
index f1bedce..b5e1b77 100644
--- a/lib/client/gnt_cluster.py
+++ b/lib/client/gnt_cluster.py
@@ -42,6 +42,7 @@ from ganeti import objects
 from ganeti import uidpool
 from ganeti import compat
 from ganeti import netutils
+from ganeti import pathutils
 
 
 ON_OPT = cli_option("--on", default=False,
@@ -49,12 +50,12 @@ ON_OPT = cli_option("--on", default=False,
                     help="Recover from an EPO")
 
 GROUPS_OPT = cli_option("--groups", default=False,
-                    action="store_true", dest="groups",
-                    help="Arguments are node groups instead of nodes")
+                        action="store_true", dest="groups",
+                        help="Arguments are node groups instead of nodes")
 
-SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
-                              action="store_true",
-                              help="Show machine name for every line in output")
+FORCE_FAILOVER = cli_option("--yes-do-it", dest="yes_do_it",
+                            help="Override interactive check for --no-voting",
+                            default=False, action="store_true")
 
 _EPO_PING_INTERVAL = 30 # 30 seconds between pings
 _EPO_PING_TIMEOUT = 1 # 1 second
@@ -143,7 +144,7 @@ def InitCluster(opts, args):
     utils.ForceDictType(diskparams[templ], constants.DISK_DT_TYPES)
 
   # prepare ipolicy dict
-  ipolicy_raw = objects.CreateIPolicyFromOpts(
+  ipolicy_raw = CreateIPolicyFromOpts(
     ispecs_mem_size=opts.ispecs_mem_size,
     ispecs_cpu_count=opts.ispecs_cpu_count,
     ispecs_disk_count=opts.ispecs_disk_count,
@@ -151,6 +152,7 @@ def InitCluster(opts, args):
     ispecs_nic_count=opts.ispecs_nic_count,
     ipolicy_disk_templates=opts.ipolicy_disk_templates,
     ipolicy_vcpu_ratio=opts.ipolicy_vcpu_ratio,
+    ipolicy_spindle_ratio=opts.ipolicy_spindle_ratio,
     fill_all=True)
   ipolicy = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_raw)
 
@@ -192,6 +194,12 @@ def InitCluster(opts, args):
 
   hv_state = dict(opts.hv_state)
 
+  enabled_storage_types = opts.enabled_storage_types
+  if enabled_storage_types is not None:
+    enabled_storage_types = enabled_storage_types.split(",")
+  else:
+    enabled_storage_types = list(constants.DEFAULT_ENABLED_STORAGE_TYPES)
+
   bootstrap.InitCluster(cluster_name=args[0],
                         secondary_ip=opts.secondary_ip,
                         vg_name=vg_name,
@@ -219,6 +227,7 @@ def InitCluster(opts, args):
                         use_external_mip_script=external_ip_setup_script,
                         hv_state=hv_state,
                         disk_state=disk_state,
+                        enabled_storage_types=enabled_storage_types,
                         )
   op = opcodes.OpClusterPostInit()
   SubmitOpCode(op, opts=opts)
@@ -333,7 +342,7 @@ def ShowClusterVersion(opts, args):
   @return: the desired exit code
 
   """
-  cl = GetClient()
+  cl = GetClient(query=True)
   result = cl.QueryClusterInfo()
   ToStdout("Software version: %s", result["software_version"])
   ToStdout("Internode protocol: %s", result["protocol_version"])
@@ -388,7 +397,7 @@ def ShowClusterConfig(opts, args):
   @return: the desired exit code
 
   """
-  cl = GetClient()
+  cl = GetClient(query=True)
   result = cl.QueryClusterInfo()
 
   ToStdout("Cluster name: %s", result["name"])
@@ -445,13 +454,15 @@ def ShowClusterConfig(opts, args):
            result["shared_file_storage_dir"])
   ToStdout("  - maintenance of node health: %s",
            result["maintain_node_health"])
-  ToStdout("  - uid pool: %s",
-            uidpool.FormatUidPool(result["uid_pool"],
-                                  roman=opts.roman_integers))
+  ToStdout("  - uid pool: %s", uidpool.FormatUidPool(result["uid_pool"]))
   ToStdout("  - default instance allocator: %s", result["default_iallocator"])
   ToStdout("  - primary ip version: %d", result["primary_ip_version"])
   ToStdout("  - preallocation wipe disks: %s", result["prealloc_wipe_disks"])
-  ToStdout("  - OS search path: %s", utils.CommaJoin(constants.OS_SEARCH_PATH))
+  ToStdout("  - OS search path: %s", utils.CommaJoin(pathutils.OS_SEARCH_PATH))
+  ToStdout("  - ExtStorage Providers search path: %s",
+           utils.CommaJoin(pathutils.ES_SEARCH_PATH))
+  ToStdout("  - enabled storage types: %s",
+           utils.CommaJoin(result["enabled_storage_types"]))
 
   ToStdout("Default node parameters:")
   _PrintGroupedParams(result["ndparams"], roman=opts.roman_integers)
@@ -462,6 +473,9 @@ def ShowClusterConfig(opts, args):
   ToStdout("Default nic parameters:")
   _PrintGroupedParams(result["nicparams"], roman=opts.roman_integers)
 
+  ToStdout("Default disk parameters:")
+  _PrintGroupedParams(result["diskparams"], roman=opts.roman_integers)
+
   ToStdout("Instance policy - limits for instances:")
   for key in constants.IPOLICY_ISPECS:
     ToStdout("  - %s", key)
@@ -498,7 +512,7 @@ def ClusterCopyFile(opts, args):
                            secondary_ips=opts.use_replication_network,
                            nodegroup=opts.nodegroup)
 
-  srun = ssh.SshRunner(cluster_name=cluster_name)
+  srun = ssh.SshRunner(cluster_name)
   for node in results:
     if not srun.CopyFileToNode(node, filename):
       ToStderr("Copy of file %s to node %s failed", filename, node)
@@ -533,7 +547,12 @@ def RunClusterCommand(opts, args):
     nodes.append(master_node)
 
   for name in nodes:
-    result = srun.Run(name, "root", command)
+    result = srun.Run(name, constants.SSH_LOGIN_USER, command)
+
+    if opts.failure_only and result.exit_code == constants.EXIT_SUCCESS:
+      # Do not output anything for successful commands
+      continue
+
     ToStdout("------------------------------------------------")
     if opts.show_machine_names:
       for line in result.output.splitlines():
@@ -666,6 +685,8 @@ def VerifyDisks(opts, args):
       ToStdout("You need to replace or recreate disks for all the above"
                " instances if this message persists after fixing broken nodes.")
       retcode = constants.EXIT_FAILURE
+    elif not instances:
+      ToStdout("No disks need to be activated.")
 
   return retcode
 
@@ -699,7 +720,7 @@ def MasterFailover(opts, args):
   @return: the desired exit code
 
   """
-  if opts.no_voting:
+  if opts.no_voting and not opts.yes_do_it:
     usertext = ("This will perform the failover even if most other nodes"
                 " are down, or if this node is outdated. This is dangerous"
                 " as it can lead to a non-consistent cluster. Check the"
@@ -783,7 +804,7 @@ def _ReadAndVerifyCert(cert_filename, verify_private_key=False):
   return pem
 
 
-def _RenewCrypto(new_cluster_cert, new_rapi_cert, #pylint: disable=R0911
+def _RenewCrypto(new_cluster_cert, new_rapi_cert, # pylint: disable=R0911
                  rapi_cert_filename, new_spice_cert, spice_cert_filename,
                  spice_cacert_filename, new_confd_hmac_key, new_cds,
                  cds_filename, force):
@@ -875,20 +896,20 @@ def _RenewCrypto(new_cluster_cert, new_rapi_cert, #pylint: disable=R0911
     files_to_copy = []
 
     if new_cluster_cert:
-      files_to_copy.append(constants.NODED_CERT_FILE)
+      files_to_copy.append(pathutils.NODED_CERT_FILE)
 
     if new_rapi_cert or rapi_cert_pem:
-      files_to_copy.append(constants.RAPI_CERT_FILE)
+      files_to_copy.append(pathutils.RAPI_CERT_FILE)
 
     if new_spice_cert or spice_cert_pem:
-      files_to_copy.append(constants.SPICE_CERT_FILE)
-      files_to_copy.append(constants.SPICE_CACERT_FILE)
+      files_to_copy.append(pathutils.SPICE_CERT_FILE)
+      files_to_copy.append(pathutils.SPICE_CACERT_FILE)
 
     if new_confd_hmac_key:
-      files_to_copy.append(constants.CONFD_HMAC_KEY)
+      files_to_copy.append(pathutils.CONFD_HMAC_KEY)
 
     if new_cds or cds:
-      files_to_copy.append(constants.CLUSTER_DOMAIN_SECRET_FILE)
+      files_to_copy.append(pathutils.CLUSTER_DOMAIN_SECRET_FILE)
 
     if files_to_copy:
       for node_name in ctx.nonmaster_nodes:
@@ -948,6 +969,7 @@ def SetClusterParams(opts, args):
           opts.use_external_mip_script is not None or
           opts.prealloc_wipe_disks is not None or
           opts.hv_state or
+          opts.enabled_storage_types or
           opts.disk_state or
           opts.ispecs_mem_size or
           opts.ispecs_cpu_count or
@@ -955,7 +977,8 @@ def SetClusterParams(opts, args):
           opts.ispecs_disk_size or
           opts.ispecs_nic_count or
           opts.ipolicy_disk_templates is not None or
-          opts.ipolicy_vcpu_ratio is not None):
+          opts.ipolicy_vcpu_ratio is not None or
+          opts.ipolicy_spindle_ratio is not None):
     ToStderr("Please give at least one of the parameters.")
     return 1
 
@@ -979,6 +1002,10 @@ def SetClusterParams(opts, args):
   if hvlist is not None:
     hvlist = hvlist.split(",")
 
+  enabled_storage_types = opts.enabled_storage_types
+  if enabled_storage_types is not None:
+    enabled_storage_types = enabled_storage_types.split(",")
+
   # a list of (name, dict) we can pass directly to dict() (or [])
   hvparams = dict(opts.hvparams)
   for hv_params in hvparams.values():
@@ -999,7 +1026,7 @@ def SetClusterParams(opts, args):
   if ndparams is not None:
     utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
 
-  ipolicy = objects.CreateIPolicyFromOpts(
+  ipolicy = CreateIPolicyFromOpts(
     ispecs_mem_size=opts.ispecs_mem_size,
     ispecs_cpu_count=opts.ispecs_cpu_count,
     ispecs_disk_count=opts.ispecs_disk_count,
@@ -1007,6 +1034,7 @@ def SetClusterParams(opts, args):
     ispecs_nic_count=opts.ispecs_nic_count,
     ipolicy_disk_templates=opts.ipolicy_disk_templates,
     ipolicy_vcpu_ratio=opts.ipolicy_vcpu_ratio,
+    ipolicy_spindle_ratio=opts.ipolicy_spindle_ratio,
     )
 
   mnh = opts.maintain_node_health
@@ -1045,31 +1073,33 @@ def SetClusterParams(opts, args):
 
   hv_state = dict(opts.hv_state)
 
-  op = opcodes.OpClusterSetParams(vg_name=vg_name,
-                                  drbd_helper=drbd_helper,
-                                  enabled_hypervisors=hvlist,
-                                  hvparams=hvparams,
-                                  os_hvp=None,
-                                  beparams=beparams,
-                                  nicparams=nicparams,
-                                  ndparams=ndparams,
-                                  diskparams=diskparams,
-                                  ipolicy=ipolicy,
-                                  candidate_pool_size=opts.candidate_pool_size,
-                                  maintain_node_health=mnh,
-                                  uid_pool=uid_pool,
-                                  add_uids=add_uids,
-                                  remove_uids=remove_uids,
-                                  default_iallocator=opts.default_iallocator,
-                                  prealloc_wipe_disks=opts.prealloc_wipe_disks,
-                                  master_netdev=opts.master_netdev,
-                                  master_netmask=opts.master_netmask,
-                                  reserved_lvs=opts.reserved_lvs,
-                                  use_external_mip_script=ext_ip_script,
-                                  hv_state=hv_state,
-                                  disk_state=disk_state,
-                                  )
-  SubmitOpCode(op, opts=opts)
+  op = opcodes.OpClusterSetParams(
+    vg_name=vg_name,
+    drbd_helper=drbd_helper,
+    enabled_hypervisors=hvlist,
+    hvparams=hvparams,
+    os_hvp=None,
+    beparams=beparams,
+    nicparams=nicparams,
+    ndparams=ndparams,
+    diskparams=diskparams,
+    ipolicy=ipolicy,
+    candidate_pool_size=opts.candidate_pool_size,
+    maintain_node_health=mnh,
+    uid_pool=uid_pool,
+    add_uids=add_uids,
+    remove_uids=remove_uids,
+    default_iallocator=opts.default_iallocator,
+    prealloc_wipe_disks=opts.prealloc_wipe_disks,
+    master_netdev=opts.master_netdev,
+    master_netmask=opts.master_netmask,
+    reserved_lvs=opts.reserved_lvs,
+    use_external_mip_script=ext_ip_script,
+    hv_state=hv_state,
+    disk_state=disk_state,
+    enabled_storage_types=enabled_storage_types,
+    )
+  SubmitOrSend(op, opts)
   return 0
 
 
@@ -1390,7 +1420,9 @@ def _EpoOff(opts, node_list, inst_map):
     return constants.EXIT_FAILURE
 
 
-def Epo(opts, args):
+def Epo(opts, args, cl=None, _on_fn=_EpoOn, _off_fn=_EpoOff,
+        _confirm_fn=ConfirmOperation,
+        _stdout_fn=ToStdout, _stderr_fn=ToStderr):
   """EPO operations.
 
   @param opts: the command line options selected by the user
@@ -1401,32 +1433,29 @@ def Epo(opts, args):
 
   """
   if opts.groups and opts.show_all:
-    ToStderr("Only one of --groups or --all are allowed")
+    _stderr_fn("Only one of --groups or --all are allowed")
     return constants.EXIT_FAILURE
   elif args and opts.show_all:
-    ToStderr("Arguments in combination with --all are not allowed")
+    _stderr_fn("Arguments in combination with --all are not allowed")
     return constants.EXIT_FAILURE
 
-  client = GetClient()
+  if cl is None:
+    cl = GetClient()
 
   if opts.groups:
-    node_query_list = itertools.chain(*client.QueryGroups(names=args,
-                                                          fields=["node_list"],
-                                                          use_locking=False))
+    node_query_list = \
+      itertools.chain(*cl.QueryGroups(args, ["node_list"], False))
   else:
     node_query_list = args
 
-  result = client.QueryNodes(names=node_query_list,
-                             fields=["name", "master", "pinst_list",
-                                     "sinst_list", "powered", "offline"],
-                             use_locking=False)
+  result = cl.QueryNodes(node_query_list, ["name", "master", "pinst_list",
+                                           "sinst_list", "powered", "offline"],
+                         False)
+
+  all_nodes = map(compat.fst, result)
   node_list = []
   inst_map = {}
-  for (idx, (node, master, pinsts, sinsts, powered,
-             offline)) in enumerate(result):
-    # Normalize the node_query_list as well
-    if not opts.show_all:
-      node_query_list[idx] = node
+  for (node, master, pinsts, sinsts, powered, offline) in result:
     if not offline:
       for inst in (pinsts + sinsts):
         if inst in inst_map:
@@ -1442,25 +1471,26 @@ def Epo(opts, args):
       # already operating on the master at this point :)
       continue
     elif master and not opts.show_all:
-      ToStderr("%s is the master node, please do a master-failover to another"
-               " node not affected by the EPO or use --all if you intend to"
-               " shutdown the whole cluster", node)
+      _stderr_fn("%s is the master node, please do a master-failover to another"
+                 " node not affected by the EPO or use --all if you intend to"
+                 " shutdown the whole cluster", node)
       return constants.EXIT_FAILURE
     elif powered is None:
-      ToStdout("Node %s does not support out-of-band handling, it can not be"
-               " handled in a fully automated manner", node)
+      _stdout_fn("Node %s does not support out-of-band handling, it can not be"
+                 " handled in a fully automated manner", node)
     elif powered == opts.on:
-      ToStdout("Node %s is already in desired power state, skipping", node)
+      _stdout_fn("Node %s is already in desired power state, skipping", node)
     elif not offline or (offline and powered):
       node_list.append(node)
 
-  if not opts.force and not ConfirmOperation(node_query_list, "nodes", "epo"):
+  if not (opts.force or _confirm_fn(all_nodes, "nodes", "epo")):
     return constants.EXIT_FAILURE
 
   if opts.on:
-    return _EpoOn(opts, node_query_list, node_list, inst_map)
+    return _on_fn(opts, all_nodes, node_list, inst_map)
   else:
-    return _EpoOff(opts, node_list, inst_map)
+    return _off_fn(opts, node_list, inst_map)
+
 
 commands = {
   "init": (
@@ -1472,7 +1502,8 @@ commands = {
      MAINTAIN_NODE_HEALTH_OPT, UIDPOOL_OPT, DRBD_HELPER_OPT, NODRBD_STORAGE_OPT,
      DEFAULT_IALLOCATOR_OPT, PRIMARY_IP_VERSION_OPT, PREALLOC_WIPE_DISKS_OPT,
      NODE_PARAMS_OPT, GLOBAL_SHARED_FILEDIR_OPT, USE_EXTERNAL_MIP_SCRIPT,
-     DISK_PARAMS_OPT, HV_STATE_OPT, DISK_STATE_OPT] + INSTANCE_POLICY_OPTS,
+     DISK_PARAMS_OPT, HV_STATE_OPT, DISK_STATE_OPT, ENABLED_STORAGE_TYPES_OPT]
+     + INSTANCE_POLICY_OPTS,
     "[opts...] <cluster_name>", "Initialises a new cluster configuration"),
   "destroy": (
     DestroyCluster, ARGS_NONE, [YES_DOIT_OPT],
@@ -1498,7 +1529,7 @@ commands = {
     RepairDiskSizes, ARGS_MANY_INSTANCES, [DRY_RUN_OPT, PRIORITY_OPT],
     "[instance...]", "Updates mismatches in recorded disk sizes"),
   "master-failover": (
-    MasterFailover, ARGS_NONE, [NOVOTING_OPT],
+    MasterFailover, ARGS_NONE, [NOVOTING_OPT, FORCE_FAILOVER],
     "", "Makes the current node the master"),
   "master-ping": (
     MasterPing, ARGS_NONE, [],
@@ -1515,7 +1546,7 @@ commands = {
     "[-n node...] <filename>", "Copies a file to all (or only some) nodes"),
   "command": (
     RunClusterCommand, [ArgCommand(min=1)],
-    [NODE_LIST_OPT, NODEGROUP_OPT, SHOW_MACHINE_OPT],
+    [NODE_LIST_OPT, NODEGROUP_OPT, SHOW_MACHINE_OPT, FAILURE_ONLY_OPT],
     "[-n node...] <command>", "Runs a command on all (or only some) nodes"),
   "info": (
     ShowClusterConfig, ARGS_NONE, [ROMAN_OPT],
@@ -1550,7 +1581,7 @@ commands = {
      DRBD_HELPER_OPT, NODRBD_STORAGE_OPT, DEFAULT_IALLOCATOR_OPT,
      RESERVED_LVS_OPT, DRY_RUN_OPT, PRIORITY_OPT, PREALLOC_WIPE_DISKS_OPT,
      NODE_PARAMS_OPT, USE_EXTERNAL_MIP_SCRIPT, DISK_PARAMS_OPT, HV_STATE_OPT,
-     DISK_STATE_OPT] +
+     DISK_STATE_OPT, SUBMIT_OPT, ENABLED_STORAGE_TYPES_OPT] +
     INSTANCE_POLICY_OPTS,
     "[opts...]",
     "Alters the parameters of the cluster"),