#
#
-# Copyright (C) 2006, 2007, 2010, 2011 Google Inc.
+# Copyright (C) 2006, 2007, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
from ganeti import uidpool
from ganeti import compat
from ganeti import netutils
+from ganeti import pathutils
ON_OPT = cli_option("--on", default=False,
help="Recover from an EPO")
GROUPS_OPT = cli_option("--groups", default=False,
- action="store_true", dest="groups",
- help="Arguments are node groups instead of nodes")
+ action="store_true", dest="groups",
+ help="Arguments are node groups instead of nodes")
+
+FORCE_FAILOVER = cli_option("--yes-do-it", dest="yes_do_it",
+ help="Override interactive check for --no-voting",
+ default=False, action="store_true")
_EPO_PING_INTERVAL = 30 # 30 seconds between pings
_EPO_PING_TIMEOUT = 1 # 1 second
beparams = opts.beparams
nicparams = opts.nicparams
+ diskparams = dict(opts.diskparams)
+
+ # check the disk template types here, as we cannot rely on the type check done
+ # by the opcode parameter types
+ diskparams_keys = set(diskparams.keys())
+ if not (diskparams_keys <= constants.DISK_TEMPLATES):
+ unknown = utils.NiceSort(diskparams_keys - constants.DISK_TEMPLATES)
+ ToStderr("Disk templates unknown: %s" % utils.CommaJoin(unknown))
+ return 1
+
# prepare beparams dict
beparams = objects.FillDict(constants.BEC_DEFAULTS, beparams)
- utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
+ utils.ForceDictType(beparams, constants.BES_PARAMETER_COMPAT)
# prepare nicparams dict
nicparams = objects.FillDict(constants.NICC_DEFAULTS, nicparams)
hvparams[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], hvparams[hv])
utils.ForceDictType(hvparams[hv], constants.HVS_PARAMETER_TYPES)
+ # prepare diskparams dict
+ for templ in constants.DISK_TEMPLATES:
+ if templ not in diskparams:
+ diskparams[templ] = {}
+ diskparams[templ] = objects.FillDict(constants.DISK_DT_DEFAULTS[templ],
+ diskparams[templ])
+ utils.ForceDictType(diskparams[templ], constants.DISK_DT_TYPES)
+
+ # prepare ipolicy dict
+ ipolicy = CreateIPolicyFromOpts(
+ ispecs_mem_size=opts.ispecs_mem_size,
+ ispecs_cpu_count=opts.ispecs_cpu_count,
+ ispecs_disk_count=opts.ispecs_disk_count,
+ ispecs_disk_size=opts.ispecs_disk_size,
+ ispecs_nic_count=opts.ispecs_nic_count,
+ ipolicy_disk_templates=opts.ipolicy_disk_templates,
+ ipolicy_vcpu_ratio=opts.ipolicy_vcpu_ratio,
+ ipolicy_spindle_ratio=opts.ipolicy_spindle_ratio,
+ fill_all=True)
+
if opts.candidate_pool_size is None:
opts.candidate_pool_size = constants.MASTER_POOL_SIZE_DEFAULT
if opts.prealloc_wipe_disks is None:
opts.prealloc_wipe_disks = False
+ external_ip_setup_script = opts.use_external_mip_script
+ if external_ip_setup_script is None:
+ external_ip_setup_script = False
+
try:
primary_ip_version = int(opts.primary_ip_version)
except (ValueError, TypeError), err:
ToStderr("Invalid primary ip version value: %s" % str(err))
return 1
+ master_netmask = opts.master_netmask
+ try:
+ if master_netmask is not None:
+ master_netmask = int(master_netmask)
+ except (ValueError, TypeError), err:
+ ToStderr("Invalid master netmask value: %s" % str(err))
+ return 1
+
+ if opts.disk_state:
+ disk_state = utils.FlatToDict(opts.disk_state)
+ else:
+ disk_state = {}
+
+ hv_state = dict(opts.hv_state)
+
+ enabled_storage_types = opts.enabled_storage_types
+ if enabled_storage_types is not None:
+ enabled_storage_types = enabled_storage_types.split(",")
+ else:
+ enabled_storage_types = list(constants.DEFAULT_ENABLED_STORAGE_TYPES)
+
bootstrap.InitCluster(cluster_name=args[0],
secondary_ip=opts.secondary_ip,
vg_name=vg_name,
mac_prefix=opts.mac_prefix,
+ master_netmask=master_netmask,
master_netdev=master_netdev,
file_storage_dir=opts.file_storage_dir,
shared_file_storage_dir=opts.shared_file_storage_dir,
beparams=beparams,
nicparams=nicparams,
ndparams=ndparams,
+ diskparams=diskparams,
+ ipolicy=ipolicy,
candidate_pool_size=opts.candidate_pool_size,
modify_etc_hosts=opts.modify_etc_hosts,
modify_ssh_setup=opts.modify_ssh_setup,
default_iallocator=opts.default_iallocator,
primary_ip_version=primary_ip_version,
prealloc_wipe_disks=opts.prealloc_wipe_disks,
+ use_external_mip_script=external_ip_setup_script,
+ hv_state=hv_state,
+ disk_state=disk_state,
+ enabled_storage_types=enabled_storage_types,
)
op = opcodes.OpClusterPostInit()
SubmitOpCode(op, opts=opts)
return 0
+def ActivateMasterIp(opts, args):
+ """Activates the master IP.
+
+ """
+ op = opcodes.OpClusterActivateMasterIp()
+ SubmitOpCode(op)
+ return 0
+
+
+def DeactivateMasterIp(opts, args):
+ """Deactivates the master IP.
+
+ """
+ if not opts.confirm:
+ usertext = ("This will disable the master IP. All the open connections to"
+ " the master IP will be closed. To reach the master you will"
+ " need to use its node IP."
+ " Continue?")
+ if not AskUser(usertext):
+ return 1
+
+ op = opcodes.OpClusterDeactivateMasterIp()
+ SubmitOpCode(op)
+ return 0
+
+
def RedistributeConfig(opts, args):
"""Forces push of the cluster configuration.
@return: the desired exit code
"""
- cl = GetClient()
+ cl = GetClient(query=True)
result = cl.QueryClusterInfo()
ToStdout("Software version: %s", result["software_version"])
ToStdout("Internode protocol: %s", result["protocol_version"])
return 0
-def _PrintGroupedParams(paramsdict, level=1, roman=False):
- """Print Grouped parameters (be, nic, disk) by group.
+def _FormatGroupedParams(paramsdict, roman=False):
+ """Format Grouped parameters (be, nic, disk) by group.
@type paramsdict: dict of dicts
@param paramsdict: {group: {param: value, ...}, ...}
- @type level: int
- @param level: Level of indention
+ @rtype: dict of dicts
+ @return: copy of the input dictionaries with strings as values
"""
- indent = " " * level
- for item, val in sorted(paramsdict.items()):
+ ret = {}
+ for (item, val) in paramsdict.items():
if isinstance(val, dict):
- ToStdout("%s- %s:", indent, item)
- _PrintGroupedParams(val, level=level + 1, roman=roman)
+ ret[item] = _FormatGroupedParams(val, roman=roman)
elif roman and isinstance(val, int):
- ToStdout("%s %s: %s", indent, item, compat.TryToRoman(val))
+ ret[item] = compat.TryToRoman(val)
else:
- ToStdout("%s %s: %s", indent, item, val)
+ ret[item] = str(val)
+ return ret
def ShowClusterConfig(opts, args):
@return: the desired exit code
"""
- cl = GetClient()
+ cl = GetClient(query=True)
result = cl.QueryClusterInfo()
- ToStdout("Cluster name: %s", result["name"])
- ToStdout("Cluster UUID: %s", result["uuid"])
-
- ToStdout("Creation time: %s", utils.FormatTime(result["ctime"]))
- ToStdout("Modification time: %s", utils.FormatTime(result["mtime"]))
-
- ToStdout("Master node: %s", result["master"])
-
- ToStdout("Architecture (this node): %s (%s)",
- result["architecture"][0], result["architecture"][1])
-
if result["tags"]:
tags = utils.CommaJoin(utils.NiceSort(result["tags"]))
else:
tags = "(none)"
-
- ToStdout("Tags: %s", tags)
-
- ToStdout("Default hypervisor: %s", result["default_hypervisor"])
- ToStdout("Enabled hypervisors: %s",
- utils.CommaJoin(result["enabled_hypervisors"]))
-
- ToStdout("Hypervisor parameters:")
- _PrintGroupedParams(result["hvparams"])
-
- ToStdout("OS-specific hypervisor parameters:")
- _PrintGroupedParams(result["os_hvp"])
-
- ToStdout("OS parameters:")
- _PrintGroupedParams(result["osparams"])
-
- ToStdout("Hidden OSes: %s", utils.CommaJoin(result["hidden_os"]))
- ToStdout("Blacklisted OSes: %s", utils.CommaJoin(result["blacklisted_os"]))
-
- ToStdout("Cluster parameters:")
- ToStdout(" - candidate pool size: %s",
- compat.TryToRoman(result["candidate_pool_size"],
- convert=opts.roman_integers))
- ToStdout(" - master netdev: %s", result["master_netdev"])
- ToStdout(" - lvm volume group: %s", result["volume_group_name"])
if result["reserved_lvs"]:
reserved_lvs = utils.CommaJoin(result["reserved_lvs"])
else:
reserved_lvs = "(none)"
- ToStdout(" - lvm reserved volumes: %s", reserved_lvs)
- ToStdout(" - drbd usermode helper: %s", result["drbd_usermode_helper"])
- ToStdout(" - file storage path: %s", result["file_storage_dir"])
- ToStdout(" - shared file storage path: %s",
- result["shared_file_storage_dir"])
- ToStdout(" - maintenance of node health: %s",
- result["maintain_node_health"])
- ToStdout(" - uid pool: %s",
- uidpool.FormatUidPool(result["uid_pool"],
- roman=opts.roman_integers))
- ToStdout(" - default instance allocator: %s", result["default_iallocator"])
- ToStdout(" - primary ip version: %d", result["primary_ip_version"])
- ToStdout(" - preallocation wipe disks: %s", result["prealloc_wipe_disks"])
- ToStdout(" - OS search path: %s", utils.CommaJoin(constants.OS_SEARCH_PATH))
-
- ToStdout("Default node parameters:")
- _PrintGroupedParams(result["ndparams"], roman=opts.roman_integers)
-
- ToStdout("Default instance parameters:")
- _PrintGroupedParams(result["beparams"], roman=opts.roman_integers)
-
- ToStdout("Default nic parameters:")
- _PrintGroupedParams(result["nicparams"], roman=opts.roman_integers)
+ info = [
+ ("Cluster name", result["name"]),
+ ("Cluster UUID", result["uuid"]),
+
+ ("Creation time", utils.FormatTime(result["ctime"])),
+ ("Modification time", utils.FormatTime(result["mtime"])),
+
+ ("Master node", result["master"]),
+
+ ("Architecture (this node)",
+ "%s (%s)" % (result["architecture"][0], result["architecture"][1])),
+
+ ("Tags", tags),
+
+ ("Default hypervisor", result["default_hypervisor"]),
+ ("Enabled hypervisors",
+ utils.CommaJoin(result["enabled_hypervisors"])),
+
+ ("Hypervisor parameters", _FormatGroupedParams(result["hvparams"])),
+
+ ("OS-specific hypervisor parameters",
+ _FormatGroupedParams(result["os_hvp"])),
+
+ ("OS parameters", _FormatGroupedParams(result["osparams"])),
+
+ ("Hidden OSes", utils.CommaJoin(result["hidden_os"])),
+ ("Blacklisted OSes", utils.CommaJoin(result["blacklisted_os"])),
+
+ ("Cluster parameters", [
+ ("candidate pool size",
+ compat.TryToRoman(result["candidate_pool_size"],
+ convert=opts.roman_integers)),
+ ("master netdev", result["master_netdev"]),
+ ("master netmask", result["master_netmask"]),
+ ("use external master IP address setup script",
+ result["use_external_mip_script"]),
+ ("lvm volume group", result["volume_group_name"]),
+ ("lvm reserved volumes", reserved_lvs),
+ ("drbd usermode helper", result["drbd_usermode_helper"]),
+ ("file storage path", result["file_storage_dir"]),
+ ("shared file storage path", result["shared_file_storage_dir"]),
+ ("maintenance of node health", result["maintain_node_health"]),
+ ("uid pool", uidpool.FormatUidPool(result["uid_pool"])),
+ ("default instance allocator", result["default_iallocator"]),
+ ("primary ip version", result["primary_ip_version"]),
+ ("preallocation wipe disks", result["prealloc_wipe_disks"]),
+ ("OS search path", utils.CommaJoin(pathutils.OS_SEARCH_PATH)),
+ ("ExtStorage Providers search path",
+ utils.CommaJoin(pathutils.ES_SEARCH_PATH)),
+ ("enabled storage types",
+ utils.CommaJoin(result["enabled_storage_types"])),
+ ]),
+
+ ("Default node parameters",
+ _FormatGroupedParams(result["ndparams"], roman=opts.roman_integers)),
+
+ ("Default instance parameters",
+ _FormatGroupedParams(result["beparams"], roman=opts.roman_integers)),
+
+ ("Default nic parameters",
+ _FormatGroupedParams(result["nicparams"], roman=opts.roman_integers)),
+
+ ("Default disk parameters",
+ _FormatGroupedParams(result["diskparams"], roman=opts.roman_integers)),
+
+ ("Instance policy - limits for instances",
+ [
+ (key,
+ _FormatGroupedParams(result["ipolicy"][constants.ISPECS_MINMAX][key],
+ roman=opts.roman_integers))
+ for key in constants.ISPECS_MINMAX_KEYS
+ ] +
+ [
+ (constants.ISPECS_STD,
+ _FormatGroupedParams(result["ipolicy"][constants.ISPECS_STD],
+ roman=opts.roman_integers)),
+ ("enabled disk templates",
+ utils.CommaJoin(result["ipolicy"][constants.IPOLICY_DTS])),
+ ] +
+ [
+ (key, result["ipolicy"][key])
+ for key in constants.IPOLICY_PARAMETERS
+ ]),
+ ]
+
+ PrintGenericInfo(info)
return 0
secondary_ips=opts.use_replication_network,
nodegroup=opts.nodegroup)
- srun = ssh.SshRunner(cluster_name=cluster_name)
+ srun = ssh.SshRunner(cluster_name)
for node in results:
if not srun.CopyFileToNode(node, filename):
ToStderr("Copy of file %s to node %s failed", filename, node)
nodes.append(master_node)
for name in nodes:
- result = srun.Run(name, "root", command)
+ result = srun.Run(name, constants.SSH_LOGIN_USER, command)
+
+ if opts.failure_only and result.exit_code == constants.EXIT_SUCCESS:
+ # Do not output anything for successful commands
+ continue
+
ToStdout("------------------------------------------------")
- ToStdout("node: %s", name)
- ToStdout("%s", result.output)
+ if opts.show_machine_names:
+ for line in result.output.splitlines():
+ ToStdout("%s: %s", name, line)
+ else:
+ ToStdout("node: %s", name)
+ ToStdout("%s", result.output)
ToStdout("return code = %s", result.exit_code)
return 0
error_codes=opts.error_codes,
debug_simulate_errors=opts.simulate_errors,
skip_checks=skip_checks,
+ ignore_errors=opts.ignore_errors,
group_name=opts.nodegroup)
result = SubmitOpCode(op, cl=cl, opts=opts)
ToStdout("You need to replace or recreate disks for all the above"
" instances if this message persists after fixing broken nodes.")
retcode = constants.EXIT_FAILURE
+ elif not instances:
+ ToStdout("No disks need to be activated.")
return retcode
@return: the desired exit code
"""
- if opts.no_voting:
+ if opts.no_voting and not opts.yes_do_it:
usertext = ("This will perform the failover even if most other nodes"
" are down, or if this node is outdated. This is dangerous"
" as it can lead to a non-consistent cluster. Check the"
ToStdout("%s %s", path, tag)
-def _RenewCrypto(new_cluster_cert, new_rapi_cert, rapi_cert_filename,
- new_confd_hmac_key, new_cds, cds_filename,
- force):
+def _ReadAndVerifyCert(cert_filename, verify_private_key=False):
+ """Reads and verifies an X509 certificate.
+
+ @type cert_filename: string
+ @param cert_filename: the path of the file containing the certificate to
+ verify encoded in PEM format
+ @type verify_private_key: bool
+ @param verify_private_key: whether to verify the private key in addition to
+ the public certificate
+ @rtype: string
+ @return: a string containing the PEM-encoded certificate.
+
+ """
+ try:
+ pem = utils.ReadFile(cert_filename)
+ except IOError, err:
+ raise errors.X509CertError(cert_filename,
+ "Unable to read certificate: %s" % str(err))
+
+ try:
+ OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, pem)
+ except Exception, err:
+ raise errors.X509CertError(cert_filename,
+ "Unable to load certificate: %s" % str(err))
+
+ if verify_private_key:
+ try:
+ OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, pem)
+ except Exception, err:
+ raise errors.X509CertError(cert_filename,
+ "Unable to load private key: %s" % str(err))
+
+ return pem
+
+
+def _RenewCrypto(new_cluster_cert, new_rapi_cert, # pylint: disable=R0911
+ rapi_cert_filename, new_spice_cert, spice_cert_filename,
+ spice_cacert_filename, new_confd_hmac_key, new_cds,
+ cds_filename, force):
"""Renews cluster certificates, keys and secrets.
@type new_cluster_cert: bool
@param new_rapi_cert: Whether to generate a new RAPI certificate
@type rapi_cert_filename: string
@param rapi_cert_filename: Path to file containing new RAPI certificate
+ @type new_spice_cert: bool
+ @param new_spice_cert: Whether to generate a new SPICE certificate
+ @type spice_cert_filename: string
+ @param spice_cert_filename: Path to file containing new SPICE certificate
+ @type spice_cacert_filename: string
+ @param spice_cacert_filename: Path to file containing the certificate of the
+ CA that signed the SPICE certificate
@type new_confd_hmac_key: bool
@param new_confd_hmac_key: Whether to generate a new HMAC key
@type new_cds: bool
"""
if new_rapi_cert and rapi_cert_filename:
- ToStderr("Only one of the --new-rapi-certficate and --rapi-certificate"
+ ToStderr("Only one of the --new-rapi-certificate and --rapi-certificate"
" options can be specified at the same time.")
return 1
" the same time.")
return 1
- if rapi_cert_filename:
- # Read and verify new certificate
- try:
- rapi_cert_pem = utils.ReadFile(rapi_cert_filename)
-
- OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
- rapi_cert_pem)
- except Exception, err: # pylint: disable=W0703
- ToStderr("Can't load new RAPI certificate from %s: %s" %
- (rapi_cert_filename, str(err)))
- return 1
+ if new_spice_cert and (spice_cert_filename or spice_cacert_filename):
+ ToStderr("When using --new-spice-certificate, the --spice-certificate"
+ " and --spice-ca-certificate must not be used.")
+ return 1
- try:
- OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, rapi_cert_pem)
- except Exception, err: # pylint: disable=W0703
- ToStderr("Can't load new RAPI private key from %s: %s" %
- (rapi_cert_filename, str(err)))
- return 1
+ if bool(spice_cacert_filename) ^ bool(spice_cert_filename):
+ ToStderr("Both --spice-certificate and --spice-ca-certificate must be"
+ " specified.")
+ return 1
- else:
- rapi_cert_pem = None
+ rapi_cert_pem, spice_cert_pem, spice_cacert_pem = (None, None, None)
+ try:
+ if rapi_cert_filename:
+ rapi_cert_pem = _ReadAndVerifyCert(rapi_cert_filename, True)
+ if spice_cert_filename:
+ spice_cert_pem = _ReadAndVerifyCert(spice_cert_filename, True)
+ spice_cacert_pem = _ReadAndVerifyCert(spice_cacert_filename)
+ except errors.X509CertError, err:
+ ToStderr("Unable to load X509 certificate from %s: %s", err[0], err[1])
+ return 1
if cds_filename:
try:
def _RenewCryptoInner(ctx):
ctx.feedback_fn("Updating certificates and keys")
- bootstrap.GenerateClusterCrypto(new_cluster_cert, new_rapi_cert,
+ bootstrap.GenerateClusterCrypto(new_cluster_cert,
+ new_rapi_cert,
+ new_spice_cert,
new_confd_hmac_key,
new_cds,
rapi_cert_pem=rapi_cert_pem,
+ spice_cert_pem=spice_cert_pem,
+ spice_cacert_pem=spice_cacert_pem,
cds=cds)
files_to_copy = []
if new_cluster_cert:
- files_to_copy.append(constants.NODED_CERT_FILE)
+ files_to_copy.append(pathutils.NODED_CERT_FILE)
if new_rapi_cert or rapi_cert_pem:
- files_to_copy.append(constants.RAPI_CERT_FILE)
+ files_to_copy.append(pathutils.RAPI_CERT_FILE)
+
+ if new_spice_cert or spice_cert_pem:
+ files_to_copy.append(pathutils.SPICE_CERT_FILE)
+ files_to_copy.append(pathutils.SPICE_CACERT_FILE)
if new_confd_hmac_key:
- files_to_copy.append(constants.CONFD_HMAC_KEY)
+ files_to_copy.append(pathutils.CONFD_HMAC_KEY)
if new_cds or cds:
- files_to_copy.append(constants.CLUSTER_DOMAIN_SECRET_FILE)
+ files_to_copy.append(pathutils.CLUSTER_DOMAIN_SECRET_FILE)
if files_to_copy:
for node_name in ctx.nonmaster_nodes:
return _RenewCrypto(opts.new_cluster_cert,
opts.new_rapi_cert,
opts.rapi_cert,
+ opts.new_spice_cert,
+ opts.spice_cert,
+ opts.spice_cacert,
opts.new_confd_hmac_key,
opts.new_cluster_domain_secret,
opts.cluster_domain_secret,
if not (not opts.lvm_storage or opts.vg_name or
not opts.drbd_storage or opts.drbd_helper or
opts.enabled_hypervisors or opts.hvparams or
- opts.beparams or opts.nicparams or opts.ndparams or
+ opts.beparams or opts.nicparams or
+ opts.ndparams or opts.diskparams or
opts.candidate_pool_size is not None or
opts.uid_pool is not None or
opts.maintain_node_health is not None or
opts.default_iallocator is not None or
opts.reserved_lvs is not None or
opts.master_netdev is not None or
- opts.prealloc_wipe_disks is not None):
+ opts.master_netmask is not None or
+ opts.use_external_mip_script is not None or
+ opts.prealloc_wipe_disks is not None or
+ opts.hv_state or
+ opts.enabled_storage_types or
+ opts.disk_state or
+ opts.ispecs_mem_size or
+ opts.ispecs_cpu_count or
+ opts.ispecs_disk_count or
+ opts.ispecs_disk_size or
+ opts.ispecs_nic_count or
+ opts.ipolicy_disk_templates is not None or
+ opts.ipolicy_vcpu_ratio is not None or
+ opts.ipolicy_spindle_ratio is not None):
ToStderr("Please give at least one of the parameters.")
return 1
if hvlist is not None:
hvlist = hvlist.split(",")
+ enabled_storage_types = opts.enabled_storage_types
+ if enabled_storage_types is not None:
+ enabled_storage_types = enabled_storage_types.split(",")
+
# a list of (name, dict) we can pass directly to dict() (or [])
hvparams = dict(opts.hvparams)
for hv_params in hvparams.values():
utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
+ diskparams = dict(opts.diskparams)
+
+ for dt_params in diskparams.values():
+ utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
+
beparams = opts.beparams
- utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
+ utils.ForceDictType(beparams, constants.BES_PARAMETER_COMPAT)
nicparams = opts.nicparams
utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)
if ndparams is not None:
utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
+ ipolicy = CreateIPolicyFromOpts(
+ ispecs_mem_size=opts.ispecs_mem_size,
+ ispecs_cpu_count=opts.ispecs_cpu_count,
+ ispecs_disk_count=opts.ispecs_disk_count,
+ ispecs_disk_size=opts.ispecs_disk_size,
+ ispecs_nic_count=opts.ispecs_nic_count,
+ ipolicy_disk_templates=opts.ipolicy_disk_templates,
+ ipolicy_vcpu_ratio=opts.ipolicy_vcpu_ratio,
+ ipolicy_spindle_ratio=opts.ipolicy_spindle_ratio,
+ )
+
mnh = opts.maintain_node_health
uid_pool = opts.uid_pool
else:
opts.reserved_lvs = utils.UnescapeAndSplit(opts.reserved_lvs, sep=",")
- op = opcodes.OpClusterSetParams(vg_name=vg_name,
- drbd_helper=drbd_helper,
- enabled_hypervisors=hvlist,
- hvparams=hvparams,
- os_hvp=None,
- beparams=beparams,
- nicparams=nicparams,
- ndparams=ndparams,
- candidate_pool_size=opts.candidate_pool_size,
- maintain_node_health=mnh,
- uid_pool=uid_pool,
- add_uids=add_uids,
- remove_uids=remove_uids,
- default_iallocator=opts.default_iallocator,
- prealloc_wipe_disks=opts.prealloc_wipe_disks,
- master_netdev=opts.master_netdev,
- reserved_lvs=opts.reserved_lvs)
- SubmitOpCode(op, opts=opts)
+ if opts.master_netmask is not None:
+ try:
+ opts.master_netmask = int(opts.master_netmask)
+ except ValueError:
+ ToStderr("The --master-netmask option expects an int parameter.")
+ return 1
+
+ ext_ip_script = opts.use_external_mip_script
+
+ if opts.disk_state:
+ disk_state = utils.FlatToDict(opts.disk_state)
+ else:
+ disk_state = {}
+
+ hv_state = dict(opts.hv_state)
+
+ op = opcodes.OpClusterSetParams(
+ vg_name=vg_name,
+ drbd_helper=drbd_helper,
+ enabled_hypervisors=hvlist,
+ hvparams=hvparams,
+ os_hvp=None,
+ beparams=beparams,
+ nicparams=nicparams,
+ ndparams=ndparams,
+ diskparams=diskparams,
+ ipolicy=ipolicy,
+ candidate_pool_size=opts.candidate_pool_size,
+ maintain_node_health=mnh,
+ uid_pool=uid_pool,
+ add_uids=add_uids,
+ remove_uids=remove_uids,
+ default_iallocator=opts.default_iallocator,
+ prealloc_wipe_disks=opts.prealloc_wipe_disks,
+ master_netdev=opts.master_netdev,
+ master_netmask=opts.master_netmask,
+ reserved_lvs=opts.reserved_lvs,
+ use_external_mip_script=ext_ip_script,
+ hv_state=hv_state,
+ disk_state=disk_state,
+ enabled_storage_types=enabled_storage_types,
+ )
+ SubmitOrSend(op, opts)
return 0
return True
-def _InstanceStart(opts, inst_list, start):
+def _InstanceStart(opts, inst_list, start, no_remember=False):
"""Puts the instances in the list to desired state.
@param opts: The command line options selected by the user
@param inst_list: The list of instances to operate on
@param start: True if they should be started, False for shutdown
+ @param no_remember: If the instance state should be remembered
@return: The success of the operation (none failed)
"""
text_submit, text_success, text_failed = ("startup", "started", "starting")
else:
opcls = compat.partial(opcodes.OpInstanceShutdown,
- timeout=opts.shutdown_timeout)
+ timeout=opts.shutdown_timeout,
+ no_remember=no_remember)
text_submit, text_success, text_failed = ("shutdown", "stopped", "stopping")
jex = JobExecutor(opts=opts)
@return: The desired exit status
"""
- if not _InstanceStart(opts, inst_map.keys(), False):
+ if not _InstanceStart(opts, inst_map.keys(), False, no_remember=True):
ToStderr("Please investigate and stop instances manually before continuing")
return constants.EXIT_FAILURE
return constants.EXIT_FAILURE
-def Epo(opts, args):
+def Epo(opts, args, cl=None, _on_fn=_EpoOn, _off_fn=_EpoOff,
+ _confirm_fn=ConfirmOperation,
+ _stdout_fn=ToStdout, _stderr_fn=ToStderr):
"""EPO operations.
@param opts: the command line options selected by the user
"""
if opts.groups and opts.show_all:
- ToStderr("Only one of --groups or --all are allowed")
+ _stderr_fn("Only one of --groups or --all are allowed")
return constants.EXIT_FAILURE
elif args and opts.show_all:
- ToStderr("Arguments in combination with --all are not allowed")
+ _stderr_fn("Arguments in combination with --all are not allowed")
return constants.EXIT_FAILURE
- client = GetClient()
+ if cl is None:
+ cl = GetClient()
if opts.groups:
- node_query_list = itertools.chain(*client.QueryGroups(names=args,
- fields=["node_list"],
- use_locking=False))
+ node_query_list = \
+ itertools.chain(*cl.QueryGroups(args, ["node_list"], False))
else:
node_query_list = args
- result = client.QueryNodes(names=node_query_list,
- fields=["name", "master", "pinst_list",
- "sinst_list", "powered", "offline"],
- use_locking=False)
+ result = cl.QueryNodes(node_query_list, ["name", "master", "pinst_list",
+ "sinst_list", "powered", "offline"],
+ False)
+
+ all_nodes = map(compat.fst, result)
node_list = []
inst_map = {}
- for (idx, (node, master, pinsts, sinsts, powered,
- offline)) in enumerate(result):
- # Normalize the node_query_list as well
- if not opts.show_all:
- node_query_list[idx] = node
+ for (node, master, pinsts, sinsts, powered, offline) in result:
if not offline:
for inst in (pinsts + sinsts):
if inst in inst_map:
# already operating on the master at this point :)
continue
elif master and not opts.show_all:
- ToStderr("%s is the master node, please do a master-failover to another"
- " node not affected by the EPO or use --all if you intend to"
- " shutdown the whole cluster", node)
+ _stderr_fn("%s is the master node, please do a master-failover to another"
+ " node not affected by the EPO or use --all if you intend to"
+ " shutdown the whole cluster", node)
return constants.EXIT_FAILURE
elif powered is None:
- ToStdout("Node %s does not support out-of-band handling, it can not be"
- " handled in a fully automated manner", node)
+ _stdout_fn("Node %s does not support out-of-band handling, it can not be"
+ " handled in a fully automated manner", node)
elif powered == opts.on:
- ToStdout("Node %s is already in desired power state, skipping", node)
+ _stdout_fn("Node %s is already in desired power state, skipping", node)
elif not offline or (offline and powered):
node_list.append(node)
- if not opts.force and not ConfirmOperation(node_query_list, "nodes", "epo"):
+ if not (opts.force or _confirm_fn(all_nodes, "nodes", "epo")):
return constants.EXIT_FAILURE
if opts.on:
- return _EpoOn(opts, node_query_list, node_list, inst_map)
+ return _on_fn(opts, all_nodes, node_list, inst_map)
else:
- return _EpoOff(opts, node_list, inst_map)
+ return _off_fn(opts, node_list, inst_map)
commands = {
"init": (
InitCluster, [ArgHost(min=1, max=1)],
[BACKEND_OPT, CP_SIZE_OPT, ENABLED_HV_OPT, GLOBAL_FILEDIR_OPT,
- HVLIST_OPT, MAC_PREFIX_OPT, MASTER_NETDEV_OPT, NIC_PARAMS_OPT,
- NOLVM_STORAGE_OPT, NOMODIFY_ETCHOSTS_OPT, NOMODIFY_SSH_SETUP_OPT,
- SECONDARY_IP_OPT, VG_NAME_OPT, MAINTAIN_NODE_HEALTH_OPT,
- UIDPOOL_OPT, DRBD_HELPER_OPT, NODRBD_STORAGE_OPT,
+ HVLIST_OPT, MAC_PREFIX_OPT, MASTER_NETDEV_OPT, MASTER_NETMASK_OPT,
+ NIC_PARAMS_OPT, NOLVM_STORAGE_OPT, NOMODIFY_ETCHOSTS_OPT,
+ NOMODIFY_SSH_SETUP_OPT, SECONDARY_IP_OPT, VG_NAME_OPT,
+ MAINTAIN_NODE_HEALTH_OPT, UIDPOOL_OPT, DRBD_HELPER_OPT, NODRBD_STORAGE_OPT,
DEFAULT_IALLOCATOR_OPT, PRIMARY_IP_VERSION_OPT, PREALLOC_WIPE_DISKS_OPT,
- NODE_PARAMS_OPT, GLOBAL_SHARED_FILEDIR_OPT],
+ NODE_PARAMS_OPT, GLOBAL_SHARED_FILEDIR_OPT, USE_EXTERNAL_MIP_SCRIPT,
+ DISK_PARAMS_OPT, HV_STATE_OPT, DISK_STATE_OPT, ENABLED_STORAGE_TYPES_OPT]
+ + INSTANCE_POLICY_OPTS,
"[opts...] <cluster_name>", "Initialises a new cluster configuration"),
"destroy": (
DestroyCluster, ARGS_NONE, [YES_DOIT_OPT],
"verify": (
VerifyCluster, ARGS_NONE,
[VERBOSE_OPT, DEBUG_SIMERR_OPT, ERROR_CODES_OPT, NONPLUS1_OPT,
- DRY_RUN_OPT, PRIORITY_OPT, NODEGROUP_OPT],
+ DRY_RUN_OPT, PRIORITY_OPT, NODEGROUP_OPT, IGNORE_ERRORS_OPT],
"", "Does a check on the cluster configuration"),
"verify-disks": (
VerifyDisks, ARGS_NONE, [PRIORITY_OPT],
"", "Does a check on the cluster disk status"),
"repair-disk-sizes": (
RepairDiskSizes, ARGS_MANY_INSTANCES, [DRY_RUN_OPT, PRIORITY_OPT],
- "", "Updates mismatches in recorded disk sizes"),
+ "[instance...]", "Updates mismatches in recorded disk sizes"),
"master-failover": (
- MasterFailover, ARGS_NONE, [NOVOTING_OPT],
+ MasterFailover, ARGS_NONE, [NOVOTING_OPT, FORCE_FAILOVER],
"", "Makes the current node the master"),
"master-ping": (
MasterPing, ARGS_NONE, [],
"[-n node...] <filename>", "Copies a file to all (or only some) nodes"),
"command": (
RunClusterCommand, [ArgCommand(min=1)],
- [NODE_LIST_OPT, NODEGROUP_OPT],
+ [NODE_LIST_OPT, NODEGROUP_OPT, SHOW_MACHINE_OPT, FAILURE_ONLY_OPT],
"[-n node...] <command>", "Runs a command on all (or only some) nodes"),
"info": (
ShowClusterConfig, ARGS_NONE, [ROMAN_OPT],
"list-tags": (
ListTags, ARGS_NONE, [], "", "List the tags of the cluster"),
"add-tags": (
- AddTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT],
+ AddTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT, SUBMIT_OPT],
"tag...", "Add tags to the cluster"),
"remove-tags": (
- RemoveTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT],
+ RemoveTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT, SUBMIT_OPT],
"tag...", "Remove tags from the cluster"),
"search-tags": (
SearchTags, [ArgUnknown(min=1, max=1)], [PRIORITY_OPT], "",
"modify": (
SetClusterParams, ARGS_NONE,
[BACKEND_OPT, CP_SIZE_OPT, ENABLED_HV_OPT, HVLIST_OPT, MASTER_NETDEV_OPT,
- NIC_PARAMS_OPT, NOLVM_STORAGE_OPT, VG_NAME_OPT, MAINTAIN_NODE_HEALTH_OPT,
- UIDPOOL_OPT, ADD_UIDS_OPT, REMOVE_UIDS_OPT, DRBD_HELPER_OPT,
- NODRBD_STORAGE_OPT, DEFAULT_IALLOCATOR_OPT, RESERVED_LVS_OPT,
- DRY_RUN_OPT, PRIORITY_OPT, PREALLOC_WIPE_DISKS_OPT, NODE_PARAMS_OPT],
+ MASTER_NETMASK_OPT, NIC_PARAMS_OPT, NOLVM_STORAGE_OPT, VG_NAME_OPT,
+ MAINTAIN_NODE_HEALTH_OPT, UIDPOOL_OPT, ADD_UIDS_OPT, REMOVE_UIDS_OPT,
+ DRBD_HELPER_OPT, NODRBD_STORAGE_OPT, DEFAULT_IALLOCATOR_OPT,
+ RESERVED_LVS_OPT, DRY_RUN_OPT, PRIORITY_OPT, PREALLOC_WIPE_DISKS_OPT,
+ NODE_PARAMS_OPT, USE_EXTERNAL_MIP_SCRIPT, DISK_PARAMS_OPT, HV_STATE_OPT,
+ DISK_STATE_OPT, SUBMIT_OPT, ENABLED_STORAGE_TYPES_OPT] +
+ INSTANCE_POLICY_OPTS,
"[opts...]",
"Alters the parameters of the cluster"),
"renew-crypto": (
RenewCrypto, ARGS_NONE,
[NEW_CLUSTER_CERT_OPT, NEW_RAPI_CERT_OPT, RAPI_CERT_OPT,
NEW_CONFD_HMAC_KEY_OPT, FORCE_OPT,
- NEW_CLUSTER_DOMAIN_SECRET_OPT, CLUSTER_DOMAIN_SECRET_OPT],
+ NEW_CLUSTER_DOMAIN_SECRET_OPT, CLUSTER_DOMAIN_SECRET_OPT,
+ NEW_SPICE_CERT_OPT, SPICE_CERT_OPT, SPICE_CACERT_OPT],
"[opts...]",
"Renews cluster certificates, keys and secrets"),
"epo": (
SHUTDOWN_TIMEOUT_OPT, POWER_DELAY_OPT],
"[opts...] [args]",
"Performs an emergency power-off on given args"),
+ "activate-master-ip": (
+ ActivateMasterIp, ARGS_NONE, [], "", "Activates the master IP"),
+ "deactivate-master-ip": (
+ DeactivateMasterIp, ARGS_NONE, [CONFIRM_OPT], "",
+ "Deactivates the master IP"),
}
#: dictionary with aliases for commands
aliases = {
"masterfailover": "master-failover",
+ "show": "info",
}