#
#
-# Copyright (C) 2010 Google Inc.
+# Copyright (C) 2010, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
import urllib
import threading
import pycurl
+import time
try:
from cStringIO import StringIO
REPLACE_DISK_CHG = "replace_new_secondary"
REPLACE_DISK_AUTO = "replace_auto"
+NODE_EVAC_PRI = "primary-only"
+NODE_EVAC_SEC = "secondary-only"
+NODE_EVAC_ALL = "all"
+
NODE_ROLE_DRAINED = "drained"
NODE_ROLE_MASTER_CANDIATE = "master-candidate"
NODE_ROLE_MASTER = "master"
NODE_ROLE_OFFLINE = "offline"
NODE_ROLE_REGULAR = "regular"
+JOB_STATUS_QUEUED = "queued"
+JOB_STATUS_WAITING = "waiting"
+JOB_STATUS_CANCELING = "canceling"
+JOB_STATUS_RUNNING = "running"
+JOB_STATUS_CANCELED = "canceled"
+JOB_STATUS_SUCCESS = "success"
+JOB_STATUS_ERROR = "error"
+JOB_STATUS_PENDING = frozenset([
+ JOB_STATUS_QUEUED,
+ JOB_STATUS_WAITING,
+ JOB_STATUS_CANCELING,
+ ])
+JOB_STATUS_FINALIZED = frozenset([
+ JOB_STATUS_CANCELED,
+ JOB_STATUS_SUCCESS,
+ JOB_STATUS_ERROR,
+ ])
+JOB_STATUS_ALL = frozenset([
+ JOB_STATUS_RUNNING,
+ ]) | JOB_STATUS_PENDING | JOB_STATUS_FINALIZED
+
+# Legacy name
+JOB_STATUS_WAITLOCK = JOB_STATUS_WAITING
+
# Internal constants
_REQ_DATA_VERSION_FIELD = "__version__"
-_INST_CREATE_REQV1 = "instance-create-reqv1"
-_INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"
-_INST_NIC_PARAMS = frozenset(["mac", "ip", "mode", "link", "bridge"])
-_INST_CREATE_V0_DISK_PARAMS = frozenset(["size"])
-_INST_CREATE_V0_PARAMS = frozenset([
- "os", "pnode", "snode", "iallocator", "start", "ip_check", "name_check",
- "hypervisor", "file_storage_dir", "file_driver", "dry_run",
+_QPARAM_DRY_RUN = "dry-run"
+_QPARAM_FORCE = "force"
+
+# Feature strings
+INST_CREATE_REQV1 = "instance-create-reqv1"
+INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"
+NODE_MIGRATE_REQV1 = "node-migrate-reqv1"
+NODE_EVAC_RES1 = "node-evac-res1"
+
+# Old feature constant names in case they're referenced by users of this module
+_INST_CREATE_REQV1 = INST_CREATE_REQV1
+_INST_REINSTALL_REQV1 = INST_REINSTALL_REQV1
+_NODE_MIGRATE_REQV1 = NODE_MIGRATE_REQV1
+_NODE_EVAC_RES1 = NODE_EVAC_RES1
+
+#: Resolver errors
+ECODE_RESOLVER = "resolver_error"
+
+#: Not enough resources (iallocator failure, disk space, memory, etc.)
+ECODE_NORES = "insufficient_resources"
+
+#: Temporarily out of resources; operation can be tried again
+ECODE_TEMP_NORES = "temp_insufficient_resources"
+
+#: Wrong arguments (at syntax level)
+ECODE_INVAL = "wrong_input"
+
+#: Wrong entity state
+ECODE_STATE = "wrong_state"
+
+#: Entity not found
+ECODE_NOENT = "unknown_entity"
+
+#: Entity already exists
+ECODE_EXISTS = "already_exists"
+
+#: Resource not unique (e.g. MAC or IP duplication)
+ECODE_NOTUNIQUE = "resource_not_unique"
+
+#: Internal cluster error
+ECODE_FAULT = "internal_error"
+
+#: Environment error (e.g. node disk error)
+ECODE_ENVIRON = "environment_error"
+
+#: List of all failure types
+ECODE_ALL = frozenset([
+ ECODE_RESOLVER,
+ ECODE_NORES,
+ ECODE_TEMP_NORES,
+ ECODE_INVAL,
+ ECODE_STATE,
+ ECODE_NOENT,
+ ECODE_EXISTS,
+ ECODE_NOTUNIQUE,
+ ECODE_FAULT,
+ ECODE_ENVIRON,
])
-_INST_CREATE_V0_DPARAMS = frozenset(["beparams", "hvparams"])
# Older pycURL versions don't have all error constants
try:
pass
-class CertificateError(Error):
+class GanetiApiError(Error):
+ """Generic error raised from Ganeti API.
+
+ """
+ def __init__(self, msg, code=None):
+ Error.__init__(self, msg)
+ self.code = code
+
+
+class CertificateError(GanetiApiError):
"""Raised when a problem is found with the SSL certificate.
"""
pass
-class GanetiApiError(Error):
- """Generic error raised from Ganeti API.
+def _AppendIf(container, condition, value):
+ """Appends to a list if a condition evaluates to truth.
"""
- def __init__(self, msg, code=None):
- Error.__init__(self, msg)
- self.code = code
+ if condition:
+ container.append(value)
+
+ return condition
+
+
+def _AppendDryRunIf(container, condition):
+ """Appends a "dry-run" parameter if a condition evaluates to truth.
+
+ """
+ return _AppendIf(container, condition, (_QPARAM_DRY_RUN, 1))
+
+
+def _AppendForceIf(container, condition):
+ """Appends a "force" parameter if a condition evaluates to truth.
+
+ """
+ return _AppendIf(container, condition, (_QPARAM_FORCE, 1))
+
+
+def _SetItemIf(container, condition, item, value):
+ """Sets an item if a condition evaluates to truth.
+
+ """
+ if condition:
+ container[item] = value
+
+ return condition
def UsesRapiClient(fn):
lcsslver = sslver.lower()
if lcsslver.startswith("openssl/"):
pass
+ elif lcsslver.startswith("nss/"):
+ # TODO: investigate compatibility beyond a simple test
+ pass
elif lcsslver.startswith("gnutls/"):
if capath:
raise Error("cURL linked against GnuTLS has no support for a"
return _ConfigCurl
-class GanetiRapiClient(object):
+class GanetiRapiClient(object): # pylint: disable=R0904
"""Ganeti RAPI client.
"""
curl.perform()
except pycurl.error, err:
if err.args[0] in _CURL_SSL_CERT_ERRORS:
- raise CertificateError("SSL certificate error %s" % err)
+ raise CertificateError("SSL certificate error %s" % err,
+ code=err.args[0])
- raise GanetiApiError(str(err))
+ raise GanetiApiError(str(err), code=err.args[0])
finally:
# Reset settings to not keep references to large objects in memory
# between requests
return self._SendRequest(HTTP_GET, "/%s/info" % GANETI_RAPI_VERSION,
None, None)
+ def RedistributeConfig(self):
+ """Tells the cluster to redistribute its configuration files.
+
+ @rtype: string
+ @return: job id
+
+ """
+ return self._SendRequest(HTTP_PUT,
+ "/%s/redistribute-config" % GANETI_RAPI_VERSION,
+ None, None)
+
def ModifyCluster(self, **kwargs):
"""Modifies cluster parameters.
More details for parameters can be found in the RAPI documentation.
- @rtype: int
+ @rtype: string
@return: job id
"""
@type dry_run: bool
@param dry_run: whether to perform a dry run
- @rtype: int
+ @rtype: string
@return: job id
"""
query = [("tag", t) for t in tags]
- if dry_run:
- query.append(("dry-run", 1))
+ _AppendDryRunIf(query, dry_run)
return self._SendRequest(HTTP_PUT, "/%s/tags" % GANETI_RAPI_VERSION,
query, None)
@param tags: tags to delete
@type dry_run: bool
@param dry_run: whether to perform a dry run
+ @rtype: string
+ @return: job id
"""
query = [("tag", t) for t in tags]
- if dry_run:
- query.append(("dry-run", 1))
+ _AppendDryRunIf(query, dry_run)
return self._SendRequest(HTTP_DELETE, "/%s/tags" % GANETI_RAPI_VERSION,
query, None)
"""
query = []
- if bulk:
- query.append(("bulk", 1))
+ _AppendIf(query, bulk, ("bulk", 1))
instances = self._SendRequest(HTTP_GET,
"/%s/instances" % GANETI_RAPI_VERSION,
("/%s/instances/%s/info" %
(GANETI_RAPI_VERSION, instance)), query, None)
+ @staticmethod
+ def _UpdateWithKwargs(base, **kwargs):
+ """Updates the base with params from kwargs.
+
+ @param base: The base dict, filled with required fields
+
+  @note: This is an in-place update of base
+
+ """
+ conflicts = set(kwargs.iterkeys()) & set(base.iterkeys())
+ if conflicts:
+ raise GanetiApiError("Required fields can not be specified as"
+ " keywords: %s" % ", ".join(conflicts))
+
+ base.update((key, value) for key, value in kwargs.iteritems()
+ if key != "dry_run")
+
+ def InstanceAllocation(self, mode, name, disk_template, disks, nics,
+ **kwargs):
+ """Generates an instance allocation as used by multiallocate.
+
+ More details for parameters can be found in the RAPI documentation.
+ It is the same as used by CreateInstance.
+
+ @type mode: string
+ @param mode: Instance creation mode
+ @type name: string
+ @param name: Hostname of the instance to create
+ @type disk_template: string
+ @param disk_template: Disk template for instance (e.g. plain, diskless,
+ file, or drbd)
+ @type disks: list of dicts
+ @param disks: List of disk definitions
+ @type nics: list of dicts
+ @param nics: List of NIC definitions
+
+ @return: A dict with the generated entry
+
+ """
+ # All required fields for request data version 1
+ alloc = {
+ "mode": mode,
+ "name": name,
+ "disk_template": disk_template,
+ "disks": disks,
+ "nics": nics,
+ }
+
+ self._UpdateWithKwargs(alloc, **kwargs)
+
+ return alloc
+
+ def InstancesMultiAlloc(self, instances, **kwargs):
+ """Tries to allocate multiple instances.
+
+ More details for parameters can be found in the RAPI documentation.
+
+ @param instances: A list of L{InstanceAllocation} results
+
+ """
+ query = []
+ body = {
+ "instances": instances,
+ }
+ self._UpdateWithKwargs(body, **kwargs)
+
+ _AppendDryRunIf(query, kwargs.get("dry_run"))
+
+ return self._SendRequest(HTTP_POST,
+ "/%s/instances-multi-alloc" % GANETI_RAPI_VERSION,
+ query, body)
+
def CreateInstance(self, mode, name, disk_template, disks, nics,
**kwargs):
"""Creates a new instance.
@type dry_run: bool
@keyword dry_run: whether to perform a dry run
- @rtype: int
+ @rtype: string
@return: job id
"""
query = []
- if kwargs.get("dry_run"):
- query.append(("dry-run", 1))
+ _AppendDryRunIf(query, kwargs.get("dry_run"))
if _INST_CREATE_REQV1 in self.GetFeatures():
- # All required fields for request data version 1
- body = {
- _REQ_DATA_VERSION_FIELD: 1,
- "mode": mode,
- "name": name,
- "disk_template": disk_template,
- "disks": disks,
- "nics": nics,
- }
-
- conflicts = set(kwargs.iterkeys()) & set(body.iterkeys())
- if conflicts:
- raise GanetiApiError("Required fields can not be specified as"
- " keywords: %s" % ", ".join(conflicts))
-
- body.update((key, value) for key, value in kwargs.iteritems()
- if key != "dry_run")
+ body = self.InstanceAllocation(mode, name, disk_template, disks, nics,
+ **kwargs)
+ body[_REQ_DATA_VERSION_FIELD] = 1
else:
- # Old request format (version 0)
-
- # The following code must make sure that an exception is raised when an
- # unsupported setting is requested by the caller. Otherwise this can lead
- # to bugs difficult to find. The interface of this function must stay
- # exactly the same for version 0 and 1 (e.g. they aren't allowed to
- # require different data types).
-
- # Validate disks
- for idx, disk in enumerate(disks):
- unsupported = set(disk.keys()) - _INST_CREATE_V0_DISK_PARAMS
- if unsupported:
- raise GanetiApiError("Server supports request version 0 only, but"
- " disk %s specifies the unsupported parameters"
- " %s, allowed are %s" %
- (idx, unsupported,
- list(_INST_CREATE_V0_DISK_PARAMS)))
-
- assert (len(_INST_CREATE_V0_DISK_PARAMS) == 1 and
- "size" in _INST_CREATE_V0_DISK_PARAMS)
- disk_sizes = [disk["size"] for disk in disks]
-
- # Validate NICs
- if not nics:
- raise GanetiApiError("Server supports request version 0 only, but"
- " no NIC specified")
- elif len(nics) > 1:
- raise GanetiApiError("Server supports request version 0 only, but"
- " more than one NIC specified")
-
- assert len(nics) == 1
-
- unsupported = set(nics[0].keys()) - _INST_NIC_PARAMS
- if unsupported:
- raise GanetiApiError("Server supports request version 0 only, but"
- " NIC 0 specifies the unsupported parameters %s,"
- " allowed are %s" %
- (unsupported, list(_INST_NIC_PARAMS)))
-
- # Validate other parameters
- unsupported = (set(kwargs.keys()) - _INST_CREATE_V0_PARAMS -
- _INST_CREATE_V0_DPARAMS)
- if unsupported:
- allowed = _INST_CREATE_V0_PARAMS.union(_INST_CREATE_V0_DPARAMS)
- raise GanetiApiError("Server supports request version 0 only, but"
- " the following unsupported parameters are"
- " specified: %s, allowed are %s" %
- (unsupported, list(allowed)))
-
- # All required fields for request data version 0
- body = {
- _REQ_DATA_VERSION_FIELD: 0,
- "name": name,
- "disk_template": disk_template,
- "disks": disk_sizes,
- }
-
- # NIC fields
- assert len(nics) == 1
- assert not (set(body.keys()) & set(nics[0].keys()))
- body.update(nics[0])
-
- # Copy supported fields
- assert not (set(body.keys()) & set(kwargs.keys()))
- body.update(dict((key, value) for key, value in kwargs.items()
- if key in _INST_CREATE_V0_PARAMS))
-
- # Merge dictionaries
- for i in (value for key, value in kwargs.items()
- if key in _INST_CREATE_V0_DPARAMS):
- assert not (set(body.keys()) & set(i.keys()))
- body.update(i)
-
- assert not (set(kwargs.keys()) -
- (_INST_CREATE_V0_PARAMS | _INST_CREATE_V0_DPARAMS))
- assert not (set(body.keys()) & _INST_CREATE_V0_DPARAMS)
+ raise GanetiApiError("Server does not support new-style (version 1)"
+ " instance creation requests")
return self._SendRequest(HTTP_POST, "/%s/instances" % GANETI_RAPI_VERSION,
query, body)
@type instance: str
@param instance: the instance to delete
- @rtype: int
+ @rtype: string
@return: job id
"""
query = []
- if dry_run:
- query.append(("dry-run", 1))
+ _AppendDryRunIf(query, dry_run)
return self._SendRequest(HTTP_DELETE,
("/%s/instances/%s" %
@type instance: string
@param instance: Instance name
- @rtype: int
+ @rtype: string
@return: job id
"""
("/%s/instances/%s/modify" %
(GANETI_RAPI_VERSION, instance)), None, body)
+ def ActivateInstanceDisks(self, instance, ignore_size=None):
+ """Activates an instance's disks.
+
+ @type instance: string
+ @param instance: Instance name
+ @type ignore_size: bool
+ @param ignore_size: Whether to ignore recorded size
+ @rtype: string
+ @return: job id
+
+ """
+ query = []
+ _AppendIf(query, ignore_size, ("ignore_size", 1))
+
+ return self._SendRequest(HTTP_PUT,
+ ("/%s/instances/%s/activate-disks" %
+ (GANETI_RAPI_VERSION, instance)), query, None)
+
+ def DeactivateInstanceDisks(self, instance):
+ """Deactivates an instance's disks.
+
+ @type instance: string
+ @param instance: Instance name
+ @rtype: string
+ @return: job id
+
+ """
+ return self._SendRequest(HTTP_PUT,
+ ("/%s/instances/%s/deactivate-disks" %
+ (GANETI_RAPI_VERSION, instance)), None, None)
+
+ def RecreateInstanceDisks(self, instance, disks=None, nodes=None):
+ """Recreate an instance's disks.
+
+ @type instance: string
+ @param instance: Instance name
+ @type disks: list of int
+ @param disks: List of disk indexes
+ @type nodes: list of string
+ @param nodes: New instance nodes, if relocation is desired
+ @rtype: string
+ @return: job id
+
+ """
+ body = {}
+ _SetItemIf(body, disks is not None, "disks", disks)
+ _SetItemIf(body, nodes is not None, "nodes", nodes)
+
+ return self._SendRequest(HTTP_POST,
+ ("/%s/instances/%s/recreate-disks" %
+ (GANETI_RAPI_VERSION, instance)), None, body)
+
+ def GrowInstanceDisk(self, instance, disk, amount, wait_for_sync=None):
+ """Grows a disk of an instance.
+
+ More details for parameters can be found in the RAPI documentation.
+
+ @type instance: string
+ @param instance: Instance name
+ @type disk: integer
+ @param disk: Disk index
+ @type amount: integer
+ @param amount: Grow disk by this amount (MiB)
+ @type wait_for_sync: bool
+ @param wait_for_sync: Wait for disk to synchronize
+ @rtype: string
+ @return: job id
+
+ """
+ body = {
+ "amount": amount,
+ }
+
+ _SetItemIf(body, wait_for_sync is not None, "wait_for_sync", wait_for_sync)
+
+ return self._SendRequest(HTTP_POST,
+ ("/%s/instances/%s/disk/%s/grow" %
+ (GANETI_RAPI_VERSION, instance, disk)),
+ None, body)
+
def GetInstanceTags(self, instance):
"""Gets tags for an instance.
@type dry_run: bool
@param dry_run: whether to perform a dry run
- @rtype: int
+ @rtype: string
@return: job id
"""
query = [("tag", t) for t in tags]
- if dry_run:
- query.append(("dry-run", 1))
+ _AppendDryRunIf(query, dry_run)
return self._SendRequest(HTTP_PUT,
("/%s/instances/%s/tags" %
@param tags: tags to delete
@type dry_run: bool
@param dry_run: whether to perform a dry run
+ @rtype: string
+ @return: job id
"""
query = [("tag", t) for t in tags]
- if dry_run:
- query.append(("dry-run", 1))
+ _AppendDryRunIf(query, dry_run)
return self._SendRequest(HTTP_DELETE,
("/%s/instances/%s/tags" %
while re-assembling disks (in hard-reboot mode only)
@type dry_run: bool
@param dry_run: whether to perform a dry run
+ @rtype: string
+ @return: job id
"""
query = []
- if reboot_type:
- query.append(("type", reboot_type))
- if ignore_secondaries is not None:
- query.append(("ignore_secondaries", ignore_secondaries))
- if dry_run:
- query.append(("dry-run", 1))
+ _AppendDryRunIf(query, dry_run)
+ _AppendIf(query, reboot_type, ("type", reboot_type))
+ _AppendIf(query, ignore_secondaries is not None,
+ ("ignore_secondaries", ignore_secondaries))
return self._SendRequest(HTTP_POST,
("/%s/instances/%s/reboot" %
(GANETI_RAPI_VERSION, instance)), query, None)
- def ShutdownInstance(self, instance, dry_run=False):
+ def ShutdownInstance(self, instance, dry_run=False, no_remember=False,
+ **kwargs):
"""Shuts down an instance.
@type instance: str
@param instance: the instance to shut down
@type dry_run: bool
@param dry_run: whether to perform a dry run
+ @type no_remember: bool
+ @param no_remember: if true, will not record the state change
+ @rtype: string
+ @return: job id
"""
query = []
- if dry_run:
- query.append(("dry-run", 1))
+ body = kwargs
+
+ _AppendDryRunIf(query, dry_run)
+ _AppendIf(query, no_remember, ("no-remember", 1))
return self._SendRequest(HTTP_PUT,
("/%s/instances/%s/shutdown" %
- (GANETI_RAPI_VERSION, instance)), query, None)
+ (GANETI_RAPI_VERSION, instance)), query, body)
- def StartupInstance(self, instance, dry_run=False):
+ def StartupInstance(self, instance, dry_run=False, no_remember=False):
"""Starts up an instance.
@type instance: str
@param instance: the instance to start up
@type dry_run: bool
@param dry_run: whether to perform a dry run
+ @type no_remember: bool
+ @param no_remember: if true, will not record the state change
+ @rtype: string
+ @return: job id
"""
query = []
- if dry_run:
- query.append(("dry-run", 1))
+ _AppendDryRunIf(query, dry_run)
+ _AppendIf(query, no_remember, ("no-remember", 1))
return self._SendRequest(HTTP_PUT,
("/%s/instances/%s/startup" %
current operating system will be installed again
@type no_startup: bool
@param no_startup: Whether to start the instance automatically
+ @rtype: string
+ @return: job id
"""
if _INST_REINSTALL_REQV1 in self.GetFeatures():
body = {
"start": not no_startup,
}
- if os is not None:
- body["os"] = os
- if osparams is not None:
- body["osparams"] = osparams
+ _SetItemIf(body, os is not None, "os", os)
+ _SetItemIf(body, osparams is not None, "osparams", osparams)
return self._SendRequest(HTTP_POST,
("/%s/instances/%s/reinstall" %
(GANETI_RAPI_VERSION, instance)), None, body)
" for instance reinstallation")
query = []
- if os:
- query.append(("os", os))
- if no_startup:
- query.append(("nostartup", 1))
+ _AppendIf(query, os, ("os", os))
+ _AppendIf(query, no_startup, ("nostartup", 1))
+
return self._SendRequest(HTTP_POST,
("/%s/instances/%s/reinstall" %
(GANETI_RAPI_VERSION, instance)), query, None)
def ReplaceInstanceDisks(self, instance, disks=None, mode=REPLACE_DISK_AUTO,
- remote_node=None, iallocator=None, dry_run=False):
+ remote_node=None, iallocator=None):
"""Replaces disks on an instance.
@type instance: str
@type iallocator: str or None
@param iallocator: instance allocator plugin to use (for use with
replace_auto mode)
- @type dry_run: bool
- @param dry_run: whether to perform a dry run
- @rtype: int
+ @rtype: string
@return: job id
"""
("mode", mode),
]
- if disks:
- query.append(("disks", ",".join(str(idx) for idx in disks)))
-
- if remote_node:
- query.append(("remote_node", remote_node))
+ # TODO: Convert to body parameters
- if iallocator:
- query.append(("iallocator", iallocator))
+ if disks is not None:
+ _AppendIf(query, True,
+ ("disks", ",".join(str(idx) for idx in disks)))
- if dry_run:
- query.append(("dry-run", 1))
+ _AppendIf(query, remote_node is not None, ("remote_node", remote_node))
+ _AppendIf(query, iallocator is not None, ("iallocator", iallocator))
return self._SendRequest(HTTP_POST,
("/%s/instances/%s/replace-disks" %
"mode": mode,
}
- if shutdown is not None:
- body["shutdown"] = shutdown
-
- if remove_instance is not None:
- body["remove_instance"] = remove_instance
-
- if x509_key_name is not None:
- body["x509_key_name"] = x509_key_name
-
- if destination_x509_ca is not None:
- body["destination_x509_ca"] = destination_x509_ca
+ _SetItemIf(body, shutdown is not None, "shutdown", shutdown)
+ _SetItemIf(body, remove_instance is not None,
+ "remove_instance", remove_instance)
+ _SetItemIf(body, x509_key_name is not None, "x509_key_name", x509_key_name)
+ _SetItemIf(body, destination_x509_ca is not None,
+ "destination_x509_ca", destination_x509_ca)
return self._SendRequest(HTTP_PUT,
("/%s/instances/%s/export" %
@param mode: Migration mode
@type cleanup: bool
@param cleanup: Whether to clean up a previously failed migration
+ @rtype: string
+ @return: job id
"""
body = {}
+ _SetItemIf(body, mode is not None, "mode", mode)
+ _SetItemIf(body, cleanup is not None, "cleanup", cleanup)
- if mode is not None:
- body["mode"] = mode
+ return self._SendRequest(HTTP_PUT,
+ ("/%s/instances/%s/migrate" %
+ (GANETI_RAPI_VERSION, instance)), None, body)
- if cleanup is not None:
- body["cleanup"] = cleanup
+ def FailoverInstance(self, instance, iallocator=None,
+ ignore_consistency=None, target_node=None):
+ """Does a failover of an instance.
+
+ @type instance: string
+ @param instance: Instance name
+ @type iallocator: string
+ @param iallocator: Iallocator for deciding the target node for
+ shared-storage instances
+ @type ignore_consistency: bool
+ @param ignore_consistency: Whether to ignore disk consistency
+ @type target_node: string
+ @param target_node: Target node for shared-storage instances
+ @rtype: string
+ @return: job id
+
+ """
+ body = {}
+ _SetItemIf(body, iallocator is not None, "iallocator", iallocator)
+ _SetItemIf(body, ignore_consistency is not None,
+ "ignore_consistency", ignore_consistency)
+ _SetItemIf(body, target_node is not None, "target_node", target_node)
return self._SendRequest(HTTP_PUT,
- ("/%s/instances/%s/migrate" %
+ ("/%s/instances/%s/failover" %
(GANETI_RAPI_VERSION, instance)), None, body)
def RenameInstance(self, instance, new_name, ip_check=None, name_check=None):
@param ip_check: Whether to ensure instance's IP address is inactive
@type name_check: bool
@param name_check: Whether to ensure instance's name is resolvable
+ @rtype: string
+ @return: job id
"""
body = {
"new_name": new_name,
}
- if ip_check is not None:
- body["ip_check"] = ip_check
-
- if name_check is not None:
- body["name_check"] = name_check
+ _SetItemIf(body, ip_check is not None, "ip_check", ip_check)
+ _SetItemIf(body, name_check is not None, "name_check", name_check)
return self._SendRequest(HTTP_PUT,
("/%s/instances/%s/rename" %
(GANETI_RAPI_VERSION, instance)), None, body)
+ def GetInstanceConsole(self, instance):
+ """Request information for connecting to instance's console.
+
+ @type instance: string
+ @param instance: Instance name
+ @rtype: dict
+ @return: dictionary containing information about instance's console
+
+ """
+ return self._SendRequest(HTTP_GET,
+ ("/%s/instances/%s/console" %
+ (GANETI_RAPI_VERSION, instance)), None, None)
+
def GetJobs(self):
"""Gets all jobs for the cluster.
def GetJobStatus(self, job_id):
"""Gets the status of a job.
- @type job_id: int
+ @type job_id: string
@param job_id: job id whose status to query
@rtype: dict
"/%s/jobs/%s" % (GANETI_RAPI_VERSION, job_id),
None, None)
+ def WaitForJobCompletion(self, job_id, period=5, retries=-1):
+ """Polls cluster for job status until completion.
+
+ Completion is defined as any of the following states listed in
+ L{JOB_STATUS_FINALIZED}.
+
+ @type job_id: string
+ @param job_id: job id to watch
+ @type period: int
+ @param period: how often to poll for status (optional, default 5s)
+ @type retries: int
+    @param retries: how many times to poll before giving up
+ (optional, default -1 means unlimited)
+
+ @rtype: bool
+ @return: C{True} if job succeeded or C{False} if failed/status timeout
+ @deprecated: It is recommended to use L{WaitForJobChange} wherever
+ possible; L{WaitForJobChange} returns immediately after a job changed and
+ does not use polling
+
+ """
+ while retries != 0:
+ job_result = self.GetJobStatus(job_id)
+
+ if job_result and job_result["status"] == JOB_STATUS_SUCCESS:
+ return True
+ elif not job_result or job_result["status"] in JOB_STATUS_FINALIZED:
+ return False
+
+ if period:
+ time.sleep(period)
+
+ if retries > 0:
+ retries -= 1
+
+ return False
+
def WaitForJobChange(self, job_id, fields, prev_job_info, prev_log_serial):
"""Waits for job changes.
- @type job_id: int
+ @type job_id: string
@param job_id: Job ID for which to wait
+ @return: C{None} if no changes have been detected and a dict with two keys,
+ C{job_info} and C{log_entries} otherwise.
+ @rtype: dict
"""
body = {
def CancelJob(self, job_id, dry_run=False):
"""Cancels a job.
- @type job_id: int
+ @type job_id: string
@param job_id: id of the job to delete
@type dry_run: bool
@param dry_run: whether to perform a dry run
+ @rtype: tuple
+ @return: tuple containing the result, and a message (bool, string)
"""
query = []
- if dry_run:
- query.append(("dry-run", 1))
+ _AppendDryRunIf(query, dry_run)
return self._SendRequest(HTTP_DELETE,
"/%s/jobs/%s" % (GANETI_RAPI_VERSION, job_id),
"""
query = []
- if bulk:
- query.append(("bulk", 1))
+ _AppendIf(query, bulk, ("bulk", 1))
nodes = self._SendRequest(HTTP_GET, "/%s/nodes" % GANETI_RAPI_VERSION,
query, None)
None, None)
def EvacuateNode(self, node, iallocator=None, remote_node=None,
- dry_run=False, early_release=False):
+ dry_run=False, early_release=None,
+ mode=None, accept_old=False):
"""Evacuates instances from a Ganeti node.
@type node: str
@param dry_run: whether to perform a dry run
@type early_release: bool
@param early_release: whether to enable parallelization
+ @type mode: string
+ @param mode: Node evacuation mode
+ @type accept_old: bool
+ @param accept_old: Whether caller is ready to accept old-style (pre-2.5)
+ results
- @rtype: list
- @return: list of (job ID, instance name, new secondary node); if
- dry_run was specified, then the actual move jobs were not
- submitted and the job IDs will be C{None}
+ @rtype: string, or a list for pre-2.5 results
+ @return: Job ID or, if C{accept_old} is set and server is pre-2.5,
+ list of (job ID, instance name, new secondary node); if dry_run was
+ specified, then the actual move jobs were not submitted and the job IDs
+ will be C{None}
@raises GanetiApiError: if an iallocator and remote_node are both
specified
raise GanetiApiError("Only one of iallocator or remote_node can be used")
query = []
- if iallocator:
- query.append(("iallocator", iallocator))
- if remote_node:
- query.append(("remote_node", remote_node))
- if dry_run:
- query.append(("dry-run", 1))
- if early_release:
- query.append(("early_release", 1))
+ _AppendDryRunIf(query, dry_run)
+
+ if _NODE_EVAC_RES1 in self.GetFeatures():
+ # Server supports body parameters
+ body = {}
+
+ _SetItemIf(body, iallocator is not None, "iallocator", iallocator)
+ _SetItemIf(body, remote_node is not None, "remote_node", remote_node)
+ _SetItemIf(body, early_release is not None,
+ "early_release", early_release)
+ _SetItemIf(body, mode is not None, "mode", mode)
+ else:
+ # Pre-2.5 request format
+ body = None
+
+ if not accept_old:
+ raise GanetiApiError("Server is version 2.4 or earlier and caller does"
+ " not accept old-style results (parameter"
+ " accept_old)")
+
+ # Pre-2.5 servers can only evacuate secondaries
+ if mode is not None and mode != NODE_EVAC_SEC:
+ raise GanetiApiError("Server can only evacuate secondary instances")
+
+ _AppendIf(query, iallocator, ("iallocator", iallocator))
+ _AppendIf(query, remote_node, ("remote_node", remote_node))
+ _AppendIf(query, early_release, ("early_release", 1))
return self._SendRequest(HTTP_POST,
("/%s/nodes/%s/evacuate" %
- (GANETI_RAPI_VERSION, node)), query, None)
+ (GANETI_RAPI_VERSION, node)), query, body)
- def MigrateNode(self, node, mode=None, dry_run=False):
+ def MigrateNode(self, node, mode=None, dry_run=False, iallocator=None,
+ target_node=None):
"""Migrates all primary instances from a node.
@type node: str
otherwise the hypervisor default will be used
@type dry_run: bool
@param dry_run: whether to perform a dry run
+ @type iallocator: string
+ @param iallocator: instance allocator to use
+ @type target_node: string
+ @param target_node: Target node for shared-storage instances
- @rtype: int
+ @rtype: string
@return: job id
"""
query = []
- if mode is not None:
- query.append(("mode", mode))
- if dry_run:
- query.append(("dry-run", 1))
+ _AppendDryRunIf(query, dry_run)
- return self._SendRequest(HTTP_POST,
- ("/%s/nodes/%s/migrate" %
- (GANETI_RAPI_VERSION, node)), query, None)
+ if _NODE_MIGRATE_REQV1 in self.GetFeatures():
+ body = {}
+
+ _SetItemIf(body, mode is not None, "mode", mode)
+ _SetItemIf(body, iallocator is not None, "iallocator", iallocator)
+ _SetItemIf(body, target_node is not None, "target_node", target_node)
+
+ assert len(query) <= 1
+
+ return self._SendRequest(HTTP_POST,
+ ("/%s/nodes/%s/migrate" %
+ (GANETI_RAPI_VERSION, node)), query, body)
+ else:
+ # Use old request format
+ if target_node is not None:
+ raise GanetiApiError("Server does not support specifying target node"
+ " for node migration")
+
+ _AppendIf(query, mode is not None, ("mode", mode))
+
+ return self._SendRequest(HTTP_POST,
+ ("/%s/nodes/%s/migrate" %
+ (GANETI_RAPI_VERSION, node)), query, None)
def GetNodeRole(self, node):
"""Gets the current role for a node.
("/%s/nodes/%s/role" %
(GANETI_RAPI_VERSION, node)), None, None)
- def SetNodeRole(self, node, role, force=False):
+ def SetNodeRole(self, node, role, force=False, auto_promote=None):
"""Sets the role for a node.
@type node: str
@param role: the role to set for the node
@type force: bool
@param force: whether to force the role change
+ @type auto_promote: bool
+ @param auto_promote: Whether node(s) should be promoted to master candidate
+ if necessary
- @rtype: int
+ @rtype: string
@return: job id
"""
- query = [
- ("force", force),
- ]
+ query = []
+ _AppendForceIf(query, force)
+ _AppendIf(query, auto_promote is not None, ("auto-promote", auto_promote))
return self._SendRequest(HTTP_PUT,
("/%s/nodes/%s/role" %
(GANETI_RAPI_VERSION, node)), query, role)
+ def PowercycleNode(self, node, force=False):
+ """Powercycles a node.
+
+ @type node: string
+ @param node: Node name
+ @type force: bool
+ @param force: Whether to force the operation
+ @rtype: string
+ @return: job id
+
+ """
+ query = []
+ _AppendForceIf(query, force)
+
+ return self._SendRequest(HTTP_POST,
+ ("/%s/nodes/%s/powercycle" %
+ (GANETI_RAPI_VERSION, node)), query, None)
+
+ def ModifyNode(self, node, **kwargs):
+ """Modifies a node.
+
+ More details for parameters can be found in the RAPI documentation.
+
+ @type node: string
+ @param node: Node name
+ @rtype: string
+ @return: job id
+
+ """
+ return self._SendRequest(HTTP_POST,
+ ("/%s/nodes/%s/modify" %
+ (GANETI_RAPI_VERSION, node)), None, kwargs)
+
def GetNodeStorageUnits(self, node, storage_type, output_fields):
"""Gets the storage units for a node.
@type output_fields: str
@param output_fields: storage type fields to return
- @rtype: int
+ @rtype: string
@return: job id where results can be retrieved
"""
@param allocatable: Whether to set the "allocatable" flag on the storage
unit (None=no modification, True=set, False=unset)
- @rtype: int
+ @rtype: string
@return: job id
"""
("name", name),
]
- if allocatable is not None:
- query.append(("allocatable", allocatable))
+ _AppendIf(query, allocatable is not None, ("allocatable", allocatable))
return self._SendRequest(HTTP_PUT,
("/%s/nodes/%s/storage/modify" %
@type name: str
@param name: name of the storage unit to repair
- @rtype: int
+ @rtype: string
@return: job id
"""
@type dry_run: bool
@param dry_run: whether to perform a dry run
- @rtype: int
+ @rtype: string
@return: job id
"""
query = [("tag", t) for t in tags]
- if dry_run:
- query.append(("dry-run", 1))
+ _AppendDryRunIf(query, dry_run)
return self._SendRequest(HTTP_PUT,
("/%s/nodes/%s/tags" %
@type dry_run: bool
@param dry_run: whether to perform a dry run
- @rtype: int
+ @rtype: string
@return: job id
"""
query = [("tag", t) for t in tags]
- if dry_run:
- query.append(("dry-run", 1))
+ _AppendDryRunIf(query, dry_run)
return self._SendRequest(HTTP_DELETE,
("/%s/nodes/%s/tags" %
(GANETI_RAPI_VERSION, node)), query, None)
+ def GetNetworks(self, bulk=False):
+ """Gets all networks in the cluster.
+
+ @type bulk: bool
+ @param bulk: whether to return all information about the networks
+
+ @rtype: list of dict or list of str
+ @return: if bulk is true, a list of dictionaries with info about all
+ networks in the cluster, else a list of names of those networks
+
+ """
+ query = []
+ _AppendIf(query, bulk, ("bulk", 1))
+
+ networks = self._SendRequest(HTTP_GET, "/%s/networks" % GANETI_RAPI_VERSION,
+ query, None)
+ if bulk:
+ return networks
+ else:
+ # Non-bulk responses contain per-network dicts; reduce to names only
+ return [n["name"] for n in networks]
+
+ def GetNetwork(self, network):
+ """Gets information about a network.
+
+ @type network: str
+ @param network: name of the network whose info to return
+
+ @rtype: dict
+ @return: info about the network
+
+ """
+ return self._SendRequest(HTTP_GET,
+ "/%s/networks/%s" % (GANETI_RAPI_VERSION, network),
+ None, None)
+
+ def CreateNetwork(self, network_name, network, gateway=None, network6=None,
+ gateway6=None, mac_prefix=None, network_type=None,
+ add_reserved_ips=None, tags=None, dry_run=False):
+ """Creates a new network.
+
+ More details for the optional parameters can be found in the RAPI
+ documentation; they are passed through unchanged in the request body.
+
+ @type network_name: str
+ @param network_name: the name of network to create
+ @type network: str
+ @param network: the network address of the new network
+ @type add_reserved_ips: str or None
+ @param add_reserved_ips: comma-separated string of IPs to reserve
+ (split into a list before sending)
+ @type tags: str or None
+ @param tags: comma-separated string of tags to add to the network
+ (split into a list before sending)
+ @type dry_run: bool
+ @param dry_run: whether to perform a dry run
+
+ @rtype: string
+ @return: job id
+
+ """
+ query = []
+ _AppendDryRunIf(query, dry_run)
+
+ # These two arrive as comma-separated strings; the RAPI body wants lists
+ if add_reserved_ips:
+ add_reserved_ips = add_reserved_ips.split(",")
+
+ if tags:
+ tags = tags.split(",")
+
+ body = {
+ "network_name": network_name,
+ "gateway": gateway,
+ "network": network,
+ "gateway6": gateway6,
+ "network6": network6,
+ "mac_prefix": mac_prefix,
+ "network_type": network_type,
+ "add_reserved_ips": add_reserved_ips,
+ "tags": tags,
+ }
+
+ return self._SendRequest(HTTP_POST, "/%s/networks" % GANETI_RAPI_VERSION,
+ query, body)
+
+ def ConnectNetwork(self, network_name, group_name, mode, link, dry_run=False):
+ """Connects a network to a node group with the given netparams.
+
+ @type network_name: str
+ @param network_name: name of the network to connect
+ @type group_name: str
+ @param group_name: name of the node group to connect the network to
+ @type mode: str
+ @param mode: network mode to use for the connection
+ @type link: str
+ @param link: network link to use for the connection
+ @type dry_run: bool
+ @param dry_run: whether to perform a dry run
+
+ @rtype: string
+ @return: job id
+
+ """
+ body = {
+ "group_name": group_name,
+ "network_mode": mode,
+ "network_link": link,
+ }
+
+ query = []
+ _AppendDryRunIf(query, dry_run)
+
+ return self._SendRequest(HTTP_PUT,
+ ("/%s/networks/%s/connect" %
+ (GANETI_RAPI_VERSION, network_name)), query, body)
+
+ def DisconnectNetwork(self, network_name, group_name, dry_run=False):
+ """Disconnects a network from a node group.
+
+ @type network_name: str
+ @param network_name: name of the network to disconnect
+ @type group_name: str
+ @param group_name: name of the node group to disconnect the network from
+ @type dry_run: bool
+ @param dry_run: whether to perform a dry run
+
+ @rtype: string
+ @return: job id
+
+ """
+ body = {
+ "group_name": group_name,
+ }
+
+ query = []
+ _AppendDryRunIf(query, dry_run)
+
+ return self._SendRequest(HTTP_PUT,
+ ("/%s/networks/%s/disconnect" %
+ (GANETI_RAPI_VERSION, network_name)), query, body)
+
+ def ModifyNetwork(self, network, **kwargs):
+ """Modifies a network.
+
+ More details for parameters can be found in the RAPI documentation.
+
+ @type network: string
+ @param network: Network name
+ @rtype: string
+ @return: job id
+
+ """
+ # Keyword arguments are passed through unchanged as the request body
+ return self._SendRequest(HTTP_PUT,
+ ("/%s/networks/%s/modify" %
+ (GANETI_RAPI_VERSION, network)), None, kwargs)
+
+ def DeleteNetwork(self, network, dry_run=False):
+ """Deletes a network.
+
+ @type network: str
+ @param network: the network to delete
+ @type dry_run: bool
+ @param dry_run: whether to perform a dry run
+
+ @rtype: string
+ @return: job id
+
+ """
+ query = []
+ _AppendDryRunIf(query, dry_run)
+
+ return self._SendRequest(HTTP_DELETE,
+ ("/%s/networks/%s" %
+ (GANETI_RAPI_VERSION, network)), query, None)
+
+ def GetNetworkTags(self, network):
+ """Gets tags for a network.
+
+ @type network: string
+ @param network: Network whose tags to return
+
+ @rtype: list of strings
+ @return: tags for the network
+
+ """
+ return self._SendRequest(HTTP_GET,
+ ("/%s/networks/%s/tags" %
+ (GANETI_RAPI_VERSION, network)), None, None)
+
+ def AddNetworkTags(self, network, tags, dry_run=False):
+ """Adds tags to a network.
+
+ @type network: str
+ @param network: network to add tags to
+ @type tags: list of string
+ @param tags: tags to add to the network
+ @type dry_run: bool
+ @param dry_run: whether to perform a dry run
+
+ @rtype: string
+ @return: job id
+
+ """
+ # Each tag becomes its own "tag" query parameter
+ query = [("tag", t) for t in tags]
+ _AppendDryRunIf(query, dry_run)
+
+ return self._SendRequest(HTTP_PUT,
+ ("/%s/networks/%s/tags" %
+ (GANETI_RAPI_VERSION, network)), query, None)
+
+ def DeleteNetworkTags(self, network, tags, dry_run=False):
+ """Deletes tags from a network.
+
+ @type network: str
+ @param network: network to delete tags from
+ @type tags: list of string
+ @param tags: tags to delete
+ @type dry_run: bool
+ @param dry_run: whether to perform a dry run
+
+ @rtype: string
+ @return: job id
+
+ """
+ # Each tag becomes its own "tag" query parameter
+ query = [("tag", t) for t in tags]
+ _AppendDryRunIf(query, dry_run)
+
+ return self._SendRequest(HTTP_DELETE,
+ ("/%s/networks/%s/tags" %
+ (GANETI_RAPI_VERSION, network)), query, None)
+
def GetGroups(self, bulk=False):
"""Gets all node groups in the cluster.
"""
query = []
- if bulk:
- query.append(("bulk", 1))
+ _AppendIf(query, bulk, ("bulk", 1))
groups = self._SendRequest(HTTP_GET, "/%s/groups" % GANETI_RAPI_VERSION,
query, None)
@type dry_run: bool
@param dry_run: whether to peform a dry run
- @rtype: int
+ @rtype: string
@return: job id
"""
query = []
- if dry_run:
- query.append(("dry-run", 1))
+ _AppendDryRunIf(query, dry_run)
body = {
"name": name,
- "alloc_policy": alloc_policy
+ "alloc_policy": alloc_policy,
}
return self._SendRequest(HTTP_POST, "/%s/groups" % GANETI_RAPI_VERSION,
@type group: string
@param group: Node group name
- @rtype: int
+ @rtype: string
@return: job id
"""
@type dry_run: bool
@param dry_run: whether to peform a dry run
- @rtype: int
+ @rtype: string
@return: job id
"""
query = []
- if dry_run:
- query.append(("dry-run", 1))
+ _AppendDryRunIf(query, dry_run)
return self._SendRequest(HTTP_DELETE,
("/%s/groups/%s" %
@type new_name: string
@param new_name: New node group name
- @rtype: int
+ @rtype: string
@return: job id
"""
return self._SendRequest(HTTP_PUT,
("/%s/groups/%s/rename" %
(GANETI_RAPI_VERSION, group)), None, body)
+
+ def AssignGroupNodes(self, group, nodes, force=False, dry_run=False):
+ """Assigns nodes to a group.
+
+ @type group: string
+ @param group: Node group name
+ @type nodes: list of strings
+ @param nodes: List of nodes to assign to the group
+ @type force: bool
+ @param force: whether to force the assignment
+ @type dry_run: bool
+ @param dry_run: whether to perform a dry run
+
+ @rtype: string
+ @return: job id
+
+ """
+ query = []
+ _AppendForceIf(query, force)
+ _AppendDryRunIf(query, dry_run)
+
+ body = {
+ "nodes": nodes,
+ }
+
+ return self._SendRequest(HTTP_PUT,
+ ("/%s/groups/%s/assign-nodes" %
+ (GANETI_RAPI_VERSION, group)), query, body)
+
+ def GetGroupTags(self, group):
+ """Gets tags for a node group.
+
+ @type group: string
+ @param group: Node group whose tags to return
+
+ @rtype: list of strings
+ @return: tags for the group
+
+ """
+ return self._SendRequest(HTTP_GET,
+ ("/%s/groups/%s/tags" %
+ (GANETI_RAPI_VERSION, group)), None, None)
+
+ def AddGroupTags(self, group, tags, dry_run=False):
+ """Adds tags to a node group.
+
+ @type group: str
+ @param group: group to add tags to
+ @type tags: list of string
+ @param tags: tags to add to the group
+ @type dry_run: bool
+ @param dry_run: whether to perform a dry run
+
+ @rtype: string
+ @return: job id
+
+ """
+ # Each tag becomes its own "tag" query parameter
+ query = [("tag", t) for t in tags]
+ _AppendDryRunIf(query, dry_run)
+
+ return self._SendRequest(HTTP_PUT,
+ ("/%s/groups/%s/tags" %
+ (GANETI_RAPI_VERSION, group)), query, None)
+
+ def DeleteGroupTags(self, group, tags, dry_run=False):
+ """Deletes tags from a node group.
+
+ @type group: str
+ @param group: group to delete tags from
+ @type tags: list of string
+ @param tags: tags to delete
+ @type dry_run: bool
+ @param dry_run: whether to perform a dry run
+
+ @rtype: string
+ @return: job id
+
+ """
+ # Each tag becomes its own "tag" query parameter
+ query = [("tag", t) for t in tags]
+ _AppendDryRunIf(query, dry_run)
+
+ return self._SendRequest(HTTP_DELETE,
+ ("/%s/groups/%s/tags" %
+ (GANETI_RAPI_VERSION, group)), query, None)
+
+ def Query(self, what, fields, qfilter=None):
+ """Retrieves information about resources.
+
+ @type what: string
+ @param what: Resource name, one of L{constants.QR_VIA_RAPI}
+ @type fields: list of string
+ @param fields: Requested fields
+ @type qfilter: None or list
+ @param qfilter: Query filter
+
+ @rtype: string
+ @return: job id
+
+ """
+ body = {
+ "fields": fields,
+ }
+
+ # Send the filter under both the new and the legacy key for
+ # compatibility with older RAPI servers
+ _SetItemIf(body, qfilter is not None, "qfilter", qfilter)
+ # TODO: remove "filter" after 2.7
+ _SetItemIf(body, qfilter is not None, "filter", qfilter)
+
+ # NOTE(review): this PUT looks synchronous; verify against the RAPI
+ # docs whether it returns the query result rather than a job id
+ return self._SendRequest(HTTP_PUT,
+ ("/%s/query/%s" %
+ (GANETI_RAPI_VERSION, what)), None, body)
+
+ def QueryFields(self, what, fields=None):
+ """Retrieves available fields for a resource.
+
+ @type what: string
+ @param what: Resource name, one of L{constants.QR_VIA_RAPI}
+ @type fields: list of string
+ @param fields: Requested fields
+
+ @rtype: dict
+ @return: field definitions (this is a GET request, so the response is
+ returned directly rather than as a job id — verify against RAPI docs)
+
+ """
+ query = []
+
+ # Fields are sent as a single comma-joined query parameter;
+ # _AppendIf with a constant True condition is just an append here
+ if fields is not None:
+ _AppendIf(query, True, ("fields", ",".join(fields)))
+
+ return self._SendRequest(HTTP_GET,
+ ("/%s/query/%s/fields" %
+ (GANETI_RAPI_VERSION, what)), query, None)