NODE_ROLE_REGULAR = "regular"
JOB_STATUS_QUEUED = "queued"
-JOB_STATUS_WAITLOCK = "waiting"
+JOB_STATUS_WAITING = "waiting"
JOB_STATUS_CANCELING = "canceling"
JOB_STATUS_RUNNING = "running"
JOB_STATUS_CANCELED = "canceled"
])
JOB_STATUS_ALL = frozenset([
JOB_STATUS_QUEUED,
- JOB_STATUS_WAITLOCK,
+ JOB_STATUS_WAITING,
JOB_STATUS_CANCELING,
JOB_STATUS_RUNNING,
]) | JOB_STATUS_FINALIZED
+# Legacy name, kept for backwards compatibility with pre-rename callers
+JOB_STATUS_WAITLOCK = JOB_STATUS_WAITING
+
# Internal constants
_REQ_DATA_VERSION_FIELD = "__version__"
_INST_CREATE_REQV1 = "instance-create-reqv1"
_INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"
+_NODE_MIGRATE_REQV1 = "node-migrate-reqv1"
+_NODE_EVAC_RES1 = "node-evac-res1"
_INST_NIC_PARAMS = frozenset(["mac", "ip", "mode", "link"])
_INST_CREATE_V0_DISK_PARAMS = frozenset(["size"])
_INST_CREATE_V0_PARAMS = frozenset([
def RedistributeConfig(self):
"""Tells the cluster to redistribute its configuration files.
+ @rtype: string
@return: job id
"""
@param tags: tags to delete
@type dry_run: bool
@param dry_run: whether to perform a dry run
+ @rtype: string
+ @return: job id
"""
query = [("tag", t) for t in tags]
body.update((key, value) for key, value in kwargs.iteritems()
if key != "dry_run")
else:
- # Old request format (version 0)
-
- # The following code must make sure that an exception is raised when an
- # unsupported setting is requested by the caller. Otherwise this can lead
- # to bugs difficult to find. The interface of this function must stay
- # exactly the same for version 0 and 1 (e.g. they aren't allowed to
- # require different data types).
-
- # Validate disks
- for idx, disk in enumerate(disks):
- unsupported = set(disk.keys()) - _INST_CREATE_V0_DISK_PARAMS
- if unsupported:
- raise GanetiApiError("Server supports request version 0 only, but"
- " disk %s specifies the unsupported parameters"
- " %s, allowed are %s" %
- (idx, unsupported,
- list(_INST_CREATE_V0_DISK_PARAMS)))
-
- assert (len(_INST_CREATE_V0_DISK_PARAMS) == 1 and
- "size" in _INST_CREATE_V0_DISK_PARAMS)
- disk_sizes = [disk["size"] for disk in disks]
-
- # Validate NICs
- if not nics:
- raise GanetiApiError("Server supports request version 0 only, but"
- " no NIC specified")
- elif len(nics) > 1:
- raise GanetiApiError("Server supports request version 0 only, but"
- " more than one NIC specified")
-
- assert len(nics) == 1
-
- unsupported = set(nics[0].keys()) - _INST_NIC_PARAMS
- if unsupported:
- raise GanetiApiError("Server supports request version 0 only, but"
- " NIC 0 specifies the unsupported parameters %s,"
- " allowed are %s" %
- (unsupported, list(_INST_NIC_PARAMS)))
-
- # Validate other parameters
- unsupported = (set(kwargs.keys()) - _INST_CREATE_V0_PARAMS -
- _INST_CREATE_V0_DPARAMS)
- if unsupported:
- allowed = _INST_CREATE_V0_PARAMS.union(_INST_CREATE_V0_DPARAMS)
- raise GanetiApiError("Server supports request version 0 only, but"
- " the following unsupported parameters are"
- " specified: %s, allowed are %s" %
- (unsupported, list(allowed)))
-
- # All required fields for request data version 0
- body = {
- _REQ_DATA_VERSION_FIELD: 0,
- "name": name,
- "disk_template": disk_template,
- "disks": disk_sizes,
- }
-
- # NIC fields
- assert len(nics) == 1
- assert not (set(body.keys()) & set(nics[0].keys()))
- body.update(nics[0])
-
- # Copy supported fields
- assert not (set(body.keys()) & set(kwargs.keys()))
- body.update(dict((key, value) for key, value in kwargs.items()
- if key in _INST_CREATE_V0_PARAMS))
-
- # Merge dictionaries
- for i in (value for key, value in kwargs.items()
- if key in _INST_CREATE_V0_DPARAMS):
- assert not (set(body.keys()) & set(i.keys()))
- body.update(i)
-
- assert not (set(kwargs.keys()) -
- (_INST_CREATE_V0_PARAMS | _INST_CREATE_V0_DPARAMS))
- assert not (set(body.keys()) & _INST_CREATE_V0_DPARAMS)
+ raise GanetiApiError("Server does not support new-style (version 1)"
+ " instance creation requests")
return self._SendRequest(HTTP_POST, "/%s/instances" % GANETI_RAPI_VERSION,
query, body)
@param instance: Instance name
@type ignore_size: bool
@param ignore_size: Whether to ignore recorded size
+ @rtype: string
@return: job id
"""
@type instance: string
@param instance: Instance name
+ @rtype: string
@return: job id
"""
@param tags: tags to delete
@type dry_run: bool
@param dry_run: whether to perform a dry run
+ @rtype: string
+ @return: job id
"""
query = [("tag", t) for t in tags]
while re-assembling disks (in hard-reboot mode only)
@type dry_run: bool
@param dry_run: whether to perform a dry run
+ @rtype: string
+ @return: job id
"""
query = []
("/%s/instances/%s/reboot" %
(GANETI_RAPI_VERSION, instance)), query, None)
- def ShutdownInstance(self, instance, dry_run=False):
+ def ShutdownInstance(self, instance, dry_run=False, no_remember=False):
"""Shuts down an instance.
@type instance: str
@param instance: the instance to shut down
@type dry_run: bool
@param dry_run: whether to perform a dry run
+ @type no_remember: bool
+ @param no_remember: if true, will not record the state change
+ @rtype: string
+ @return: job id
"""
query = []
if dry_run:
query.append(("dry-run", 1))
+ if no_remember:
+ query.append(("no-remember", 1))
return self._SendRequest(HTTP_PUT,
("/%s/instances/%s/shutdown" %
(GANETI_RAPI_VERSION, instance)), query, None)
- def StartupInstance(self, instance, dry_run=False):
+ def StartupInstance(self, instance, dry_run=False, no_remember=False):
"""Starts up an instance.
@type instance: str
@param instance: the instance to start up
@type dry_run: bool
@param dry_run: whether to perform a dry run
+ @type no_remember: bool
+ @param no_remember: if true, will not record the state change
+ @rtype: string
+ @return: job id
"""
query = []
if dry_run:
query.append(("dry-run", 1))
+ if no_remember:
+ query.append(("no-remember", 1))
return self._SendRequest(HTTP_PUT,
("/%s/instances/%s/startup" %
current operating system will be installed again
@type no_startup: bool
@param no_startup: Whether to start the instance automatically
+ @rtype: string
+ @return: job id
"""
if _INST_REINSTALL_REQV1 in self.GetFeatures():
@param mode: Migration mode
@type cleanup: bool
@param cleanup: Whether to clean up a previously failed migration
+ @rtype: string
+ @return: job id
"""
body = {}
("/%s/instances/%s/migrate" %
(GANETI_RAPI_VERSION, instance)), None, body)
+ def FailoverInstance(self, instance, iallocator=None,
+ ignore_consistency=None, target_node=None):
+ """Does a failover of an instance.
+
+ @type instance: string
+ @param instance: Instance name
+ @type iallocator: string
+ @param iallocator: Iallocator for deciding the target node for
+ shared-storage instances
+ @type ignore_consistency: bool
+ @param ignore_consistency: Whether to ignore disk consistency
+ @type target_node: string
+ @param target_node: Target node for shared-storage instances
+ @rtype: string
+ @return: job id
+
+ """
+ body = {}
+
+ if iallocator is not None:
+ body["iallocator"] = iallocator
+
+ if ignore_consistency is not None:
+ body["ignore_consistency"] = ignore_consistency
+
+ if target_node is not None:
+ body["target_node"] = target_node
+
+ return self._SendRequest(HTTP_PUT,
+ ("/%s/instances/%s/failover" %
+ (GANETI_RAPI_VERSION, instance)), None, body)
+
def RenameInstance(self, instance, new_name, ip_check=None, name_check=None):
"""Changes the name of an instance.
@param ip_check: Whether to ensure instance's IP address is inactive
@type name_check: bool
@param name_check: Whether to ensure instance's name is resolvable
+ @rtype: string
+ @return: job id
"""
body = {
@type instance: string
@param instance: Instance name
+ @rtype: dict
+ @return: dictionary containing information about instance's console
"""
return self._SendRequest(HTTP_GET,
def WaitForJobCompletion(self, job_id, period=5, retries=-1):
"""Polls cluster for job status until completion.
- Completion is defined as any of the following states: "error",
- "canceled", or "success".
+ Completion is defined as any of the states listed in
+ L{JOB_STATUS_FINALIZED}.
@type job_id: string
@param job_id: job id to watch
-
@type period: int
@param period: how often to poll for status (optional, default 5s)
-
@type retries: int
@param retries: how many time to poll before giving up
(optional, default -1 means unlimited)
@rtype: bool
- @return: True if job succeeded or False if failed/status timeout
+ @return: C{True} if job succeeded or C{False} if failed/status timeout
+ @deprecated: It is recommended to use L{WaitForJobChange} wherever
+ possible; L{WaitForJobChange} returns immediately after a job changes and
+ does not use polling
"""
while retries != 0:
job_result = self.GetJobStatus(job_id)
- if not job_result or job_result["status"] in ("error", "canceled"):
- return False
- if job_result["status"] == "success":
+
+ if job_result and job_result["status"] == JOB_STATUS_SUCCESS:
return True
- time.sleep(period)
+ elif not job_result or job_result["status"] in JOB_STATUS_FINALIZED:
+ return False
+
+ if period:
+ time.sleep(period)
+
if retries > 0:
retries -= 1
+
return False
def WaitForJobChange(self, job_id, fields, prev_job_info, prev_log_serial):
@type job_id: string
@param job_id: Job ID for which to wait
+ @rtype: None or dict
+ @return: C{None} if no changes have been detected and a dict with two keys,
+ C{job_info} and C{log_entries} otherwise.
"""
body = {
@param job_id: id of the job to delete
@type dry_run: bool
@param dry_run: whether to perform a dry run
+ @rtype: tuple
+ @return: tuple containing the result (bool) and a message (string)
"""
query = []
None, None)
def EvacuateNode(self, node, iallocator=None, remote_node=None,
- dry_run=False, early_release=False):
+ dry_run=False, early_release=None,
+ primary=None, secondary=None, accept_old=False):
"""Evacuates instances from a Ganeti node.
@type node: str
@param dry_run: whether to perform a dry run
@type early_release: bool
@param early_release: whether to enable parallelization
-
- @rtype: list
- @return: list of (job ID, instance name, new secondary node); if
- dry_run was specified, then the actual move jobs were not
- submitted and the job IDs will be C{None}
+ @type primary: bool
+ @param primary: Whether to evacuate primary instances
+ @type secondary: bool
+ @param secondary: Whether to evacuate secondary instances
+ @type accept_old: bool
+ @param accept_old: Whether caller is ready to accept old-style (pre-2.5)
+ results
+
+ @rtype: string, or a list for pre-2.5 results
+ @return: Job ID or, if C{accept_old} is set and server is pre-2.5,
+ list of (job ID, instance name, new secondary node); if dry_run was
+ specified, then the actual move jobs were not submitted and the job IDs
+ will be C{None}
@raises GanetiApiError: if an iallocator and remote_node are both
specified
raise GanetiApiError("Only one of iallocator or remote_node can be used")
query = []
- if iallocator:
- query.append(("iallocator", iallocator))
- if remote_node:
- query.append(("remote_node", remote_node))
if dry_run:
query.append(("dry-run", 1))
- if early_release:
- query.append(("early_release", 1))
+
+ if _NODE_EVAC_RES1 in self.GetFeatures():
+ body = {}
+
+ if iallocator is not None:
+ body["iallocator"] = iallocator
+ if remote_node is not None:
+ body["remote_node"] = remote_node
+ if early_release is not None:
+ body["early_release"] = early_release
+ if primary is not None:
+ body["primary"] = primary
+ if secondary is not None:
+ body["secondary"] = secondary
+ else:
+ # Pre-2.5 request format
+ body = None
+
+ if not accept_old:
+ raise GanetiApiError("Server is version 2.4 or earlier and caller does"
+ " not accept old-style results (parameter"
+ " accept_old)")
+
+ if primary or primary is None or not (secondary is None or secondary):
+ raise GanetiApiError("Server can only evacuate secondary instances")
+
+ if iallocator:
+ query.append(("iallocator", iallocator))
+ if remote_node:
+ query.append(("remote_node", remote_node))
+ if early_release:
+ query.append(("early_release", 1))
return self._SendRequest(HTTP_POST,
("/%s/nodes/%s/evacuate" %
- (GANETI_RAPI_VERSION, node)), query, None)
+ (GANETI_RAPI_VERSION, node)), query, body)
- def MigrateNode(self, node, mode=None, dry_run=False):
+ def MigrateNode(self, node, mode=None, dry_run=False, iallocator=None,
+ target_node=None):
"""Migrates all primary instances from a node.
@type node: str
otherwise the hypervisor default will be used
@type dry_run: bool
@param dry_run: whether to perform a dry run
+ @type iallocator: string
+ @param iallocator: instance allocator to use
+ @type target_node: string
+ @param target_node: Target node for shared-storage instances
@rtype: string
@return: job id
"""
query = []
- if mode is not None:
- query.append(("mode", mode))
if dry_run:
query.append(("dry-run", 1))
- return self._SendRequest(HTTP_POST,
- ("/%s/nodes/%s/migrate" %
- (GANETI_RAPI_VERSION, node)), query, None)
+ if _NODE_MIGRATE_REQV1 in self.GetFeatures():
+ body = {}
+
+ if mode is not None:
+ body["mode"] = mode
+ if iallocator is not None:
+ body["iallocator"] = iallocator
+ if target_node is not None:
+ body["target_node"] = target_node
+
+ assert len(query) <= 1
+
+ return self._SendRequest(HTTP_POST,
+ ("/%s/nodes/%s/migrate" %
+ (GANETI_RAPI_VERSION, node)), query, body)
+ else:
+ # Use old request format
+ if target_node is not None:
+ raise GanetiApiError("Server does not support specifying target node"
+ " for node migration")
+
+ if mode is not None:
+ query.append(("mode", mode))
+
+ return self._SendRequest(HTTP_POST,
+ ("/%s/nodes/%s/migrate" %
+ (GANETI_RAPI_VERSION, node)), query, None)
def GetNodeRole(self, node):
"""Gets the current role for a node.
("/%s/groups/%s/assign-nodes" %
(GANETI_RAPI_VERSION, group)), query, body)
+ def GetGroupTags(self, group):
+ """Gets tags for a node group.
+
+ @type group: string
+ @param group: Node group whose tags to return
+
+ @rtype: list of strings
+ @return: tags for the group
+
+ """
+ return self._SendRequest(HTTP_GET,
+ ("/%s/groups/%s/tags" %
+ (GANETI_RAPI_VERSION, group)), None, None)
+
+ def AddGroupTags(self, group, tags, dry_run=False):
+ """Adds tags to a node group.
+
+ @type group: str
+ @param group: group to add tags to
+ @type tags: list of string
+ @param tags: tags to add to the group
+ @type dry_run: bool
+ @param dry_run: whether to perform a dry run
+
+ @rtype: string
+ @return: job id
+
+ """
+ query = [("tag", t) for t in tags]
+ if dry_run:
+ query.append(("dry-run", 1))
+
+ return self._SendRequest(HTTP_PUT,
+ ("/%s/groups/%s/tags" %
+ (GANETI_RAPI_VERSION, group)), query, None)
+
+ def DeleteGroupTags(self, group, tags, dry_run=False):
+ """Deletes tags from a node group.
+
+ @type group: str
+ @param group: group to delete tags from
+ @type tags: list of string
+ @param tags: tags to delete
+ @type dry_run: bool
+ @param dry_run: whether to perform a dry run
+ @rtype: string
+ @return: job id
+
+ """
+ query = [("tag", t) for t in tags]
+ if dry_run:
+ query.append(("dry-run", 1))
+
+ return self._SendRequest(HTTP_DELETE,
+ ("/%s/groups/%s/tags" %
+ (GANETI_RAPI_VERSION, group)), query, None)
+
def Query(self, what, fields, filter_=None):
"""Retrieves information about resources.
@type fields: list of string
@param fields: Requested fields
@type filter_: None or list
- @param filter_ Query filter
+ @param filter_: Query filter
@rtype: string
@return: job id