CDIR=`pwd` && \
cd $$TMPDIR && \
mv lib ganeti && \
- epydoc --conf $$CDIR/epydoc.conf -o $$CDIR/doc/api \
+ epydoc -v --conf $$CDIR/epydoc.conf -o $$CDIR/doc/api \
) ; \
rm -rf $$TMPDIR ; \
}
def __init__(self, address, rqhandler):
"""IOServer constructor
- Args:
- address: the address to bind this IOServer to
- rqhandler: RequestHandler type object
+ @param address: the address to bind this IOServer to
+ @param rqhandler: RequestHandler type object
"""
SocketServer.UnixStreamServer.__init__(self, address, rqhandler)
def ParseOptions():
"""Parse the command line options.
- Returns:
- (options, args) as from OptionParser.parse_args()
+ @return: (options, args) as from OptionParser.parse_args()
"""
parser = OptionParser(description="Ganeti master daemon",
def ParseOptions():
"""Parse the command line options.
- Returns:
- (options, args) as from OptionParser.parse_args()
+ @return: (options, args) as from OptionParser.parse_args()
"""
parser = OptionParser(description="Ganeti node daemon",
def ParseOptions():
"""Parse the command line options.
- Returns:
- (options, args) as from OptionParser.parse_args()
+ @return: (options, args) as from OptionParser.parse_args()
"""
parser = optparse.OptionParser(description="Ganeti Remote API",
def Indent(s, prefix='| '):
"""Indent a piece of text with a given prefix before each line.
- Args:
- s: The string to indent
- prefix: The string to prepend each line.
+ @param s: the string to indent
+ @param prefix: the string to prepend each line
"""
return "%s%s\n" % (prefix, ('\n' + prefix).join(s.splitlines()))
def NumberOfRestartAttempts(self, instance):
"""Returns number of previous restart attempts.
- Args:
- instance - the instance to look up.
+ @type instance: L{Instance}
+ @param instance: the instance to look up
"""
idata = self._data["instance"]
def RecordRestartAttempt(self, instance):
"""Record a restart attempt.
- Args:
- instance - the instance being restarted
+ @type instance: L{Instance}
+ @param instance: the instance being restarted
"""
idata = self._data["instance"]
inst[KEY_RESTART_COUNT] = inst.get(KEY_RESTART_COUNT, 0) + 1
def RemoveInstance(self, instance):
- """Update state to reflect that a machine is running, i.e. remove record.
+ """Update state to reflect that a machine is running.
- Args:
- instance - the instance to remove from books
+ This method removes the record for a named instance (as we only
+ track down instances).
- This method removes the record for a named instance.
+ @type instance: L{Instance}
+ @param instance: the instance to remove from books
"""
idata = self._data["instance"]
class Instance(object):
"""Abstraction for a Virtual Machine instance.
- Methods:
- Restart(): issue a command to restart the represented machine.
-
"""
def __init__(self, name, state, autostart):
self.name = name
def ParseOptions():
"""Parse the command line options.
- Returns:
- (options, args) as from OptionParser.parse_args()
+ @return: (options, args) as from OptionParser.parse_args()
"""
parser = OptionParser(description="Ganeti cluster watcher",
from the cluster.
If processing is successful, then it raises an
- L{errors.GanetiQuitException} which is used as a special case to
+ L{errors.QuitGanetiException} which is used as a special case to
shutdown the node daemon.
"""
@note: This is intended to be called recursively.
- @type disk: L{objects.disk}
+ @type disk: L{objects.Disk}
@param disk: the disk object we should remove
@rtype: boolean
@return: the success of the operation
def ShutdownBlockDevice(disk):
"""Shut down a block device.
- First, if the device is assembled (can L{Attach()}), then the device
- is shutdown. Then the children of the device are shutdown.
+ First, if the device is assembled (Attach() is successful), then
+ the device is shutdown. Then the children of the device are
+ shutdown.
This function is called recursively. Note that we don't cache the
children or such, as oppossed to assemble, shutdown of different
@rtype: disk
@return:
a list of (mirror_done, estimated_time) tuples, which
- are the result of L{bdev.BlockDevice.CombinedSyncStatus}
+ are the result of L{bdev.BlockDev.CombinedSyncStatus}
@raise errors.BlockDeviceError: if any of the disks cannot be
found
def JobQueueRename(old, new):
"""Renames a job queue file.
- This is just a wrapper over L{os.rename} with proper checking.
+ This is just a wrapper over os.rename with proper checking.
@type old: str
@param old: the old (actual) file name
node nor not
@type iv_name: str
@param iv_name: the instance-visible name of the
- device, as in L{objects.Disk.iv_name}
+ device, as in objects.Disk.iv_name
@rtype: None
If this device is a mirroring device, this function returns the
status of the mirror.
- Returns:
- (sync_percent, estimated_time, is_degraded, ldisk)
-
If sync_percent is None, it means the device is not syncing.
If estimated_time is None, it means we can't estimate
data. This is only valid for some devices, the rest will always
return False (not degraded).
+ @rtype: tuple
+ @return: (sync_percent, estimated_time, is_degraded, ldisk)
+
"""
return None, None, False, False
def Grow(self, amount):
"""Grow the block device.
- Arguments:
- amount: the amount (in mebibytes) to grow with
-
- Returns: None
+ @param amount: the amount (in mebibytes) to grow with
"""
raise NotImplementedError
def GetPVInfo(vg_name):
"""Get the free space info for PVs in a volume group.
- Args:
- vg_name: the volume group name
+ @param vg_name: the volume group name
- Returns:
- list of (free_space, name) with free_space in mebibytes
+ @rtype: list
+ @return: list of tuples (free_space, name) with free_space in mebibytes
"""
command = ["pvs", "--noheadings", "--nosuffix", "--units=m",
If this device is a mirroring device, this function returns the
status of the mirror.
- Returns:
- (sync_percent, estimated_time, is_degraded, ldisk)
-
For logical volumes, sync_percent and estimated_time are always
None (no recovery in progress, as we don't handle the mirrored LV
case). The is_degraded parameter is the inverse of the ldisk
The status was already read in Attach, so we just return it.
+ @rtype: tuple
+ @return: (sync_percent, estimated_time, is_degraded, ldisk)
+
"""
return None, None, self._degraded, self._degraded
def _MassageProcData(data):
"""Transform the output of _GetProdData into a nicer form.
- Returns:
- a dictionary of minor: joined lines from /proc/drbd for that minor
+ @return: a dictionary of minor: joined lines from /proc/drbd
+ for that minor
"""
lmatch = re.compile("^ *([0-9]+):.*$")
"""Return the DRBD version.
This will return a dict with keys:
- k_major,
- k_minor,
- k_point,
- api,
- proto,
- proto2 (only on drbd > 8.2.X)
+ - k_major
+ - k_minor
+ - k_point
+ - api
+ - proto
+ - proto2 (only on drbd > 8.2.X)
"""
proc_data = cls._GetProcData()
def GetSyncStatus(self):
"""Returns the sync status of the device.
- Returns:
- (sync_percent, estimated_time, is_degraded)
If sync_percent is None, it means all is ok
If estimated_time is None, it means we can't esimate
We compute the ldisk parameter based on wheter we have a local
disk or not.
+ @rtype: tuple
+ @return: (sync_percent, estimated_time, is_degraded, ldisk)
+
"""
if self.minor is None and not self.Attach():
raise errors.BlockDeviceError("Can't attach to device in GetSyncStatus")
def Remove(self):
"""Remove the file backing the block device.
- Returns:
- boolean indicating wheter removal of file was successful or not.
+ @rtype: boolean
+ @return: True if the removal was successful
"""
if not os.path.exists(self.dev_path):
Check if this file already exists.
- Returns:
- boolean indicating if file exists or not.
+ @rtype: boolean
+ @return: True if file exists
"""
self.attached = os.path.exists(self.dev_path)
def Create(cls, unique_id, children, size):
"""Create a new file.
- Args:
- children:
- size: integer size of file in MiB
+ @param size: the size of file in MiB
- Returns:
- A ganeti.bdev.FileStorage object.
+ @rtype: L{bdev.FileStorage}
+ @return: an instance of FileStorage
"""
if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
def _InitSSHSetup(node):
"""Setup the SSH configuration for the cluster.
-
This generates a dsa keypair for root, adds the pub key to the
permitted hosts and adds the hostkey to its own known hosts.
- Args:
- node: the name of this host as a fqdn
+ @param node: the name of this host as an FQDN
"""
priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)
node, and no instances.
@type version: int
- @param version: Configuration version
- @type cluster_config: objects.Cluster
- @param cluster_config: Cluster configuration
- @type master_node_config: objects.Node
- @param master_node_config: Master node configuration
- @type file_name: string
- @param file_name: Configuration file path
-
- @rtype: ssconf.SimpleConfigWriter
- @returns: Initialized config instance
+ @param version: configuration version
+ @type cluster_config: L{objects.Cluster}
+ @param cluster_config: cluster configuration
+ @type master_node_config: L{objects.Node}
+ @param master_node_config: master node configuration
+ @type cfg_file: string
+ @param cfg_file: configuration file path
+
+ @rtype: L{ssconf.SimpleConfigWriter}
+ @return: initialized config instance
"""
nodes = {
def _ParseArgs(argv, commands, aliases):
- """Parses the command line and return the function which must be
- executed together with its arguments
+ """Parser for the command line arguments.
- Arguments:
- argv: the command line
+ This function parses the arguments and returns the function which
+ must be executed together with its (modified) arguments.
- commands: dictionary with special contents, see the design doc for
- cmdline handling
- aliases: dictionary with command aliases {'alias': 'target, ...}
+ @param argv: the command line
+ @param commands: dictionary with special contents, see the design
+ doc for cmdline handling
+ @param aliases: dictionary with command aliases {'alias': 'target', ...}
"""
if len(argv) == 0:
def AskUser(text, choices=None):
"""Ask the user a question.
- Args:
- text - the question to ask.
+ @param text: the question to ask
- choices - list with elements tuples (input_char, return_value,
- description); if not given, it will default to: [('y', True,
- 'Perform the operation'), ('n', False, 'Do no do the operation')];
- note that the '?' char is reserved for help
+ @param choices: list with elements tuples (input_char, return_value,
+ description); if not given, it will default to: [('y', True,
+ 'Perform the operation'), ('n', False, 'Do no do the operation')];
+ note that the '?' char is reserved for help
- Returns: one of the return values from the choices list; if input is
- not possible (i.e. not running with a tty, we return the last entry
- from the list
+ @return: one of the return values from the choices list; if input is
+ not possible (i.e. not running with a tty), we return the last
+ entry from the list
"""
if choices is None:
def _ValidateConfig(data):
+ """Verifies that a configuration objects looks valid.
+
+ This only verifies the version of the configuration.
+
+ @raise errors.ConfigurationError: if the version differs from what
+ we expect
+
+ """
if data.version != constants.CONFIG_VERSION:
raise errors.ConfigurationError("Cluster configuration version"
" mismatch, got %s instead of %s" %
This checks the current node, instances and disk names for
duplicates.
- Args:
- - exceptions: a list with some other names which should be checked
- for uniqueness (used for example when you want to get
- more than one id at one time without adding each one in
- turn to the config file
+ @param exceptions: a list with some other names which should be checked
+ for uniqueness (used for example when you want to get
+ more than one id at one time without adding each one in
+ turn to the config file)
- Returns: the unique id as a string
+ @rtype: string
+ @return: the unique id
"""
existing = set()
def _AllMACs(self):
"""Return all MACs present in the config.
+ @rtype: list
+ @return: the list of all MACs
+
"""
result = []
for instance in self._config_data.instances.values():
def _AllDRBDSecrets(self):
"""Return all DRBD secrets present in the config.
+ @rtype: list
+ @return: the list of all DRBD secrets
+
"""
def helper(disk, result):
"""Recursively gather secrets from this disk."""
def _ComputeDRBDMap(self, instance):
"""Compute the used DRBD minor/nodes.
- Return: dictionary of node_name: dict of minor: instance_name. The
- returned dict will have all the nodes in it (even if with an empty
- list).
+ @return: dictionary of node_name: dict of minor: instance_name;
+ the returned dict will have all the nodes in it (even if with
+ an empty list).
"""
def _AppendUsedPorts(instance_name, disk, used):
def GetHostKey(self):
"""Return the rsa hostkey from the config.
- Args: None
+ @rtype: string
+ @return: the rsa hostkey
- Returns: rsa hostkey
"""
return self._config_data.cluster.rsahostkeypub
This should be used after creating a new instance.
- Args:
- instance: the instance object
+ @type instance: L{objects.Instance}
+ @param instance: the instance object
+
"""
if not isinstance(instance, objects.Instance):
raise errors.ProgrammerError("Invalid type passed to AddInstance")
def GetInstanceList(self):
"""Get the list of instances.
- Returns:
- array of instances, ex. ['instance2.example.com','instance1.example.com']
- these contains all the instances, also the ones in Admin_down state
+ @return: array of instances, ex. ['instance2.example.com',
+ 'instance1.example.com']
"""
return self._UnlockedGetInstanceList()
It takes the information from the configuration file. Other informations of
an instance are taken from the live systems.
- Args:
- instance: name of the instance, ex instance1.example.com
+ @param instance_name: name of the instance, e.g.
+ I{instance1.example.com}
- Returns:
- the instance object
+ @rtype: L{objects.Instance}
+ @return: the instance object
"""
return self._UnlockedGetInstanceInfo(instance_name)
def AddNode(self, node):
"""Add a node to the configuration.
- Args:
- node: an object.Node instance
+ @type node: L{objects.Node}
+ @param node: a Node instance
"""
logging.info("Adding node %s to configuration" % node.name)
def _UnlockedGetNodeInfo(self, node_name):
"""Get the configuration of a node, as stored in the config.
- This function is for internal use, when the config lock is already held.
+ This function is for internal use, when the config lock is already
+ held.
- Args: node: nodename (tuple) of the node
+ @param node_name: the node name, e.g. I{node1.example.com}
- Returns: the node object
+ @rtype: L{objects.Node}
+ @return: the node object
"""
if node_name not in self._config_data.nodes:
def GetNodeInfo(self, node_name):
"""Get the configuration of a node, as stored in the config.
- Args: node: nodename (tuple) of the node
+ This is just a locked wrapper over L{_UnlockedGetNodeInfo}.
- Returns: the node object
+ @param node_name: the node name, e.g. I{node1.example.com}
+
+ @rtype: L{objects.Node}
+ @return: the node object
"""
return self._UnlockedGetNodeInfo(node_name)
def _UnlockedGetNodeList(self):
"""Return the list of nodes which are in the configuration.
- This function is for internal use, when the config lock is already held.
+ This function is for internal use, when the config lock is already
+ held.
+
+ @rtype: list
"""
return self._config_data.nodes.keys()
def _OpenConfig(self):
"""Read the config data from disk.
- In case we already have configuration data and the config file has
- the same mtime as when we read it, we skip the parsing of the
- file, since de-serialisation could be slow.
-
"""
f = open(self._cfg_file, 'r')
try:
def GetClusterInfo(self):
"""Returns informations about the cluster
- Returns:
- the cluster object
+ @rtype: L{objects.Cluster}
+ @return: the cluster object
"""
return self._config_data.cluster
that all modified objects will be saved, but the target argument
is the one the caller wants to ensure that it's saved.
+ @param target: an instance of either L{objects.Cluster},
+ L{objects.Node} or L{objects.Instance} which is existing in
+ the cluster
+
"""
if self._config_data is None:
raise errors.ProgrammerError("Configuration file not read,"
error should returned to the caller, and the second one will be the returned
result (either as an error or as a normal result).
- Examples:
+ Examples::
+
# Return a result of "True" to the caller, but quit ganeti afterwards
raise QuitGanetiException(False, True)
# Send an error to the caller, and quit ganeti
@type mainloop: ganeti.daemon.Mainloop
@param mainloop: Mainloop used to poll for I/O events
- @type local_addess: string
+ @type local_address: string
@param local_address: Local IP address to bind to
@type port: int
@param port: TCP port to listen on
def GetInstanceInfo(self, instance_name):
"""Get instance properties.
- Args:
- instance_name: the instance name
+ @param instance_name: the instance name
- Returns:
- (name, id, memory, vcpus, state, times)
+ @return: tuple (name, id, memory, vcpus, state, times)
"""
raise NotImplementedError
def GetAllInstancesInfo(self):
"""Get properties of all instances.
- Returns:
- [(name, id, memory, vcpus, stat, times),...]
+ @return: list of tuples (name, id, memory, vcpus, stat, times)
+
"""
raise NotImplementedError
def GetNodeInfo(self):
"""Return information about the node.
- The return value is a dict, which has to have the following items:
- (all values in MiB)
- - memory_total: the total memory size on the node
- - memory_free: the available memory on the node for instances
- - memory_dom0: the memory used by the node itself, if available
+ @return: a dict with the following keys (values in MiB):
+ - memory_total: the total memory size on the node
+ - memory_free: the available memory on the node for instances
+ - memory_dom0: the memory used by the node itself, if available
"""
raise NotImplementedError
def GetInstanceInfo(self, instance_name):
"""Get instance properties.
- Args:
- instance_name: the instance name
+ @param instance_name: the instance name
+
+ @return: tuple of (name, id, memory, vcpus, stat, times)
- Returns:
- (name, id, memory, vcpus, stat, times)
"""
file_name = "%s/%s" % (self._ROOT_DIR, instance_name)
if not os.path.exists(file_name):
def GetAllInstancesInfo(self):
"""Get properties of all instances.
- Returns:
- [(name, id, memory, vcpus, stat, times),...]
+ @return: list of tuples (name, id, memory, vcpus, stat, times)
+
"""
data = []
for file_name in os.listdir(self._ROOT_DIR):
def GetNodeInfo(self):
"""Return information about the node.
- The return value is a dict, which has to have the following items:
- (all values in MiB)
- - memory_total: the total memory size on the node
- - memory_free: the available memory on the node for instances
- - memory_dom0: the memory used by the node itself, if available
+ @return: a dict with the following keys (values in MiB):
+ - memory_total: the total memory size on the node
+ - memory_free: the available memory on the node for instances
+ - memory_dom0: the memory used by the node itself, if available
"""
# global ram usage from the xm info command
def ListInstances(self):
"""Get the list of running instances.
- We can do this by listing our live instances directory and checking whether
- the associated kvm process is still alive.
+ We can do this by listing our live instances directory and
+ checking whether the associated kvm process is still alive.
"""
result = []
def GetInstanceInfo(self, instance_name):
"""Get instance properties.
- Args:
- instance_name: the instance name
+ @param instance_name: the instance name
+
+ @return: tuple (name, id, memory, vcpus, stat, times)
- Returns:
- (name, id, memory, vcpus, stat, times)
"""
pidfile = "%s/%s" % (self._PIDS_DIR, instance_name)
pid = utils.ReadPidFile(pidfile)
def GetAllInstancesInfo(self):
"""Get properties of all instances.
- Returns:
- [(name, id, memory, vcpus, stat, times),...]
+ @return: list of tuples (name, id, memory, vcpus, stat, times)
+
"""
data = []
for name in os.listdir(self._PIDS_DIR):
def GetNodeInfo(self):
"""Return information about the node.
- The return value is a dict, which has to have the following items:
- (all values in MiB)
- - memory_total: the total memory size on the node
- - memory_free: the available memory on the node for instances
- - memory_dom0: the memory used by the node itself, if available
+ @return: a dict with the following keys (values in MiB):
+ - memory_total: the total memory size on the node
+ - memory_free: the available memory on the node for instances
+ - memory_dom0: the memory used by the node itself, if available
"""
# global ram usage from the xm info command
def _GetXMList(include_node):
"""Return the list of running instances.
- If the `include_node` argument is True, then we return information
+ If the include_node argument is True, then we return information
for dom0 also, otherwise we filter that from the return value.
- The return value is a list of (name, id, memory, vcpus, state, time spent)
+ @return: list of (name, id, memory, vcpus, state, time spent)
"""
for dummy in range(5):
def GetInstanceInfo(self, instance_name):
"""Get instance properties.
- Args:
- instance_name: the instance name
+ @param instance_name: the instance name
+
+ @return: tuple (name, id, memory, vcpus, stat, times)
- Returns:
- (name, id, memory, vcpus, stat, times)
"""
xm_list = self._GetXMList(instance_name=="Domain-0")
result = None
def GetAllInstancesInfo(self):
"""Get properties of all instances.
- Returns:
- [(name, id, memory, vcpus, stat, times),...]
+ @return: list of tuples (name, id, memory, vcpus, stat, times)
+
"""
xm_list = self._GetXMList(False)
return xm_list
def StartInstance(self, instance, block_devices, extra_args):
- """Start an instance."""
+ """Start an instance.
+
+ """
self._WriteConfigFile(instance, block_devices, extra_args)
result = utils.RunCmd(["xm", "create", instance.name])
result.output))
def StopInstance(self, instance, force=False):
- """Stop an instance."""
+ """Stop an instance.
+
+ """
self._RemoveConfigFile(instance)
if force:
command = ["xm", "destroy", instance.name]
(instance.name, result.fail_reason))
def RebootInstance(self, instance):
- """Reboot an instance."""
+ """Reboot an instance.
+
+ """
result = utils.RunCmd(["xm", "reboot", instance.name])
if result.failed:
def GetNodeInfo(self):
"""Return information about the node.
- The return value is a dict, which has to have the following items:
- (all values in MiB)
- - memory_total: the total memory size on the node
- - memory_free: the available memory on the node for instances
- - memory_dom0: the memory used by the node itself, if available
+ @return: a dict with the following keys (values in MiB):
+ - memory_total: the total memory size on the node
+ - memory_free: the available memory on the node for instances
+ - memory_dom0: the memory used by the node itself, if available
"""
# note: in xen 3, memory has changed to total_memory
This method builds the xen config disk directive according to the
given disk_template and block_devices.
- Args:
- disk_template: String containing instance disk template
- block_devices: List[tuple1,tuple2,...]
- tuple: (cfdev, rldev)
- cfdev: dict containing ganeti config disk part
- rldev: ganeti.bdev.BlockDev object
+ @param disk_template: string containing instance disk template
+ @param block_devices: list of tuples (cfdev, rldev):
+ - cfdev: dict containing ganeti config disk part
+ - rldev: ganeti.bdev.BlockDev object
- Returns:
- String containing disk directive for xen instance config file
+ @return: string containing disk directive for xen instance config file
"""
FILE_DRIVER_MAP = {
"""Archives a job.
@type job_id: string
- @param job_id: Job ID of job to be archived.
+ @param job_id: the ID of job to be archived
"""
logging.info("Archiving job %s", job_id)
def _is_owned(self, shared=-1):
"""Is the current thread somehow owning the lock at this time?
- Args:
- shared:
- < 0: check for any type of ownership (default)
- 0: check for exclusive ownership
- > 0: check for shared ownership
+ @param shared:
+ - < 0: check for any type of ownership (default)
+ - 0: check for exclusive ownership
+ - > 0: check for shared ownership
"""
self.__lock.acquire()
"""Wait on the given condition, and raise an exception if the current lock
is declared deleted in the meantime.
- Args:
- c: condition to wait on
+ @param c: the condition to wait on
"""
c.wait()
def acquire(self, blocking=1, shared=0):
"""Acquire a shared lock.
- Args:
- shared: whether to acquire in shared mode. By default an exclusive lock
- will be acquired.
- blocking: whether to block while trying to acquire or to operate in
- try-lock mode. this locking mode is not supported yet.
+ @param shared: whether to acquire in shared mode; by default an
+ exclusive lock will be acquired
+ @param blocking: whether to block while trying to acquire or to
+ operate in try-lock mode (this locking mode is not supported yet)
"""
if not blocking:
acquired in exclusive mode if you don't already own it, then the lock
will be put in a state where any future and pending acquire() fail.
- Args:
- blocking: whether to block while trying to acquire or to operate in
- try-lock mode. this locking mode is not supported yet unless
- you are already holding exclusively the lock.
+ @param blocking: whether to block while trying to acquire or to
+ operate in try-lock mode. This locking mode is not supported
+ yet unless you are already holding exclusively the lock.
"""
self.__lock.acquire()
def __init__(self, members=None):
"""Constructs a new LockSet.
- Args:
- members: initial members of the set
+ @param members: initial members of the set
"""
# Used internally to guarantee coherency.
def acquire(self, names, blocking=1, shared=0):
"""Acquire a set of resource locks.
- Args:
- names: the names of the locks which shall be acquired.
- (special lock names, or instance/node names)
- shared: whether to acquire in shared mode. By default an exclusive lock
- will be acquired.
- blocking: whether to block while trying to acquire or to operate in
- try-lock mode. this locking mode is not supported yet.
+ @param names: the names of the locks which shall be acquired
+ (special lock names, or instance/node names)
+ @param shared: whether to acquire in shared mode; by default an
+ exclusive lock will be acquired
+ @param blocking: whether to block while trying to acquire or to
+ operate in try-lock mode (this locking mode is not supported yet)
- Returns:
- True: when all the locks are successfully acquired
+ @return: True when all the locks are successfully acquired
- Raises:
- errors.LockError: when any lock we try to acquire has been deleted
- before we succeed. In this case none of the locks requested will be
- acquired.
+ @raise errors.LockError: when any lock we try to acquire has
+ been deleted before we succeed. In this case none of the
+ locks requested will be acquired.
"""
if not blocking:
You must have acquired the locks, either in shared or in exclusive mode,
before releasing them.
- Args:
- names: the names of the locks which shall be released.
- (defaults to all the locks acquired at that level).
+ @param names: the names of the locks which shall be released
+ (defaults to all the locks acquired at that level).
"""
assert self._is_owned(), "release() on lock set while not owner"
def add(self, names, acquired=0, shared=0):
"""Add a new set of elements to the set
- Args:
- names: names of the new elements to add
- acquired: pre-acquire the new resource?
- shared: is the pre-acquisition shared?
+ @param names: names of the new elements to add
+ @param acquired: pre-acquire the new resource?
+ @param shared: is the pre-acquisition shared?
"""
# Check we don't already own locks at this level
You can either not hold anything in the lockset or already hold a superset
of the elements you want to delete, exclusively.
- Args:
- names: names of the resource to remove.
- blocking: whether to block while trying to acquire or to operate in
- try-lock mode. this locking mode is not supported yet unless
- you are already holding exclusively the locks.
+ @param names: names of the resource to remove.
+ @param blocking: whether to block while trying to acquire or to
+ operate in try-lock mode (this locking mode is not supported
+ yet unless you are already holding exclusively the locks)
- Returns:
- A list of lock which we removed. The list is always equal to the names
- list if we were holding all the locks exclusively.
+ @return: a list of locks which we removed; the list is always
+ equal to the names list if we were holding all the locks
+ exclusively
"""
if not blocking and not self._is_owned():
There should be only a GanetiLockManager object at any time, so this
function raises an error if this is not the case.
- Args:
- nodes: list of node names
- instances: list of instance names
+ @param nodes: list of node names
+ @param instances: list of instance names
"""
- assert self.__class__._instance is None, "double GanetiLockManager instance"
+ assert self.__class__._instance is None, \
+ "double GanetiLockManager instance"
+
self.__class__._instance = self
# The keyring contains all the locks, at their level and in the correct
def _names(self, level):
"""List the lock names at the given level.
- Used for debugging/testing purposes.
- Args:
- level: the level whose list of locks to get
+ This can be used for debugging/testing purposes.
+
+ @param level: the level whose list of locks to get
"""
assert level in LEVELS, "Invalid locking level %s" % level
return BGL in self.__keyring[LEVEL_CLUSTER]._list_owned()
def _contains_BGL(self, level, names):
- """Check if acting on the given level and set of names will change the
- status of the Big Ganeti Lock.
+ """Check if the level contains the BGL.
+
+ Check if acting on the given level and set of names will change
+ the status of the Big Ganeti Lock.
"""
return level == LEVEL_CLUSTER and (names is None or BGL in names)
def acquire(self, level, names, blocking=1, shared=0):
"""Acquire a set of resource locks, at the same level.
- Args:
- level: the level at which the locks shall be acquired.
- It must be a memmber of LEVELS.
- names: the names of the locks which shall be acquired.
- (special lock names, or instance/node names)
- shared: whether to acquire in shared mode. By default an exclusive lock
- will be acquired.
- blocking: whether to block while trying to acquire or to operate in
- try-lock mode. this locking mode is not supported yet.
+ @param level: the level at which the locks shall be acquired;
+ it must be a member of LEVELS.
+ @param names: the names of the locks which shall be acquired
+ (special lock names, or instance/node names)
+ @param shared: whether to acquire in shared mode; by default
+ an exclusive lock will be acquired
+ @param blocking: whether to block while trying to acquire or to
+ operate in try-lock mode (this locking mode is not supported yet)
"""
assert level in LEVELS, "Invalid locking level %s" % level
def release(self, level, names=None):
"""Release a set of resource locks, at the same level.
- You must have acquired the locks, either in shared or in exclusive mode,
- before releasing them.
+ You must have acquired the locks, either in shared or in exclusive
+ mode, before releasing them.
- Args:
- level: the level at which the locks shall be released.
- It must be a memmber of LEVELS.
- names: the names of the locks which shall be released.
- (defaults to all the locks acquired at that level).
+ @param level: the level at which the locks shall be released;
+ it must be a member of LEVELS
+ @param names: the names of the locks which shall be released
+ (defaults to all the locks acquired at that level)
"""
assert level in LEVELS, "Invalid locking level %s" % level
def add(self, level, names, acquired=0, shared=0):
"""Add locks at the specified level.
- Args:
- level: the level at which the locks shall be added.
- It must be a memmber of LEVELS_MOD.
- names: names of the locks to acquire
- acquired: whether to acquire the newly added locks
- shared: whether the acquisition will be shared
+ @param level: the level at which the locks shall be added;
+      it must be a member of LEVELS_MOD.
+ @param names: names of the locks to acquire
+ @param acquired: whether to acquire the newly added locks
+ @param shared: whether the acquisition will be shared
+
"""
assert level in LEVELS_MOD, "Invalid or immutable level %s" % level
assert self._BGL_owned(), ("You must own the BGL before performing other"
def remove(self, level, names, blocking=1):
"""Remove locks from the specified level.
- You must either already own the locks you are trying to remove exclusively
- or not own any lock at an upper level.
+ You must either already own the locks you are trying to remove
+ exclusively or not own any lock at an upper level.
- Args:
- level: the level at which the locks shall be removed.
- It must be a memmber of LEVELS_MOD.
- names: the names of the locks which shall be removed.
- (special lock names, or instance/node names)
- blocking: whether to block while trying to operate in try-lock mode.
- this locking mode is not supported yet.
+ @param level: the level at which the locks shall be removed;
+ it must be a member of LEVELS_MOD
+ @param names: the names of the locks which shall be removed
+ (special lock names, or instance/node names)
+ @param blocking: whether to block while trying to operate in
+ try-lock mode (this locking mode is not supported yet)
"""
assert level in LEVELS_MOD, "Invalid or immutable level %s" % level
def MapLVsByNode(self, lvmap=None, devs=None, node=None):
"""Provide a mapping of nodes to LVs this instance owns.
- This function figures out what logical volumes should belong on which
- nodes, recursing through a device tree.
+ This function figures out what logical volumes should belong on
+ which nodes, recursing through a device tree.
- Args:
- lvmap: (optional) a dictionary to receive the 'node' : ['lv', ...] data.
+ @param lvmap: optional dictionary to receive the
+ 'node' : ['lv', ...] data.
- Returns:
- None if lvmap arg is given.
- Otherwise, { 'nodename' : ['volume1', 'volume2', ...], ... }
+ @return: None if lvmap arg is given, otherwise, a dictionary
+ of the form { 'nodename' : ['volume1', 'volume2', ...], ... }
"""
if node == None:
def BuildUriList(ids, uri_format, uri_fields=("name", "uri")):
"""Builds a URI list as used by index resources.
- Args:
- - ids: List of ids as strings
- - uri_format: Format to be applied for URI
- - uri_fields: Optional parameter for field ids
+ @param ids: list of ids as strings
+ @param uri_format: format to be applied for URI
+ @param uri_fields: optional parameter for field IDs
"""
(field_id, field_uri) = uri_fields
def ExtractField(sequence, index):
"""Creates a list containing one column out of a list of lists.
- Args:
- - sequence: Sequence of lists
- - index: Index of field
+ @param sequence: sequence of lists
+ @param index: index of field
"""
return map(lambda item: item[index], sequence)
def MapFields(names, data):
"""Maps two lists into one dictionary.
- Args:
- - names: Field names (list of strings)
- - data: Field data (list)
+ Example::
+ >>> MapFields(["a", "b"], ["foo", 123])
+ {'a': 'foo', 'b': 123}
- Example:
- >>> MapFields(["a", "b"], ["foo", 123])
- {'a': 'foo', 'b': 123}
+ @param names: field names (list of strings)
+ @param data: field data (list)
"""
if len(names) != len(data):
def MapBulkFields(itemslist, fields):
"""Map value to field name in to one dictionary.
- Args:
- - itemslist: A list of items values
- - instance: A list of items names
+ @param itemslist: a list of items values
+ @param fields: a list of items names
+
+ @return: a list of mapped dictionaries
- Returns:
- A list of mapped dictionaries
"""
items_details = []
for item in itemslist:
def MakeParamsDict(opts, params):
- """ Makes params dictionary out of a option set.
+ """Makes params dictionary out of a option set.
This function returns a dictionary needed for hv or be parameters. But only
those fields which provided in the option set. Takes parameters frozensets
def __init__(self, items, queryargs, req):
"""Generic resource constructor.
- Args:
- items: a list with variables encoded in the URL
- queryargs: a dictionary with additional options from URL
+ @param items: a list with variables encoded in the URL
+ @param queryargs: a dictionary with additional options from URL
"""
self.items = items
def __init__(self, connector=CONNECTOR):
"""Resource mapper constructor.
- Args:
- con: a dictionary, mapping method name with URL path regexp
+ @param connector: a dictionary, mapping method name with URL path regexp
"""
self._connector = connector
def getController(self, uri):
"""Find method for a given URI.
- Args:
- uri: string with URI
+ @param uri: string with URI
- Returns:
- None if no method is found or a tuple containing the following fields:
- methd: name of method mapped to URI
- items: a list of variable intems in the path
- args: a dictionary with additional parameters from URL
+ @return: None if no method is found or a tuple containing
+ the following fields:
+ - method: name of method mapped to URI
+          - items: a list of variable items in the path
+ - args: a dictionary with additional parameters from URL
"""
if '?' in uri:
def GET(self):
"""Show the list of mapped resources.
- Returns:
- A dictionary with 'name' and 'uri' keys for each of them.
+ @return: a dictionary with 'name' and 'uri' keys for each of them.
"""
root_pattern = re.compile('^R_([a-zA-Z0-9]+)$')
class R_version(baserlib.R_Generic):
"""/version resource.
- This resource should be used to determine the remote API version and to adapt
- clients accordingly.
+ This resource should be used to determine the remote API version and
+ to adapt clients accordingly.
"""
DOC_URI = "/version"
def GET(self):
"""Returns cluster information.
- Example: {
- "config_version": 3,
- "name": "cluster1.example.com",
- "software_version": "1.2.4",
- "os_api_version": 5,
- "export_version": 0,
- "master": "node1.example.com",
- "architecture": [
- "64bit",
- "x86_64"
- ],
- "hypervisor_type": "xen-pvm",
- "protocol_version": 12
- }
+ Example::
+
+ {
+ "config_version": 3,
+ "name": "cluster1.example.com",
+ "software_version": "1.2.4",
+ "os_api_version": 5,
+ "export_version": 0,
+ "master": "node1.example.com",
+ "architecture": [
+ "64bit",
+ "x86_64"
+ ],
+ "hypervisor_type": "xen-pvm",
+ "protocol_version": 12
+ }
"""
op = ganeti.opcodes.OpQueryClusterInfo()
def GET(self):
"""Returns a dictionary of jobs.
- Returns:
- A dictionary with jobs id and uri.
+ @return: a dictionary with jobs id and uri.
"""
fields = ["id"]
def GET(self):
"""Returns a job status.
- Returns:
- A dictionary with job parameters.
-
- The result includes:
- id - job ID as a number
- status - current job status as a string
- ops - involved OpCodes as a list of dictionaries for each opcodes in
- the job
- opstatus - OpCodes status as a list
- opresult - OpCodes results as a list of lists
+ @return: a dictionary with job parameters.
+ The result includes:
+ - id: job ID as a number
+ - status: current job status as a string
+ - ops: involved OpCodes as a list of dictionaries for each
+ opcodes in the job
+ - opstatus: OpCodes status as a list
+ - opresult: OpCodes results as a list of lists
"""
fields = ["id", "ops", "status", "opstatus", "opresult"]
def GET(self):
"""Returns a list of all nodes.
- Returns:
- A dictionary with 'name' and 'uri' keys for each of them.
+ Example::
- Example: [
+ [
{
"id": "node1.example.com",
"uri": "\/instances\/node1.example.com"
{
"id": "node2.example.com",
"uri": "\/instances\/node2.example.com"
- }]
+ }
+ ]
If the optional 'bulk' argument is provided and set to 'true'
value (i.e '?bulk=1'), the output contains detailed
information about nodes as a list.
- Example: [
+ Example::
+
+ [
{
"pinst_cnt": 1,
"mfree": 31280,
"dfree": 5171712
},
...
- ]
+ ]
+
+ @return: a dictionary with 'name' and 'uri' keys for each of them
"""
client = luxi.Client()
def GET(self):
"""Returns a list of all available instances.
- Returns:
- A dictionary with 'name' and 'uri' keys for each of them.
- Example: [
+ Example::
+
+ [
{
"name": "web.example.com",
"uri": "\/instances\/web.example.com"
{
"name": "mail.example.com",
"uri": "\/instances\/mail.example.com"
- }]
+ }
+ ]
If the optional 'bulk' argument is provided and set to 'true'
value (i.e '?bulk=1'), the output contains detailed
information about instances as a list.
- Example: [
+ Example::
+
+ [
{
"status": "running",
"bridge": "xen-br0",
"oper_state": true
},
...
- ]
+ ]
+
+    @return: a dictionary with 'name' and 'uri' keys for each of them.
"""
client = luxi.Client()
def POST(self):
"""Create an instance.
- Returns:
- A job id.
+    @return: a job id
"""
opts = self.req.request_post_data
def PUT(self):
"""Startup an instance.
- The URI takes force=[False|True] parameter to start the instance if even if
- secondary disks are failing.
+ The URI takes force=[False|True] parameter to start the instance
+    even if secondary disks are failing.
"""
instance_name = self.items[0]
def PUT(self):
"""Add a set of tags to the instance.
- The request as a list of strings should be PUT to this URI. And you'll have
- back a job id.
+ The request as a list of strings should be PUT to this URI. And
+ you'll have back a job id.
"""
return baserlib._Tags_PUT(constants.TAG_INSTANCE,
def DELETE(self):
"""Delete a tag.
- In order to delete a set of tags from a instance, DELETE request should be
- addressed to URI like: /2/instances/[instance_name]/tags?tag=[tag]&tag=[tag]
+    In order to delete a set of tags from an instance, the DELETE
+ request should be addressed to URI like:
+ /2/instances/[instance_name]/tags?tag=[tag]&tag=[tag]
"""
if 'tag' not in self.queryargs:
@type node_list: list
@param node_list: the list of nodes to query
- @type vgname: C{string}
- @param vgname: the name of the volume group to ask for disk space
+ @type vg_name: C{string}
+ @param vg_name: the name of the volume group to ask for disk space
information
@type hypervisor_type: C{str}
@param hypervisor_type: the name of the hypervisor to ask for
def DumpJson(data, indent=True):
"""Serialize a given object.
- Args:
- - indent: Whether to indent output (depends on simplejson version)
+ @param data: the data to serialize
+ @param indent: whether to indent output (depends on simplejson version)
+
+ @return: the string representation of data
"""
if not indent or _JSON_INDENT is None:
def LoadJson(txt):
"""Unserialize data from a string.
+ @param txt: the json-encoded form
+
+ @return: the original data
+
"""
return simplejson.loads(txt)
tty=False, use_cluster_key=True, strict_host_check=True):
"""Build an ssh command to execute a command on a remote node.
- Args:
- hostname: the target host, string
- user: user to auth as
- command: the command
- batch: if true, ssh will run in batch mode with no prompting
- ask_key: if true, ssh will run with StrictHostKeyChecking=ask, so that
- we can connect to an unknown host (not valid in batch mode)
- use_cluster_key: Whether to expect and use the cluster-global SSH key
- strict_host_check: Whether to check the host's SSH key at all
-
- Returns:
- The ssh call to run 'command' on the remote host.
+ @param hostname: the target host, string
+ @param user: user to auth as
+ @param command: the command
+ @param batch: if true, ssh will run in batch mode with no prompting
+ @param ask_key: if true, ssh will run with
+ StrictHostKeyChecking=ask, so that we can connect to an
+ unknown host (not valid in batch mode)
+ @param use_cluster_key: whether to expect and use the
+ cluster-global SSH key
+ @param strict_host_check: whether to check the host's SSH key at all
+
+ @return: the ssh call to run 'command' on the remote host.
"""
argv = [constants.SSH, "-q"]
This method has the same return value as `utils.RunCmd()`, which it
uses to launch ssh.
- Args:
- See SshRunner.BuildCmd.
+    See L{BuildCmd} for a description of the parameters.
- Returns:
- `utils.RunResult` like `utils.RunCmd()`
+ @rtype: L{utils.RunResult}
+    @return: the result as from L{utils.RunCmd}
"""
return utils.RunCmd(self.BuildCmd(*args, **kwargs))
def CopyFileToNode(self, node, filename):
"""Copy a file to another node with scp.
- Args:
- node: node in the cluster
- filename: absolute pathname of a local file
+ @param node: node in the cluster
+ @param filename: absolute pathname of a local file
- Returns:
- success: True/False
+ @rtype: boolean
+ @return: the success of the operation
"""
if not os.path.isabs(filename):
(conflicting known hosts) and incosistencies between dns/hosts
entries and local machine names
- Args:
- node: nodename of a host to check. can be short or full qualified hostname
+ @param node: nodename of a host to check; can be short or
+      fully qualified hostname
- Returns:
- (success, detail)
- where
- success: True/False
- detail: String with details
+ @return: (success, detail), where:
+ - success: True/False
+ - detail: string with details
"""
retval = self.Run(node, 'root', 'hostname')
@param opts: the command line options selected by the user
@type args: list
@param args: should contain only one element, the age as a time spec
- that can be parsed by L{cli.ParseTimespec} or the keyword I{all},
- which will cause all jobs to be archived
+ that can be parsed by L{ganeti.cli.ParseTimespec} or the
+ keyword I{all}, which will cause all jobs to be archived
@rtype: int
@return: the desired exit code