from ganeti import daemon
from ganeti import http
from ganeti import utils
-from ganeti import storage
+from ganeti.storage import container
from ganeti import serializer
from ganeti import netutils
+from ganeti import pathutils
+from ganeti import ssconf
import ganeti.http.server # pylint: disable=W0611
queue_lock = None
def _extendReasonTrail(trail, source, reason=""):
  """Append a noded-specific entry to the given reason trail.

  The appended entry identifies which noded functionality handled the
  request, together with an optional free-form reason and a timestamp.

  @param trail: reason trail to extend (must not be None)
  @param source: name of the noded functionality, appended to the
      noded source prefix
  @param reason: optional textual reason, defaults to the empty string

  """
  assert trail is not None
  # Entry format: (source, reason, nanosecond timestamp)
  entry = ("%s:%s" % (constants.OPCODE_REASON_SRC_NODED, source),
           reason,
           utils.EpochNano())
  trail.append(entry)
+
+
def _PrepareQueueLock():
"""Try to prepare the queue lock.
return ieioargs
+def _DefaultAlternative(value, default):
+ """Returns value or, if evaluating to False, a default value.
+
+ Returns the given value, unless it evaluates to False. In the latter case the
+ default value is returned.
+
+ @param value: Value to return if it doesn't evaluate to False
+ @param default: Default value
+ @return: Given value or the default
+
+ """
+ if value:
+ return value
+
+ return default
+
+
class MlockallRequestExecutor(http.server.HttpServerRequestExecutor):
"""Subclass ensuring request handlers are locked in RAM.
"""Handle a request.
"""
- # FIXME: Remove HTTP_PUT in Ganeti 2.7
- if req.request_method.upper() not in (http.HTTP_PUT, http.HTTP_POST):
- raise http.HttpBadRequest("Only PUT and POST methods are supported")
+ if req.request_method.upper() != http.HTTP_POST:
+ raise http.HttpBadRequest("Only the POST method is supported")
path = req.request_path
if path.startswith("/"):
"""Create a block device.
"""
- bdev_s, size, owner, on_primary, info = params
+ (bdev_s, size, owner, on_primary, info, excl_stor) = params
bdev = objects.Disk.FromDict(bdev_s)
if bdev is None:
raise ValueError("can't unserialize data!")
- return backend.BlockdevCreate(bdev, size, owner, on_primary, info)
+ return backend.BlockdevCreate(bdev, size, owner, on_primary, info,
+ excl_stor)
@staticmethod
def perspective_blockdev_pause_resume_sync(params):
return backend.BlockdevClose(params[0], disks)
@staticmethod
- def perspective_blockdev_getsize(params):
+ def perspective_blockdev_getdimensions(params):
"""Compute the sizes of the given block devices.
"""
disks = [objects.Disk.FromDict(cf) for cf in params[0]]
- return backend.BlockdevGetsize(disks)
+ return backend.BlockdevGetdimensions(disks)
@staticmethod
def perspective_blockdev_export(params):
dest_node, dest_path, cluster_name = params[1:]
return backend.BlockdevExport(disk, dest_node, dest_path, cluster_name)
+ @staticmethod
+ def perspective_blockdev_setinfo(params):
+ """Sets metadata information on the given block device.
+
+ """
+ (disk, info) = params
+ disk = objects.Disk.FromDict(disk)
+ return backend.BlockdevSetInfo(disk, info)
+
# blockdev/drbd specific methods ----------
@staticmethod
disk list must all be drbd devices.
"""
- nodes_ip, disks = params
+ nodes_ip, disks, target_node_uuid = params
disks = [objects.Disk.FromDict(cf) for cf in disks]
- return backend.DrbdDisconnectNet(nodes_ip, disks)
+ return backend.DrbdDisconnectNet(target_node_uuid, nodes_ip, disks)
@staticmethod
def perspective_drbd_attach_net(params):
disk list must all be drbd devices.
"""
- nodes_ip, disks, instance_name, multimaster = params
+ nodes_ip, disks, instance_name, multimaster, target_node_uuid = params
disks = [objects.Disk.FromDict(cf) for cf in disks]
- return backend.DrbdAttachNet(nodes_ip, disks,
- instance_name, multimaster)
+ return backend.DrbdAttachNet(target_node_uuid, nodes_ip, disks,
+ instance_name, multimaster)
@staticmethod
def perspective_drbd_wait_sync(params):
disk list must all be drbd devices.
"""
- nodes_ip, disks = params
+ nodes_ip, disks, target_node_uuid = params
disks = [objects.Disk.FromDict(cf) for cf in disks]
- return backend.DrbdWaitSync(nodes_ip, disks)
+ return backend.DrbdWaitSync(target_node_uuid, nodes_ip, disks)
@staticmethod
def perspective_drbd_helper(params):
Note that as opposed to export_info, which may query data about an
export in any path, this only queries the standard Ganeti path
- (constants.EXPORT_DIR).
+ (pathutils.EXPORT_DIR).
"""
return backend.ListExports()
"""
(su_name, su_args, name, fields) = params
- return storage.GetStorage(su_name, *su_args).List(name, fields)
+ return container.GetStorage(su_name, *su_args).List(name, fields)
@staticmethod
def perspective_storage_modify(params):
"""
(su_name, su_args, name, changes) = params
- return storage.GetStorage(su_name, *su_args).Modify(name, changes)
+ return container.GetStorage(su_name, *su_args).Modify(name, changes)
@staticmethod
def perspective_storage_execute(params):
"""
(su_name, su_args, name, op) = params
- return storage.GetStorage(su_name, *su_args).Execute(name, op)
+ return container.GetStorage(su_name, *su_args).Execute(name, op)
# bridge --------------------------
"""
instance = objects.Instance.FromDict(params[0])
timeout = params[1]
- return backend.InstanceShutdown(instance, timeout)
+ trail = params[2]
+ _extendReasonTrail(trail, "shutdown")
+ return backend.InstanceShutdown(instance, timeout, trail)
@staticmethod
def perspective_instance_start(params):
"""Start an instance.
"""
- (instance_name, startup_paused) = params
+ (instance_name, startup_paused, trail) = params
instance = objects.Instance.FromDict(instance_name)
- return backend.StartInstance(instance, startup_paused)
+ _extendReasonTrail(trail, "start")
+ return backend.StartInstance(instance, startup_paused, trail)
@staticmethod
def perspective_migration_info(params):
"""Migrates an instance.
"""
- instance, target, live = params
+ cluster_name, instance, target, live = params
instance = objects.Instance.FromDict(instance)
- return backend.MigrateInstance(instance, target, live)
+ return backend.MigrateInstance(cluster_name, instance, target, live)
@staticmethod
def perspective_instance_finalize_migration_src(params):
instance = objects.Instance.FromDict(params[0])
reboot_type = params[1]
shutdown_timeout = params[2]
- return backend.InstanceReboot(instance, reboot_type, shutdown_timeout)
+ trail = params[3]
+ _extendReasonTrail(trail, "reboot")
+ return backend.InstanceReboot(instance, reboot_type, shutdown_timeout,
+ trail)
@staticmethod
def perspective_instance_balloon_memory(params):
"""Query instance information.
"""
- return backend.GetInstanceInfo(params[0], params[1])
+ (instance_name, hypervisor_name, hvparams) = params
+ return backend.GetInstanceInfo(instance_name, hypervisor_name, hvparams)
@staticmethod
def perspective_instance_migratable(params):
"""Query information about all instances.
"""
- return backend.GetAllInstancesInfo(params[0])
+ (hypervisor_list, all_hvparams) = params
+ return backend.GetAllInstancesInfo(hypervisor_list, all_hvparams)
@staticmethod
def perspective_instance_list(params):
"""Query the list of running instances.
"""
- return backend.GetInstanceList(params[0])
+ (hypervisor_list, hvparams) = params
+ return backend.GetInstanceList(hypervisor_list, hvparams)
# node --------------------------
"""Query node information.
"""
- (vg_names, hv_names) = params
- return backend.GetNodeInfo(vg_names, hv_names)
+ (storage_units, hv_specs, excl_stor) = params
+ return backend.GetNodeInfo(storage_units, hv_specs, excl_stor)
@staticmethod
def perspective_etc_hosts_modify(params):
"""Run a verify sequence on this node.
"""
- return backend.VerifyNode(params[0], params[1])
+ (what, cluster_name, hvparams) = params
+ return backend.VerifyNode(what, cluster_name, hvparams)
+
+ @classmethod
+ def perspective_node_verify_light(cls, params):
+ """Run a light verify sequence on this node.
+
+ """
+ # So far it's the same as the normal node_verify
+ return cls.perspective_node_verify(params)
@staticmethod
def perspective_node_start_master_daemons(params):
"""Tries to powercycle the nod.
"""
- hypervisor_type = params[0]
- return backend.PowercycleNode(hypervisor_type)
+ (hypervisor_type, hvparams) = params
+ return backend.PowercycleNode(hypervisor_type, hvparams)
# cluster --------------------------
return result
@staticmethod
+ def perspective_restricted_command(params):
+ """Runs a restricted command.
+
+ """
+ (cmd, ) = params
+
+ return backend.RunRestrictedCmd(cmd)
+
+ @staticmethod
def perspective_write_ssconf_files(params):
"""Write ssconf files.
"""
(values,) = params
- return backend.WriteSsconfFiles(values)
+ return ssconf.WriteSsconfFiles(values)
+
+ @staticmethod
+ def perspective_get_watcher_pause(params):
+ """Get watcher pause end.
+
+ """
+ return utils.ReadWatcherPauseFile(pathutils.WATCHER_PAUSEFILE)
+
+ @staticmethod
+ def perspective_set_watcher_pause(params):
+ """Set watcher pause.
+
+ """
+ (until, ) = params
+ return backend.SetWatcherPause(until)
# os -----------------------
required, name, checks, params = params
return backend.ValidateOS(required, name, checks, params)
+ # extstorage -----------------------
+
+ @staticmethod
+ def perspective_extstorage_diagnose(params):
+ """Query detailed information about existing extstorage providers.
+
+ """
+ return backend.DiagnoseExtStorage()
+
# hooks -----------------------
@staticmethod
# TODO: What if a file fails to rename?
return [backend.JobQueueRename(old, new) for old, new in params[0]]
+ @staticmethod
+ @_RequireJobQueueLock
+ def perspective_jobqueue_set_drain_flag(params):
+ """Set job queue's drain flag.
+
+ """
+ (flag, ) = params
+
+ return jstore.SetDrainFlag(flag)
+
# hypervisor ---------------
@staticmethod
"""
parser = OptionParser(description="Ganeti node daemon",
- usage="%prog [-f] [-d] [-p port] [-b ADDRESS]",
+ usage="%prog [-f] [-d] [-p port] [-b ADDRESS]\
+ \ [-i INTERFACE]",
version="%%prog (ganeti) %s" %
constants.RELEASE_VERSION)
parser.add_option("--no-mlock", dest="mlock",
default=True, action="store_false")
daemon.GenericMain(constants.NODED, parser, CheckNoded, PrepNoded, ExecNoded,
- default_ssl_cert=constants.NODED_CERT_FILE,
- default_ssl_key=constants.NODED_CERT_FILE,
+ default_ssl_cert=pathutils.NODED_CERT_FILE,
+ default_ssl_key=pathutils.NODED_CERT_FILE,
console_logging=True)