from ganeti import daemon
from ganeti import http
from ganeti import utils
-from ganeti import storage
+from ganeti.storage import container
from ganeti import serializer
from ganeti import netutils
from ganeti import pathutils
queue_lock = None
+def _extendReasonTrail(trail, source, reason=""):
+ """Extend the reason trail with noded information
+
+ The trail is extended by appending the name of the noded functionality.
+ """
+ assert trail is not None
+ trail_source = "%s:%s" % (constants.OPCODE_REASON_SRC_NODED, source)
+ trail.append((trail_source, reason, utils.EpochNano()))
+
+
def _PrepareQueueLock():
"""Try to prepare the queue lock.
"""Grow a stack of devices.
"""
- if len(params) < 4:
- raise ValueError("Received only 3 parameters in blockdev_grow,"
- " old master?")
+ if len(params) < 5:
+ raise ValueError("Received only %s parameters in blockdev_grow,"
+ " old master?" % len(params))
cfbd = objects.Disk.FromDict(params[0])
amount = params[1]
dryrun = params[2]
backingstore = params[3]
- return backend.BlockdevGrow(cfbd, amount, dryrun, backingstore)
+ excl_stor = params[4]
+ return backend.BlockdevGrow(cfbd, amount, dryrun, backingstore, excl_stor)
@staticmethod
def perspective_blockdev_close(params):
return backend.BlockdevClose(params[0], disks)
@staticmethod
- def perspective_blockdev_getsize(params):
+ def perspective_blockdev_getdimensions(params):
"""Compute the sizes of the given block devices.
"""
disks = [objects.Disk.FromDict(cf) for cf in params[0]]
- return backend.BlockdevGetsize(disks)
+ return backend.BlockdevGetdimensions(disks)
@staticmethod
def perspective_blockdev_export(params):
disk list must all be drbd devices.
"""
- nodes_ip, disks = params
+ nodes_ip, disks, target_node_uuid = params
disks = [objects.Disk.FromDict(cf) for cf in disks]
- return backend.DrbdDisconnectNet(nodes_ip, disks)
+ return backend.DrbdDisconnectNet(target_node_uuid, nodes_ip, disks)
@staticmethod
def perspective_drbd_attach_net(params):
disk list must all be drbd devices.
"""
- nodes_ip, disks, instance_name, multimaster = params
+ nodes_ip, disks, instance_name, multimaster, target_node_uuid = params
disks = [objects.Disk.FromDict(cf) for cf in disks]
- return backend.DrbdAttachNet(nodes_ip, disks,
- instance_name, multimaster)
+ return backend.DrbdAttachNet(target_node_uuid, nodes_ip, disks,
+ instance_name, multimaster)
@staticmethod
def perspective_drbd_wait_sync(params):
disk list must all be drbd devices.
"""
- nodes_ip, disks = params
+ nodes_ip, disks, target_node_uuid = params
+ disks = [objects.Disk.FromDict(cf) for cf in disks]
+ return backend.DrbdWaitSync(target_node_uuid, nodes_ip, disks)
+
+ @staticmethod
+ def perspective_drbd_needs_activation(params):
+ """Checks if the drbd devices need activation
+
+ Note that this is only valid for drbd disks, so the members of the
+ disk list must all be drbd devices.
+
+ """
+ nodes_ip, disks, target_node_uuid = params
disks = [objects.Disk.FromDict(cf) for cf in disks]
- return backend.DrbdWaitSync(nodes_ip, disks)
+ return backend.DrbdNeedsActivation(target_node_uuid, nodes_ip, disks)
@staticmethod
def perspective_drbd_helper(params):
"""
(su_name, su_args, name, fields) = params
- return storage.GetStorage(su_name, *su_args).List(name, fields)
+ return container.GetStorage(su_name, *su_args).List(name, fields)
@staticmethod
def perspective_storage_modify(params):
"""
(su_name, su_args, name, changes) = params
- return storage.GetStorage(su_name, *su_args).Modify(name, changes)
+ return container.GetStorage(su_name, *su_args).Modify(name, changes)
@staticmethod
def perspective_storage_execute(params):
"""
(su_name, su_args, name, op) = params
- return storage.GetStorage(su_name, *su_args).Execute(name, op)
+ return container.GetStorage(su_name, *su_args).Execute(name, op)
# bridge --------------------------
"""
instance = objects.Instance.FromDict(params[0])
timeout = params[1]
- return backend.InstanceShutdown(instance, timeout)
+ trail = params[2]
+ _extendReasonTrail(trail, "shutdown")
+ return backend.InstanceShutdown(instance, timeout, trail)
@staticmethod
def perspective_instance_start(params):
"""Start an instance.
"""
- (instance_name, startup_paused) = params
+ (instance_name, startup_paused, trail) = params
instance = objects.Instance.FromDict(instance_name)
- return backend.StartInstance(instance, startup_paused)
+ _extendReasonTrail(trail, "start")
+ return backend.StartInstance(instance, startup_paused, trail)
@staticmethod
def perspective_migration_info(params):
"""Migrates an instance.
"""
- instance, target, live = params
+ cluster_name, instance, target, live = params
instance = objects.Instance.FromDict(instance)
- return backend.MigrateInstance(instance, target, live)
+ return backend.MigrateInstance(cluster_name, instance, target, live)
@staticmethod
def perspective_instance_finalize_migration_src(params):
instance = objects.Instance.FromDict(params[0])
reboot_type = params[1]
shutdown_timeout = params[2]
- (reason_source, reason_text) = params[3]
- reason_text = _DefaultAlternative(reason_text,
- constants.INSTANCE_REASON_REBOOT)
- reason = backend.InstReason(reason_source, reason_text)
+ trail = params[3]
+ _extendReasonTrail(trail, "reboot")
return backend.InstanceReboot(instance, reboot_type, shutdown_timeout,
- reason)
+ trail)
@staticmethod
def perspective_instance_balloon_memory(params):
"""Query instance information.
"""
- return backend.GetInstanceInfo(params[0], params[1])
+ (instance_name, hypervisor_name, hvparams) = params
+ return backend.GetInstanceInfo(instance_name, hypervisor_name, hvparams)
@staticmethod
def perspective_instance_migratable(params):
"""Query information about all instances.
"""
- return backend.GetAllInstancesInfo(params[0])
+ (hypervisor_list, all_hvparams) = params
+ return backend.GetAllInstancesInfo(hypervisor_list, all_hvparams)
@staticmethod
def perspective_instance_list(params):
"""Query the list of running instances.
"""
- return backend.GetInstanceList(params[0])
+ (hypervisor_list, hvparams) = params
+ return backend.GetInstanceList(hypervisor_list, hvparams)
# node --------------------------
"""Query node information.
"""
- (vg_names, hv_names, excl_stor) = params
- return backend.GetNodeInfo(vg_names, hv_names, excl_stor)
+ (storage_units, hv_specs) = params
+ return backend.GetNodeInfo(storage_units, hv_specs)
@staticmethod
def perspective_etc_hosts_modify(params):
"""Run a verify sequence on this node.
"""
- return backend.VerifyNode(params[0], params[1])
+ (what, cluster_name, hvparams) = params
+ return backend.VerifyNode(what, cluster_name, hvparams)
@classmethod
def perspective_node_verify_light(cls, params):
@staticmethod
def perspective_node_powercycle(params):
- """Tries to powercycle the nod.
+ """Tries to powercycle the node.
+
+ """
+ (hypervisor_type, hvparams) = params
+ return backend.PowercycleNode(hypervisor_type, hvparams)
+
+ @staticmethod
+ def perspective_node_configure_ovs(params):
+ """Sets up OpenvSwitch on the node.
"""
- hypervisor_type = params[0]
- return backend.PowercycleNode(hypervisor_type)
+ (ovs_name, ovs_link) = params
+ return backend.ConfigureOVS(ovs_name, ovs_link)
# cluster --------------------------
"""
parser = OptionParser(description="Ganeti node daemon",
- usage="%prog [-f] [-d] [-p port] [-b ADDRESS]",
+ usage=("%prog [-f] [-d] [-p port] [-b ADDRESS]"
+ " [-i INTERFACE]"),
version="%%prog (ganeti) %s" %
constants.RELEASE_VERSION)
parser.add_option("--no-mlock", dest="mlock",