bdev = objects.Disk.FromDict(bdev_s)
if bdev is None:
raise ValueError("can't unserialize data!")
- return backend.CreateBlockDevice(bdev, size, owner, on_primary, info)
+ return backend.BlockdevCreate(bdev, size, owner, on_primary, info)
@staticmethod
def perspective_blockdev_remove(params):
"""
bdev_s = params[0]
bdev = objects.Disk.FromDict(bdev_s)
- return backend.RemoveBlockDevice(bdev)
+ return backend.BlockdevRemove(bdev)
@staticmethod
def perspective_blockdev_rename(params):
"""
devlist = [(objects.Disk.FromDict(ds), uid) for ds, uid in params]
- return backend.RenameBlockDevices(devlist)
+ return backend.BlockdevRename(devlist)
@staticmethod
def perspective_blockdev_assemble(params):
bdev = objects.Disk.FromDict(bdev_s)
if bdev is None:
raise ValueError("can't unserialize data!")
- return backend.AssembleBlockDevice(bdev, owner, on_primary)
+ return backend.BlockdevAssemble(bdev, owner, on_primary)
@staticmethod
def perspective_blockdev_shutdown(params):
bdev = objects.Disk.FromDict(bdev_s)
if bdev is None:
raise ValueError("can't unserialize data!")
- return backend.ShutdownBlockDevice(bdev)
+ return backend.BlockdevShutdown(bdev)
@staticmethod
def perspective_blockdev_addchildren(params):
ndevs = [objects.Disk.FromDict(disk_s) for disk_s in ndev_s]
if bdev is None or ndevs.count(None) > 0:
raise ValueError("can't unserialize data!")
- return backend.MirrorAddChildren(bdev, ndevs)
+ return backend.BlockdevAddchildren(bdev, ndevs)
@staticmethod
def perspective_blockdev_removechildren(params):
ndevs = [objects.Disk.FromDict(disk_s) for disk_s in ndev_s]
if bdev is None or ndevs.count(None) > 0:
raise ValueError("can't unserialize data!")
- return backend.MirrorRemoveChildren(bdev, ndevs)
+ return backend.BlockdevRemovechildren(bdev, ndevs)
@staticmethod
def perspective_blockdev_getmirrorstatus(params):
"""
disks = [objects.Disk.FromDict(dsk_s)
for dsk_s in params]
- return backend.GetMirrorStatus(disks)
+ return backend.BlockdevGetmirrorstatus(disks)
@staticmethod
def perspective_blockdev_find(params):
"""
disk = objects.Disk.FromDict(params[0])
- return backend.FindBlockDevice(disk)
+ return backend.BlockdevFind(disk)
@staticmethod
def perspective_blockdev_snapshot(params):
"""
cfbd = objects.Disk.FromDict(params[0])
- return backend.SnapshotBlockDevice(cfbd)
+ return backend.BlockdevSnapshot(cfbd)
@staticmethod
def perspective_blockdev_grow(params):
"""
cfbd = objects.Disk.FromDict(params[0])
amount = params[1]
- return backend.GrowBlockDevice(cfbd, amount)
+ return backend.BlockdevGrow(cfbd, amount)
@staticmethod
def perspective_blockdev_close(params):
"""Closes the given block devices.
"""
- disks = [objects.Disk.FromDict(cf) for cf in params]
- return backend.CloseBlockDevices(disks)
+ disks = [objects.Disk.FromDict(cf) for cf in params[1]]
+ return backend.BlockdevClose(params[0], disks)
+
+ # blockdev/drbd specific methods ----------
+
+ @staticmethod
+ def perspective_drbd_disconnect_net(params):
+ """Disconnects the network connection of drbd disks.
+
+ Note that this is only valid for drbd disks, so the members of the
+ disk list must all be drbd devices.
+
+ """
+ # params is (nodes_ip, disks); disks arrive as serialized Disk dicts
+ nodes_ip, disks = params
+ disks = [objects.Disk.FromDict(cf) for cf in disks]
+ return backend.DrbdDisconnectNet(nodes_ip, disks)
+
+ @staticmethod
+ def perspective_drbd_attach_net(params):
+ """Attaches the network connection of drbd disks.
+
+ Note that this is only valid for drbd disks, so the members of the
+ disk list must all be drbd devices.
+
+ """
+ # params is (nodes_ip, disks, instance_name, multimaster);
+ # disks arrive as serialized Disk dicts
+ nodes_ip, disks, instance_name, multimaster = params
+ disks = [objects.Disk.FromDict(cf) for cf in disks]
+ return backend.DrbdAttachNet(nodes_ip, disks,
+ instance_name, multimaster)
+
+ @staticmethod
+ def perspective_drbd_wait_sync(params):
+ """Wait until DRBD disks are synched.
+
+ Note that this is only valid for drbd disks, so the members of the
+ disk list must all be drbd devices.
+
+ """
+ # params is (nodes_ip, disks); disks arrive as serialized Disk dicts
+ nodes_ip, disks = params
+ disks = [objects.Disk.FromDict(cf) for cf in disks]
+ return backend.DrbdWaitSync(nodes_ip, disks)
# export/import --------------------------
"""
inst_s = params[0]
inst = objects.Instance.FromDict(inst_s)
- return backend.AddOSToInstance(inst)
+ reinstall = params[1]
+ return backend.InstanceOsAdd(inst, reinstall)
@staticmethod
def perspective_instance_run_rename(params):
"""
instance = objects.Instance.FromDict(params[0])
- return backend.ShutdownInstance(instance)
+ return backend.InstanceShutdown(instance)
@staticmethod
def perspective_instance_start(params):
"""
instance = objects.Instance.FromDict(params[0])
- extra_args = params[1]
- return backend.StartInstance(instance, extra_args)
+ return backend.StartInstance(instance)
+
+ @staticmethod
+ def perspective_migration_info(params):
+ """Gather information about an instance to be migrated.
+
+ """
+ # params[0] is a serialized Instance dict; the gathered info comes
+ # from the backend (format opaque at this layer)
+ instance = objects.Instance.FromDict(params[0])
+ return backend.MigrationInfo(instance)
+
+ @staticmethod
+ def perspective_accept_instance(params):
+ """Prepare the node to accept an instance.
+
+ """
+ # params is (instance, info, target); instance is a serialized
+ # Instance dict, info/target are passed through to the backend
+ instance, info, target = params
+ instance = objects.Instance.FromDict(instance)
+ return backend.AcceptInstance(instance, info, target)
+
+ @staticmethod
+ def perspective_finalize_migration(params):
+ """Finalize the instance migration.
+
+ """
+ # params is (instance, info, success); instance is a serialized
+ # Instance dict, success presumably flags whether the migration
+ # succeeded -- confirm against the caller
+ instance, info, success = params
+ instance = objects.Instance.FromDict(instance)
+ return backend.FinalizeMigration(instance, info, success)
@staticmethod
def perspective_instance_migrate(params):
"""
instance = objects.Instance.FromDict(params[0])
reboot_type = params[1]
- extra_args = params[2]
- return backend.RebootInstance(instance, reboot_type, extra_args)
+ return backend.InstanceReboot(instance, reboot_type)
@staticmethod
def perspective_instance_info(params):
return backend.GetInstanceInfo(params[0], params[1])
@staticmethod
+ def perspective_instance_migratable(params):
+ """Query whether the specified instance can be migrated.
+
+ """
+ # params[0] is a serialized Instance dict
+ instance = objects.Instance.FromDict(params[0])
+ return backend.GetInstanceMigratable(instance)
+
+ @staticmethod
def perspective_all_instances_info(params):
"""Query information about all instances.
return backend.DemoteFromMC()
+ @staticmethod
+ def perspective_node_powercycle(params):
+ """Tries to powercycle the node.
+
+ """
+ # params[0] is the hypervisor type to use for the powercycle
+ hypervisor_type = params[0]
+ return backend.PowercycleNode(hypervisor_type)
+
+
# cluster --------------------------
@staticmethod
"""Query detailed information about existing OSes.
"""
- return [os.ToDict() for os in backend.DiagnoseOS()]
+ return [os_obj.ToDict() for os_obj in backend.DiagnoseOS()]
@staticmethod
def perspective_os_get(params):
"""Rename a job queue file.
"""
- (old, new) = params
-
- return backend.JobQueueRename(old, new)
+ # TODO: What if a file fails to rename?
+ return [backend.JobQueueRename(old, new) for old, new in params]
@staticmethod
def perspective_jobqueue_set_drain(params):
"""
parser = OptionParser(description="Ganeti node daemon",
- usage="%prog [-f] [-d]",
+ usage="%prog [-f] [-d] [-b ADDRESS]",
version="%%prog (ganeti) %s" %
constants.RELEASE_VERSION)
parser.add_option("-d", "--debug", dest="debug",
help="Enable some debug messages",
default=False, action="store_true")
+ parser.add_option("-b", "--bind", dest="bind_address",
+ help="Bind address",
+ default="", metavar="ADDRESS")
+
options, args = parser.parse_args()
return options, args
-def EnsureRuntimeEnvironment():
- """Ensure our run-time environment is complete.
-
- Currently this creates directories which could be missing, either
- due to directories being on a tmpfs mount, or due to incomplete
- packaging.
-
- """
- dirs = [(val, constants.RUN_DIRS_MODE) for val in constants.SUB_RUN_DIRS]
- dirs.append((constants.LOG_OS_DIR, 0750))
- for dir_name, dir_mode in dirs:
- if not os.path.exists(dir_name):
- try:
- os.mkdir(dir_name, dir_mode)
- except EnvironmentError, err:
- if err.errno != errno.EEXIST:
- print ("Node setup wrong, cannot create directory '%s': %s" %
- (dir_name, err))
- sys.exit(5)
- if not os.path.isdir(dir_name):
- print ("Node setup wrong, '%s' is not a directory" % dir_name)
- sys.exit(5)
-
-
def main():
"""Main function for the node daemon.
options, args = ParseOptions()
utils.debug = options.debug
+
+ if options.fork:
+ utils.CloseFDs()
+
for fname in (constants.SSL_CERT_FILE,):
if not os.path.isfile(fname):
print "config %s not there, will not run." % fname
print "Cluster configuration incomplete: '%s'" % str(err)
sys.exit(5)
- EnsureRuntimeEnvironment()
+ dirs = [(val, constants.RUN_DIRS_MODE) for val in constants.SUB_RUN_DIRS]
+ dirs.append((constants.LOG_OS_DIR, 0750))
+ dirs.append((constants.LOCK_DIR, 1777))
+ utils.EnsureDirs(dirs)
# become a daemon
if options.fork:
queue_lock = jstore.InitAndVerifyQueue(must_lock=False)
mainloop = daemon.Mainloop()
- server = NodeHttpServer(mainloop, "", port,
+ server = NodeHttpServer(mainloop, options.bind_address, port,
ssl_params=ssl_params, ssl_verify_peer=True)
server.Start()
try: