4 # Copyright (C) 2006, 2007, 2010, 2011, 2012 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Ganeti node daemon"""
24 # pylint: disable=C0103,W0142
26 # C0103: Functions in this module need to have a given name structure,
27 # and the name of the daemon doesn't match
29 # W0142: Used * or ** magic, since we do use it extensively in this
38 from optparse import OptionParser
40 from ganeti import backend
41 from ganeti import constants
42 from ganeti import objects
43 from ganeti import errors
44 from ganeti import jstore
45 from ganeti import daemon
46 from ganeti import http
47 from ganeti import utils
48 from ganeti.storage import container
49 from ganeti import serializer
50 from ganeti import netutils
51 from ganeti import pathutils
52 from ganeti import ssconf
54 import ganeti.http.server # pylint: disable=W0611
60 def _extendReasonTrail(trail, source, reason=""):
61 """Extend the reason trail with noded information
# NOTE(review): this excerpt is line-sampled; the docstring close and blank
# lines (orig. 62, 64, 68-69) are elided here.
63 The trail is extended by appending the name of the noded functionality
# A None trail is a programming error on the caller's side.
65 assert trail is not None
# Entries are tagged "<OPCODE_REASON_SRC_NODED>:<source>" so the origin of
# each reason-trail element can be identified.
66 trail_source = "%s:%s" % (constants.OPCODE_REASON_SRC_NODED, source)
# Appends a (source, reason, timestamp) tuple; utils.EpochNano() presumably
# returns the epoch time in nanoseconds -- confirm in utils.
67 trail.append((trail_source, reason, utils.EpochNano()))
70 def _PrepareQueueLock():
71 """Try to prepare the queue lock.
# NOTE(review): several original lines (72, 74-75, 77, 79-82, 84, 86-87) are
# elided in this excerpt, including the docstring close, the body of the
# early-exit branch, the try: line and the success/error returns.
73 @return: None for success, otherwise an exception object
# Module-level cache: the lock object is created once and reused afterwards.
76 global queue_lock # pylint: disable=W0603
# If the lock already exists there is nothing to do; the elided branch body
# presumably returns None here -- confirm against the full source.
78 if queue_lock is not None:
# must_lock=False: verify the queue without insisting on holding the lock.
83 queue_lock = jstore.InitAndVerifyQueue(must_lock=False)
# Python 2 except syntax; the elided handler body presumably returns err,
# matching the @return contract above -- confirm.
85 except EnvironmentError, err:
89 def _RequireJobQueueLock(fn):
90 """Decorator for job queue manipulating functions.
# NOTE(review): elided lines include the docstring close and, after the
# Exclusive() call, presumably a try/finally that releases queue_lock around
# the call to fn, plus the final "return wrapper" -- confirm against the
# full source.
93 QUEUE_LOCK_TIMEOUT = 10
95 def wrapper(*args, **kwargs):
96 # Locking in exclusive, blocking mode because there could be several
97 # children running at the same time. Waiting up to 10 seconds.
# Lazily (re-)initialise the queue lock; any failure is fatal for the
# wrapped job-queue operation.
98 if _PrepareQueueLock() is not None:
99 raise errors.JobQueueError("Job queue failed initialization,"
100 " cannot update jobs")
101 queue_lock.Exclusive(blocking=True, timeout=QUEUE_LOCK_TIMEOUT)
103 return fn(*args, **kwargs)
110 def _DecodeImportExportIO(ieio, ieioargs):
111 """Decodes import/export I/O information.
# NOTE(review): the docstring close and the branches for the remaining
# IEIO_* constants (orig. lines 112-113, 117, 121-124) are elided in this
# excerpt.
114 if ieio == constants.IEIO_RAW_DISK:
# Raw disk: a single serialized Disk object.
115 assert len(ieioargs) == 1
116 return (objects.Disk.FromDict(ieioargs[0]), )
118 if ieio == constants.IEIO_SCRIPT:
# Script: (serialized Disk, second argument passed through unchanged).
119 assert len(ieioargs) == 2
120 return (objects.Disk.FromDict(ieioargs[0]), ieioargs[1])
125 def _DefaultAlternative(value, default):
126 """Returns value or, if evaluating to False, a default value.
128 Returns the given value, unless it evaluates to False. In the latter case the
129 default value is returned.
131 @param value: Value to return if it doesn't evaluate to False
132 @param default: Default value
133 @return: Given value or the default
142 class MlockallRequestExecutor(http.server.HttpServerRequestExecutor):
143 """Subclass ensuring request handlers are locked in RAM.
# NOTE(review): orig. lines 144-145 and 147-148 are elided; per the class
# docstring the missing part of __init__ presumably calls utils.Mlockall()
# before delegating to the parent constructor -- confirm against the full
# source.
146 def __init__(self, *args, **kwargs):
149 http.server.HttpServerRequestExecutor.__init__(self, *args, **kwargs)
152 class NodeRequestHandler(http.server.HttpServerHandler):
# NOTE(review): this excerpt is line-sampled -- the embedded original line
# numbers show gaps inside nearly every method below (elided decorators such
# as @staticmethod, docstring closers and some statements), so bodies are
# partial. Comments added here are based only on the visible lines.
153 """The server implementation.
155 This class holds all methods exposed over the RPC interface.
158 # too many public methods, and unused args - all methods get params
160 # pylint: disable=R0904,W0613
162 http.server.HttpServerHandler.__init__(self)
163 self.noded_pid = os.getpid()
# Dispatches POST requests to the matching perspective_* method (derived
# from the request path) and JSON-encodes a (success, payload) pair.
165 def HandleRequest(self, req):
169 if req.request_method.upper() != http.HTTP_POST:
170 raise http.HttpBadRequest("Only the POST method is supported")
172 path = req.request_path
173 if path.startswith("/"):
176 method = getattr(self, "perspective_%s" % path, None)
178 raise http.HttpNotFound()
# The request body is the JSON-encoded parameter list for the method.
181 result = (True, method(serializer.LoadJson(req.request_body)))
183 except backend.RPCFail, err:
184 # our custom failure exception; str(err) works fine if the
185 # exception was constructed with a single argument, and in
186 # this case, err.message == err.args[0] == str(err)
187 result = (False, str(err))
188 except errors.QuitGanetiException, err:
189 # Tell parent to quit
190 logging.info("Shutting down the node daemon, arguments: %s",
192 os.kill(self.noded_pid, signal.SIGTERM)
193 # And return the error's arguments, which must be already in
194 # correct tuple format
196 except Exception, err:
197 logging.exception("Error in RPC call")
198 result = (False, "Error while executing backend function: %s" % str(err))
200 return serializer.DumpJson(result)
202 # the new block devices --------------------------
205 def perspective_blockdev_create(params):
206 """Create a block device.
209 (bdev_s, size, owner, on_primary, info, excl_stor) = params
210 bdev = objects.Disk.FromDict(bdev_s)
212 raise ValueError("can't unserialize data!")
213 return backend.BlockdevCreate(bdev, size, owner, on_primary, info,
217 def perspective_blockdev_pause_resume_sync(params):
218 """Pause/resume sync of a block device.
221 disks_s, pause = params
222 disks = [objects.Disk.FromDict(bdev_s) for bdev_s in disks_s]
223 return backend.BlockdevPauseResumeSync(disks, pause)
226 def perspective_blockdev_wipe(params):
227 """Wipe a block device.
230 bdev_s, offset, size = params
231 bdev = objects.Disk.FromDict(bdev_s)
232 return backend.BlockdevWipe(bdev, offset, size)
235 def perspective_blockdev_remove(params):
236 """Remove a block device.
240 bdev = objects.Disk.FromDict(bdev_s)
241 return backend.BlockdevRemove(bdev)
244 def perspective_blockdev_rename(params):
245 """Rename a block device.
248 devlist = [(objects.Disk.FromDict(ds), uid) for ds, uid in params[0]]
249 return backend.BlockdevRename(devlist)
252 def perspective_blockdev_assemble(params):
253 """Assemble a block device.
256 bdev_s, owner, on_primary, idx = params
257 bdev = objects.Disk.FromDict(bdev_s)
259 raise ValueError("can't unserialize data!")
260 return backend.BlockdevAssemble(bdev, owner, on_primary, idx)
263 def perspective_blockdev_shutdown(params):
264 """Shutdown a block device.
268 bdev = objects.Disk.FromDict(bdev_s)
270 raise ValueError("can't unserialize data!")
271 return backend.BlockdevShutdown(bdev)
274 def perspective_blockdev_addchildren(params):
275 """Add a child to a mirror device.
277 Note: this is only valid for mirror devices. It's the caller's duty
278 to send a correct disk, otherwise we raise an error.
281 bdev_s, ndev_s = params
282 bdev = objects.Disk.FromDict(bdev_s)
283 ndevs = [objects.Disk.FromDict(disk_s) for disk_s in ndev_s]
284 if bdev is None or ndevs.count(None) > 0:
285 raise ValueError("can't unserialize data!")
286 return backend.BlockdevAddchildren(bdev, ndevs)
289 def perspective_blockdev_removechildren(params):
290 """Remove a child from a mirror device.
292 This is only valid for mirror devices, of course. It's the callers
293 duty to send a correct disk, otherwise we raise an error.
296 bdev_s, ndev_s = params
297 bdev = objects.Disk.FromDict(bdev_s)
298 ndevs = [objects.Disk.FromDict(disk_s) for disk_s in ndev_s]
299 if bdev is None or ndevs.count(None) > 0:
300 raise ValueError("can't unserialize data!")
301 return backend.BlockdevRemovechildren(bdev, ndevs)
304 def perspective_blockdev_getmirrorstatus(params):
305 """Return the mirror status for a list of disks.
308 disks = [objects.Disk.FromDict(dsk_s)
309 for dsk_s in params[0]]
310 return [status.ToDict()
311 for status in backend.BlockdevGetmirrorstatus(disks)]
314 def perspective_blockdev_getmirrorstatus_multi(params):
315 """Return the mirror status for a list of disks.
318 (node_disks, ) = params
320 disks = [objects.Disk.FromDict(dsk_s) for dsk_s in node_disks]
# NOTE(review): the initialisation of "result" and the if/else around the
# two appends (orig. 323, 325, 327) are elided in this excerpt.
324 for (success, status) in backend.BlockdevGetmirrorstatusMulti(disks):
326 result.append((success, status.ToDict()))
328 result.append((success, status))
333 def perspective_blockdev_find(params):
334 """Expose the FindBlockDevice functionality for a disk.
336 This will try to find but not activate a disk.
339 disk = objects.Disk.FromDict(params[0])
341 result = backend.BlockdevFind(disk)
345 return result.ToDict()
348 def perspective_blockdev_snapshot(params):
349 """Create a snapshot device.
351 Note that this is only valid for LVM disks, if we get passed
352 something else we raise an exception. The snapshot device can be
353 remove by calling the generic block device remove call.
356 cfbd = objects.Disk.FromDict(params[0])
357 return backend.BlockdevSnapshot(cfbd)
360 def perspective_blockdev_grow(params):
361 """Grow a stack of devices.
# Guards against old masters sending the short (pre-excl_stor) parameter
# list; the surrounding length check is elided in this excerpt.
365 raise ValueError("Received only %s parameters in blockdev_grow,"
366 " old master?" % len(params))
367 cfbd = objects.Disk.FromDict(params[0])
370 backingstore = params[3]
371 excl_stor = params[4]
372 return backend.BlockdevGrow(cfbd, amount, dryrun, backingstore, excl_stor)
375 def perspective_blockdev_close(params):
376 """Closes the given block devices.
379 disks = [objects.Disk.FromDict(cf) for cf in params[1]]
380 return backend.BlockdevClose(params[0], disks)
383 def perspective_blockdev_getdimensions(params):
384 """Compute the sizes of the given block devices.
387 disks = [objects.Disk.FromDict(cf) for cf in params[0]]
388 return backend.BlockdevGetdimensions(disks)
391 def perspective_blockdev_export(params):
392 """Export a block device to a remote node.
395 disk = objects.Disk.FromDict(params[0])
396 dest_node, dest_path, cluster_name = params[1:]
397 return backend.BlockdevExport(disk, dest_node, dest_path, cluster_name)
400 def perspective_blockdev_setinfo(params):
401 """Sets metadata information on the given block device.
404 (disk, info) = params
405 disk = objects.Disk.FromDict(disk)
406 return backend.BlockdevSetInfo(disk, info)
408 # blockdev/drbd specific methods ----------
411 def perspective_drbd_disconnect_net(params):
412 """Disconnects the network connection of drbd disks.
414 Note that this is only valid for drbd disks, so the members of the
415 disk list must all be drbd devices.
418 nodes_ip, disks, target_node_uuid = params
419 disks = [objects.Disk.FromDict(cf) for cf in disks]
420 return backend.DrbdDisconnectNet(target_node_uuid, nodes_ip, disks)
423 def perspective_drbd_attach_net(params):
424 """Attaches the network connection of drbd disks.
426 Note that this is only valid for drbd disks, so the members of the
427 disk list must all be drbd devices.
430 nodes_ip, disks, instance_name, multimaster, target_node_uuid = params
431 disks = [objects.Disk.FromDict(cf) for cf in disks]
432 return backend.DrbdAttachNet(target_node_uuid, nodes_ip, disks,
433 instance_name, multimaster)
436 def perspective_drbd_wait_sync(params):
437 """Wait until DRBD disks are synched.
439 Note that this is only valid for drbd disks, so the members of the
440 disk list must all be drbd devices.
443 nodes_ip, disks, target_node_uuid = params
444 disks = [objects.Disk.FromDict(cf) for cf in disks]
445 return backend.DrbdWaitSync(target_node_uuid, nodes_ip, disks)
448 def perspective_drbd_helper(params):
449 """Query drbd helper.
452 return backend.GetDrbdUsermodeHelper()
454 # export/import --------------------------
457 def perspective_finalize_export(params):
458 """Expose the finalize export functionality.
461 instance = objects.Instance.FromDict(params[0])
# Each entry is either a bool (kept as-is) or a serialized Disk; the
# "snap_disks = []" initialisation and else: line are elided here.
464 for disk in params[1]:
465 if isinstance(disk, bool):
466 snap_disks.append(disk)
468 snap_disks.append(objects.Disk.FromDict(disk))
470 return backend.FinalizeExport(instance, snap_disks)
473 def perspective_export_info(params):
474 """Query information about an existing export on this node.
476 The given path may not contain an export, in which case we return
481 return backend.ExportInfo(path)
484 def perspective_export_list(params):
485 """List the available exports on this node.
487 Note that as opposed to export_info, which may query data about an
488 export in any path, this only queries the standard Ganeti path
489 (pathutils.EXPORT_DIR).
492 return backend.ListExports()
495 def perspective_export_remove(params):
500 return backend.RemoveExport(export)
502 # block device ---------------------
504 def perspective_bdev_sizes(params):
505 """Query the list of block devices
509 return backend.GetBlockDevSizes(devices)
511 # volume --------------------------
514 def perspective_lv_list(params):
515 """Query the list of logical volumes in a given volume group.
519 return backend.GetVolumeList(vgname)
522 def perspective_vg_list(params):
523 """Query the list of volume groups.
526 return backend.ListVolumeGroups()
528 # Storage --------------------------
531 def perspective_storage_list(params):
532 """Get list of storage units.
535 (su_name, su_args, name, fields) = params
536 return container.GetStorage(su_name, *su_args).List(name, fields)
539 def perspective_storage_modify(params):
540 """Modify a storage unit.
543 (su_name, su_args, name, changes) = params
544 return container.GetStorage(su_name, *su_args).Modify(name, changes)
547 def perspective_storage_execute(params):
548 """Execute an operation on a storage unit.
551 (su_name, su_args, name, op) = params
552 return container.GetStorage(su_name, *su_args).Execute(name, op)
554 # bridge --------------------------
557 def perspective_bridges_exist(params):
558 """Check if all bridges given exist on this node.
561 bridges_list = params[0]
562 return backend.BridgesExist(bridges_list)
564 # instance --------------------------
567 def perspective_instance_os_add(params):
568 """Install an OS on a given instance.
572 inst = objects.Instance.FromDict(inst_s)
573 reinstall = params[1]
575 return backend.InstanceOsAdd(inst, reinstall, debug)
578 def perspective_instance_run_rename(params):
579 """Runs the OS rename script for an instance.
582 inst_s, old_name, debug = params
583 inst = objects.Instance.FromDict(inst_s)
584 return backend.RunRenameInstance(inst, old_name, debug)
587 def perspective_instance_shutdown(params):
588 """Shutdown an instance.
591 instance = objects.Instance.FromDict(params[0])
# Records this node's involvement in the opcode reason trail.
594 _extendReasonTrail(trail, "shutdown")
595 return backend.InstanceShutdown(instance, timeout, trail)
598 def perspective_instance_start(params):
599 """Start an instance.
602 (instance_name, startup_paused, trail) = params
603 instance = objects.Instance.FromDict(instance_name)
604 _extendReasonTrail(trail, "start")
605 return backend.StartInstance(instance, startup_paused, trail)
608 def perspective_migration_info(params):
609 """Gather information about an instance to be migrated.
612 instance = objects.Instance.FromDict(params[0])
613 return backend.MigrationInfo(instance)
616 def perspective_accept_instance(params):
617 """Prepare the node to accept an instance.
620 instance, info, target = params
621 instance = objects.Instance.FromDict(instance)
622 return backend.AcceptInstance(instance, info, target)
625 def perspective_instance_finalize_migration_dst(params):
626 """Finalize the instance migration on the destination node.
629 instance, info, success = params
630 instance = objects.Instance.FromDict(instance)
631 return backend.FinalizeMigrationDst(instance, info, success)
634 def perspective_instance_migrate(params):
635 """Migrates an instance.
638 cluster_name, instance, target, live = params
639 instance = objects.Instance.FromDict(instance)
640 return backend.MigrateInstance(cluster_name, instance, target, live)
643 def perspective_instance_finalize_migration_src(params):
644 """Finalize the instance migration on the source node.
647 instance, success, live = params
648 instance = objects.Instance.FromDict(instance)
649 return backend.FinalizeMigrationSource(instance, success, live)
652 def perspective_instance_get_migration_status(params):
653 """Reports migration status.
656 instance = objects.Instance.FromDict(params[0])
657 return backend.GetMigrationStatus(instance).ToDict()
660 def perspective_instance_reboot(params):
661 """Reboot an instance.
664 instance = objects.Instance.FromDict(params[0])
665 reboot_type = params[1]
666 shutdown_timeout = params[2]
668 _extendReasonTrail(trail, "reboot")
669 return backend.InstanceReboot(instance, reboot_type, shutdown_timeout,
673 def perspective_instance_balloon_memory(params):
674 """Modify instance runtime memory.
677 instance_dict, memory = params
678 instance = objects.Instance.FromDict(instance_dict)
679 return backend.InstanceBalloonMemory(instance, memory)
682 def perspective_instance_info(params):
683 """Query instance information.
686 (instance_name, hypervisor_name, hvparams) = params
687 return backend.GetInstanceInfo(instance_name, hypervisor_name, hvparams)
690 def perspective_instance_migratable(params):
691 """Query whether the specified instance can be migrated.
694 instance = objects.Instance.FromDict(params[0])
695 return backend.GetInstanceMigratable(instance)
698 def perspective_all_instances_info(params):
699 """Query information about all instances.
702 (hypervisor_list, all_hvparams) = params
703 return backend.GetAllInstancesInfo(hypervisor_list, all_hvparams)
706 def perspective_instance_list(params):
707 """Query the list of running instances.
710 (hypervisor_list, hvparams) = params
711 return backend.GetInstanceList(hypervisor_list, hvparams)
713 # node --------------------------
716 def perspective_node_has_ip_address(params):
717 """Checks if a node has the given ip address.
720 return netutils.IPAddress.Own(params[0])
723 def perspective_node_info(params):
724 """Query node information.
727 # FIXME: remove the fallback to excl_stor once all callers are
# Legacy 3-element form carries a separate excl_stor flag that gets folded
# into the storage-unit tuples; the else: line is elided in this excerpt.
729 if (len(params) == 3):
730 (legacy_storage_units, hv_specs, excl_stor) = params
731 storage_units = NodeRequestHandler._ConvertExclStorage(
732 legacy_storage_units, excl_stor)
734 (storage_units, hv_specs) = params
735 return backend.GetNodeInfo(storage_units, hv_specs)
# Helper for perspective_node_info: attaches the exclusive-storage flag to
# LVM storage units only. The "result_units = []"/else:/return lines are
# elided in this excerpt.
738 def _ConvertExclStorage(storage_units, excl_stor):
740 for (storage_type, storage_key) in storage_units:
741 if storage_type in [constants.ST_LVM_VG, constants.ST_LVM_PV]:
742 result_units.append((storage_type, storage_key, [excl_stor]))
744 result_units.append((storage_type, storage_key, []))
748 def perspective_etc_hosts_modify(params):
749 """Modify a node entry in /etc/hosts.
752 backend.EtcHostsModify(params[0], params[1], params[2])
757 def perspective_node_verify(params):
758 """Run a verify sequence on this node.
761 (what, cluster_name, hvparams) = params
762 return backend.VerifyNode(what, cluster_name, hvparams)
765 def perspective_node_verify_light(cls, params):
766 """Run a light verify sequence on this node.
769 # So far it's the same as the normal node_verify
770 return cls.perspective_node_verify(params)
773 def perspective_node_start_master_daemons(params):
774 """Start the master daemons on this node.
777 return backend.StartMasterDaemons(params[0])
780 def perspective_node_activate_master_ip(params):
781 """Activate the master IP on this node.
784 master_params = objects.MasterNetworkParameters.FromDict(params[0])
785 return backend.ActivateMasterIp(master_params, params[1])
788 def perspective_node_deactivate_master_ip(params):
789 """Deactivate the master IP on this node.
792 master_params = objects.MasterNetworkParameters.FromDict(params[0])
793 return backend.DeactivateMasterIp(master_params, params[1])
796 def perspective_node_stop_master(params):
797 """Stops master daemons on this node.
800 return backend.StopMasterDaemons()
803 def perspective_node_change_master_netmask(params):
804 """Change the master IP netmask.
807 return backend.ChangeMasterNetmask(params[0], params[1], params[2],
811 def perspective_node_leave_cluster(params):
812 """Cleanup after leaving a cluster.
815 return backend.LeaveCluster(params[0])
818 def perspective_node_volumes(params):
819 """Query the list of all logical volume groups.
822 return backend.NodeVolumes()
825 def perspective_node_demote_from_mc(params):
826 """Demote a node from the master candidate role.
829 return backend.DemoteFromMC()
832 def perspective_node_powercycle(params):
833 """Tries to powercycle the node.
836 (hypervisor_type, hvparams) = params
837 return backend.PowercycleNode(hypervisor_type, hvparams)
839 # cluster --------------------------
842 def perspective_version(params):
843 """Query version information.
846 return constants.PROTOCOL_VERSION
849 def perspective_upload_file(params):
852 Note that the backend implementation imposes strict rules on which
856 return backend.UploadFile(*(params[0]))
859 def perspective_master_info(params):
860 """Query master information.
863 return backend.GetMasterInfo()
866 def perspective_run_oob(params):
870 output = backend.RunOob(params[0], params[1], params[2], params[3])
# OOB helper output is JSON; the surrounding if/result handling lines are
# elided in this excerpt.
872 result = serializer.LoadJson(output)
878 def perspective_restricted_command(params):
879 """Runs a restricted command.
884 return backend.RunRestrictedCmd(cmd)
887 def perspective_write_ssconf_files(params):
888 """Write ssconf files.
892 return ssconf.WriteSsconfFiles(values)
895 def perspective_get_watcher_pause(params):
896 """Get watcher pause end.
899 return utils.ReadWatcherPauseFile(pathutils.WATCHER_PAUSEFILE)
902 def perspective_set_watcher_pause(params):
903 """Set watcher pause.
907 return backend.SetWatcherPause(until)
909 # os -----------------------
912 def perspective_os_diagnose(params):
913 """Query detailed information about existing OSes.
916 return backend.DiagnoseOS()
919 def perspective_os_get(params):
920 """Query information about a given OS.
924 os_obj = backend.OSFromDisk(name)
925 return os_obj.ToDict()
928 def perspective_os_validate(params):
929 """Run a given OS' validation routine.
932 required, name, checks, params = params
933 return backend.ValidateOS(required, name, checks, params)
935 # extstorage -----------------------
938 def perspective_extstorage_diagnose(params):
939 """Query detailed information about existing extstorage providers.
942 return backend.DiagnoseExtStorage()
944 # hooks -----------------------
947 def perspective_hooks_runner(params):
951 hpath, phase, env = params
952 hr = backend.HooksRunner()
953 return hr.RunHooks(hpath, phase, env)
955 # iallocator -----------------
958 def perspective_iallocator_runner(params):
959 """Run an iallocator script.
963 iar = backend.IAllocatorRunner()
964 return iar.Run(name, idata)
966 # test -----------------------
969 def perspective_test_delay(params):
974 status, rval = utils.TestDelay(duration)
# On failure the delay result is surfaced to the caller as an RPC failure;
# the "if not status:" guard and final return are elided in this excerpt.
976 raise backend.RPCFail(rval)
979 # file storage ---------------
982 def perspective_file_storage_dir_create(params):
983 """Create the file storage directory.
986 file_storage_dir = params[0]
987 return backend.CreateFileStorageDir(file_storage_dir)
990 def perspective_file_storage_dir_remove(params):
991 """Remove the file storage directory.
994 file_storage_dir = params[0]
995 return backend.RemoveFileStorageDir(file_storage_dir)
998 def perspective_file_storage_dir_rename(params):
999 """Rename the file storage directory.
1002 old_file_storage_dir = params[0]
1003 new_file_storage_dir = params[1]
1004 return backend.RenameFileStorageDir(old_file_storage_dir,
1005 new_file_storage_dir)
1007 # jobs ------------------------
# Job-queue methods run under the exclusive queue lock (see
# _RequireJobQueueLock above) so concurrent children cannot corrupt the
# queue files.
1010 @_RequireJobQueueLock
1011 def perspective_jobqueue_update(params):
1012 """Update job queue.
1015 (file_name, content) = params
1016 return backend.JobQueueUpdate(file_name, content)
1019 @_RequireJobQueueLock
1020 def perspective_jobqueue_purge(params):
1024 return backend.JobQueuePurge()
1027 @_RequireJobQueueLock
1028 def perspective_jobqueue_rename(params):
1029 """Rename a job queue file.
1032 # TODO: What if a file fails to rename?
1033 return [backend.JobQueueRename(old, new) for old, new in params[0]]
1036 @_RequireJobQueueLock
1037 def perspective_jobqueue_set_drain_flag(params):
1038 """Set job queue's drain flag.
1043 return jstore.SetDrainFlag(flag)
1045 # hypervisor ---------------
1048 def perspective_hypervisor_validate_params(params):
1049 """Validate the hypervisor parameters.
1052 (hvname, hvparams) = params
1053 return backend.ValidateHVParams(hvname, hvparams)
1058 def perspective_x509_cert_create(params):
1059 """Creates a new X509 certificate for SSL/TLS.
1062 (validity, ) = params
1063 return backend.CreateX509Certificate(validity)
1066 def perspective_x509_cert_remove(params):
1067 """Removes a X509 certificate.
1071 return backend.RemoveX509Certificate(name)
1076 def perspective_import_start(params):
1077 """Starts an import daemon.
1080 (opts_s, instance, component, (dest, dest_args)) = params
1082 opts = objects.ImportExportOptions.FromDict(opts_s)
1084 return backend.StartImportExportDaemon(constants.IEM_IMPORT, opts,
1086 objects.Instance.FromDict(instance),
1088 _DecodeImportExportIO(dest,
1092 def perspective_export_start(params):
1093 """Starts an export daemon.
1096 (opts_s, host, port, instance, component, (source, source_args)) = params
1098 opts = objects.ImportExportOptions.FromDict(opts_s)
1100 return backend.StartImportExportDaemon(constants.IEM_EXPORT, opts,
1102 objects.Instance.FromDict(instance),
1104 _DecodeImportExportIO(source,
1108 def perspective_impexp_status(params):
1109 """Retrieves the status of an import or export daemon.
1112 return backend.GetImportExportStatus(params[0])
1115 def perspective_impexp_abort(params):
1116 """Aborts an import or export.
1119 return backend.AbortImportExport(params[0])
1122 def perspective_impexp_cleanup(params):
1123 """Cleans up after an import or export.
1126 return backend.CleanupImportExport(params[0])
1129 def CheckNoded(_, args):
1130 """Initial checks whether to run or exit with a failure.
# NOTE(review): the docstring close and parts of the body are elided in this
# excerpt (e.g. the program-name argument of the usage message and the try:
# / except around codecs.lookup).
1133 if args: # noded doesn't take any arguments
# Python 2 print-to-stderr syntax; prints usage and exits with failure.
1134 print >> sys.stderr, ("Usage: %s [-f] [-d] [-p port] [-b ADDRESS]" %
1136 sys.exit(constants.EXIT_FAILURE)
# Sanity check that the Python 2 "string-escape" codec is available; the
# elided except handler presumably triggers the error message below --
# confirm against the full source.
1138 codecs.lookup("string-escape")
1140 print >> sys.stderr, ("Can't load the string-escape code which is part"
1141 " of the Python installation. Is your installation"
1142 " complete/correct? Aborting.")
1143 sys.exit(constants.EXIT_FAILURE)
1146 def PrepNoded(options, _):
1147 """Preparation node daemon function, executed with the PID file held.
# NOTE(review): several lines are elided in this excerpt (docstring close,
# the try:/if branches around the mlock setup, the queue-lock error check,
# the "server =" binding and server start-up). Comments below rely only on
# the visible code.
# Prefer the RAM-locking request executor; fall back to the plain executor
# when ctypes is unavailable or (per the elided branch, presumably) when
# mlock was disabled via --no-mlock -- confirm.
1151 request_executor_class = MlockallRequestExecutor
1154 except errors.NoCtypesError:
1155 logging.warning("Cannot set memory lock, ctypes module not found")
1156 request_executor_class = http.server.HttpServerRequestExecutor
1158 request_executor_class = http.server.HttpServerRequestExecutor
1160 # Read SSL certificate
1162 ssl_params = http.HttpSslParams(ssl_key_path=options.ssl_key,
1163 ssl_cert_path=options.ssl_cert)
# Queue-lock problems are logged but deliberately do not abort start-up.
1167 err = _PrepareQueueLock()
1169 # this might be some kind of file-system/permission error; while
1170 # this breaks the job queue functionality, we shouldn't prevent
1171 # startup of the whole node daemon because of this
1172 logging.critical("Can't init/verify the queue, proceeding anyway: %s", err)
1174 handler = NodeRequestHandler()
1176 mainloop = daemon.Mainloop()
# The HttpServer instance is presumably bound to "server" on an elided line,
# since "server" is returned below -- confirm against the full source.
1178 http.server.HttpServer(mainloop, options.bind_address, options.port,
1179 handler, ssl_params=ssl_params, ssl_verify_peer=True,
1180 request_executor_class=request_executor_class)
1183 return (mainloop, server)
1186 def ExecNoded(options, args, prep_data): # pylint: disable=W0613
1187 """Main node daemon function, executed with the PID file held.
# NOTE(review): the body is almost entirely elided in this excerpt; only the
# unpacking of the (mainloop, server) pair returned by PrepNoded is visible.
1190 (mainloop, server) = prep_data
# NOTE(review): the enclosing "def Main():" header (orig. ~1196-1197) is
# elided in this excerpt; the lines below are the body of Main.
1198 """Main function for the node daemon.
1201 parser = OptionParser(description="Ganeti node daemon",
1202 usage="%prog [-f] [-d] [-p port] [-b ADDRESS]\
1204 version="%%prog (ganeti) %s" %
1205 constants.RELEASE_VERSION)
# --no-mlock stores False into options.mlock (which defaults to True).
1206 parser.add_option("--no-mlock", dest="mlock",
1207 help="Do not mlock the node memory in ram",
1208 default=True, action="store_false")
# GenericMain presumably handles daemonisation, logging and the PID file,
# then drives the CheckNoded/PrepNoded/ExecNoded callbacks defined above --
# confirm in ganeti.daemon.
1210 daemon.GenericMain(constants.NODED, parser, CheckNoded, PrepNoded, ExecNoded,
1211 default_ssl_cert=pathutils.NODED_CERT_FILE,
1212 default_ssl_key=pathutils.NODED_CERT_FILE,
1213 console_logging=True)