4 # Copyright (C) 2006, 2007, 2010, 2011, 2012 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Ganeti node daemon"""
24 # pylint: disable=C0103,W0142
26 # C0103: Functions in this module need to have a given name structure,
27 # and the name of the daemon doesn't match
29 # W0142: Used * or ** magic, since we do use it extensively in this
38 from optparse import OptionParser
40 from ganeti import backend
41 from ganeti import constants
42 from ganeti import objects
43 from ganeti import errors
44 from ganeti import jstore
45 from ganeti import daemon
46 from ganeti import http
47 from ganeti import utils
48 from ganeti import storage
49 from ganeti import serializer
50 from ganeti import netutils
51 from ganeti import pathutils
52 from ganeti import ssconf
54 import ganeti.http.server # pylint: disable=W0611
60 def _PrepareQueueLock():
61 """Try to prepare the queue lock.
63 @return: None for success, otherwise an exception object
66 global queue_lock # pylint: disable=W0603
68 if queue_lock is not None:
73 queue_lock = jstore.InitAndVerifyQueue(must_lock=False)
75 except EnvironmentError, err:
def _RequireJobQueueLock(fn):
  """Decorator for job queue manipulating functions.

  Ensures the job queue lock is held (exclusively) around the wrapped
  function and released afterwards.

  """
  QUEUE_LOCK_TIMEOUT = 10

  def wrapper(*args, **kwargs):
    # Locking in exclusive, blocking mode because there could be several
    # children running at the same time. Waiting up to 10 seconds.
    if _PrepareQueueLock() is not None:
      raise errors.JobQueueError("Job queue failed initialization,"
                                 " cannot update jobs")
    queue_lock.Exclusive(blocking=True, timeout=QUEUE_LOCK_TIMEOUT)
    try:
      return fn(*args, **kwargs)
    finally:
      # always release, even if the wrapped function raised
      queue_lock.Unlock()

  return wrapper
def _DecodeImportExportIO(ieio, ieioargs):
  """Decodes import/export I/O information.

  @param ieio: I/O type (one of the C{constants.IEIO_*} values)
  @param ieioargs: serialized I/O arguments
  @return: tuple of decoded arguments suitable for the backend

  """
  if ieio == constants.IEIO_RAW_DISK:
    assert len(ieioargs) == 1
    return (objects.Disk.FromDict(ieioargs[0]), )

  if ieio == constants.IEIO_SCRIPT:
    assert len(ieioargs) == 2
    return (objects.Disk.FromDict(ieioargs[0]), ieioargs[1])

  # any other I/O type needs no decoding
  return ieioargs
class MlockallRequestExecutor(http.server.HttpServerRequestExecutor):
  """Subclass ensuring request handlers are locked in RAM.

  """
  def __init__(self, *args, **kwargs):
    # Lock all pages in memory before serving requests, so the daemon
    # keeps working even under heavy host memory pressure/swapping
    utils.Mlockall()

    http.server.HttpServerRequestExecutor.__init__(self, *args, **kwargs)
125 class NodeRequestHandler(http.server.HttpServerHandler):
126 """The server implementation.
128 This class holds all methods exposed over the RPC interface.
131 # too many public methods, and unused args - all methods get params
133 # pylint: disable=R0904,W0613
135 http.server.HttpServerHandler.__init__(self)
136 self.noded_pid = os.getpid()
138 def HandleRequest(self, req):
142 if req.request_method.upper() != http.HTTP_POST:
143 raise http.HttpBadRequest("Only the POST method is supported")
145 path = req.request_path
146 if path.startswith("/"):
149 method = getattr(self, "perspective_%s" % path, None)
151 raise http.HttpNotFound()
154 result = (True, method(serializer.LoadJson(req.request_body)))
156 except backend.RPCFail, err:
157 # our custom failure exception; str(err) works fine if the
158 # exception was constructed with a single argument, and in
159 # this case, err.message == err.args[0] == str(err)
160 result = (False, str(err))
161 except errors.QuitGanetiException, err:
162 # Tell parent to quit
163 logging.info("Shutting down the node daemon, arguments: %s",
165 os.kill(self.noded_pid, signal.SIGTERM)
166 # And return the error's arguments, which must be already in
167 # correct tuple format
169 except Exception, err:
170 logging.exception("Error in RPC call")
171 result = (False, "Error while executing backend function: %s" % str(err))
173 return serializer.DumpJson(result)
175 # the new block devices --------------------------
178 def perspective_blockdev_create(params):
179 """Create a block device.
182 bdev_s, size, owner, on_primary, info = params
183 bdev = objects.Disk.FromDict(bdev_s)
185 raise ValueError("can't unserialize data!")
186 return backend.BlockdevCreate(bdev, size, owner, on_primary, info)
189 def perspective_blockdev_pause_resume_sync(params):
190 """Pause/resume sync of a block device.
193 disks_s, pause = params
194 disks = [objects.Disk.FromDict(bdev_s) for bdev_s in disks_s]
195 return backend.BlockdevPauseResumeSync(disks, pause)
198 def perspective_blockdev_wipe(params):
199 """Wipe a block device.
202 bdev_s, offset, size = params
203 bdev = objects.Disk.FromDict(bdev_s)
204 return backend.BlockdevWipe(bdev, offset, size)
207 def perspective_blockdev_remove(params):
208 """Remove a block device.
212 bdev = objects.Disk.FromDict(bdev_s)
213 return backend.BlockdevRemove(bdev)
216 def perspective_blockdev_rename(params):
217 """Remove a block device.
220 devlist = [(objects.Disk.FromDict(ds), uid) for ds, uid in params[0]]
221 return backend.BlockdevRename(devlist)
224 def perspective_blockdev_assemble(params):
225 """Assemble a block device.
228 bdev_s, owner, on_primary, idx = params
229 bdev = objects.Disk.FromDict(bdev_s)
231 raise ValueError("can't unserialize data!")
232 return backend.BlockdevAssemble(bdev, owner, on_primary, idx)
235 def perspective_blockdev_shutdown(params):
236 """Shutdown a block device.
240 bdev = objects.Disk.FromDict(bdev_s)
242 raise ValueError("can't unserialize data!")
243 return backend.BlockdevShutdown(bdev)
246 def perspective_blockdev_addchildren(params):
247 """Add a child to a mirror device.
249 Note: this is only valid for mirror devices. It's the caller's duty
250 to send a correct disk, otherwise we raise an error.
253 bdev_s, ndev_s = params
254 bdev = objects.Disk.FromDict(bdev_s)
255 ndevs = [objects.Disk.FromDict(disk_s) for disk_s in ndev_s]
256 if bdev is None or ndevs.count(None) > 0:
257 raise ValueError("can't unserialize data!")
258 return backend.BlockdevAddchildren(bdev, ndevs)
261 def perspective_blockdev_removechildren(params):
262 """Remove a child from a mirror device.
264 This is only valid for mirror devices, of course. It's the callers
265 duty to send a correct disk, otherwise we raise an error.
268 bdev_s, ndev_s = params
269 bdev = objects.Disk.FromDict(bdev_s)
270 ndevs = [objects.Disk.FromDict(disk_s) for disk_s in ndev_s]
271 if bdev is None or ndevs.count(None) > 0:
272 raise ValueError("can't unserialize data!")
273 return backend.BlockdevRemovechildren(bdev, ndevs)
276 def perspective_blockdev_getmirrorstatus(params):
277 """Return the mirror status for a list of disks.
280 disks = [objects.Disk.FromDict(dsk_s)
281 for dsk_s in params[0]]
282 return [status.ToDict()
283 for status in backend.BlockdevGetmirrorstatus(disks)]
286 def perspective_blockdev_getmirrorstatus_multi(params):
287 """Return the mirror status for a list of disks.
290 (node_disks, ) = params
292 disks = [objects.Disk.FromDict(dsk_s) for dsk_s in node_disks]
296 for (success, status) in backend.BlockdevGetmirrorstatusMulti(disks):
298 result.append((success, status.ToDict()))
300 result.append((success, status))
305 def perspective_blockdev_find(params):
306 """Expose the FindBlockDevice functionality for a disk.
308 This will try to find but not activate a disk.
311 disk = objects.Disk.FromDict(params[0])
313 result = backend.BlockdevFind(disk)
317 return result.ToDict()
320 def perspective_blockdev_snapshot(params):
321 """Create a snapshot device.
323 Note that this is only valid for LVM disks, if we get passed
324 something else we raise an exception. The snapshot device can be
325 remove by calling the generic block device remove call.
328 cfbd = objects.Disk.FromDict(params[0])
329 return backend.BlockdevSnapshot(cfbd)
332 def perspective_blockdev_grow(params):
333 """Grow a stack of devices.
337 raise ValueError("Received only 3 parameters in blockdev_grow,"
339 cfbd = objects.Disk.FromDict(params[0])
342 backingstore = params[3]
343 return backend.BlockdevGrow(cfbd, amount, dryrun, backingstore)
346 def perspective_blockdev_close(params):
347 """Closes the given block devices.
350 disks = [objects.Disk.FromDict(cf) for cf in params[1]]
351 return backend.BlockdevClose(params[0], disks)
354 def perspective_blockdev_getsize(params):
355 """Compute the sizes of the given block devices.
358 disks = [objects.Disk.FromDict(cf) for cf in params[0]]
359 return backend.BlockdevGetsize(disks)
362 def perspective_blockdev_export(params):
363 """Compute the sizes of the given block devices.
366 disk = objects.Disk.FromDict(params[0])
367 dest_node, dest_path, cluster_name = params[1:]
368 return backend.BlockdevExport(disk, dest_node, dest_path, cluster_name)
371 def perspective_blockdev_setinfo(params):
372 """Sets metadata information on the given block device.
375 (disk, info) = params
376 disk = objects.Disk.FromDict(disk)
377 return backend.BlockdevSetInfo(disk, info)
379 # blockdev/drbd specific methods ----------
382 def perspective_drbd_disconnect_net(params):
383 """Disconnects the network connection of drbd disks.
385 Note that this is only valid for drbd disks, so the members of the
386 disk list must all be drbd devices.
389 nodes_ip, disks = params
390 disks = [objects.Disk.FromDict(cf) for cf in disks]
391 return backend.DrbdDisconnectNet(nodes_ip, disks)
394 def perspective_drbd_attach_net(params):
395 """Attaches the network connection of drbd disks.
397 Note that this is only valid for drbd disks, so the members of the
398 disk list must all be drbd devices.
401 nodes_ip, disks, instance_name, multimaster = params
402 disks = [objects.Disk.FromDict(cf) for cf in disks]
403 return backend.DrbdAttachNet(nodes_ip, disks,
404 instance_name, multimaster)
407 def perspective_drbd_wait_sync(params):
408 """Wait until DRBD disks are synched.
410 Note that this is only valid for drbd disks, so the members of the
411 disk list must all be drbd devices.
414 nodes_ip, disks = params
415 disks = [objects.Disk.FromDict(cf) for cf in disks]
416 return backend.DrbdWaitSync(nodes_ip, disks)
419 def perspective_drbd_helper(params):
420 """Query drbd helper.
423 return backend.GetDrbdUsermodeHelper()
425 # export/import --------------------------
428 def perspective_finalize_export(params):
429 """Expose the finalize export functionality.
432 instance = objects.Instance.FromDict(params[0])
435 for disk in params[1]:
436 if isinstance(disk, bool):
437 snap_disks.append(disk)
439 snap_disks.append(objects.Disk.FromDict(disk))
441 return backend.FinalizeExport(instance, snap_disks)
444 def perspective_export_info(params):
445 """Query information about an existing export on this node.
447 The given path may not contain an export, in which case we return
452 return backend.ExportInfo(path)
455 def perspective_export_list(params):
456 """List the available exports on this node.
458 Note that as opposed to export_info, which may query data about an
459 export in any path, this only queries the standard Ganeti path
460 (pathutils.EXPORT_DIR).
463 return backend.ListExports()
466 def perspective_export_remove(params):
471 return backend.RemoveExport(export)
473 # block device ---------------------
475 def perspective_bdev_sizes(params):
476 """Query the list of block devices
480 return backend.GetBlockDevSizes(devices)
482 # volume --------------------------
485 def perspective_lv_list(params):
486 """Query the list of logical volumes in a given volume group.
490 return backend.GetVolumeList(vgname)
493 def perspective_vg_list(params):
494 """Query the list of volume groups.
497 return backend.ListVolumeGroups()
499 # Storage --------------------------
502 def perspective_storage_list(params):
503 """Get list of storage units.
506 (su_name, su_args, name, fields) = params
507 return storage.GetStorage(su_name, *su_args).List(name, fields)
510 def perspective_storage_modify(params):
511 """Modify a storage unit.
514 (su_name, su_args, name, changes) = params
515 return storage.GetStorage(su_name, *su_args).Modify(name, changes)
518 def perspective_storage_execute(params):
519 """Execute an operation on a storage unit.
522 (su_name, su_args, name, op) = params
523 return storage.GetStorage(su_name, *su_args).Execute(name, op)
525 # bridge --------------------------
528 def perspective_bridges_exist(params):
529 """Check if all bridges given exist on this node.
532 bridges_list = params[0]
533 return backend.BridgesExist(bridges_list)
535 # instance --------------------------
538 def perspective_instance_os_add(params):
539 """Install an OS on a given instance.
543 inst = objects.Instance.FromDict(inst_s)
544 reinstall = params[1]
546 return backend.InstanceOsAdd(inst, reinstall, debug)
549 def perspective_instance_run_rename(params):
550 """Runs the OS rename script for an instance.
553 inst_s, old_name, debug = params
554 inst = objects.Instance.FromDict(inst_s)
555 return backend.RunRenameInstance(inst, old_name, debug)
558 def perspective_instance_shutdown(params):
559 """Shutdown an instance.
562 instance = objects.Instance.FromDict(params[0])
564 return backend.InstanceShutdown(instance, timeout)
567 def perspective_instance_start(params):
568 """Start an instance.
571 (instance_name, startup_paused) = params
572 instance = objects.Instance.FromDict(instance_name)
573 return backend.StartInstance(instance, startup_paused)
576 def perspective_migration_info(params):
577 """Gather information about an instance to be migrated.
580 instance = objects.Instance.FromDict(params[0])
581 return backend.MigrationInfo(instance)
584 def perspective_accept_instance(params):
585 """Prepare the node to accept an instance.
588 instance, info, target = params
589 instance = objects.Instance.FromDict(instance)
590 return backend.AcceptInstance(instance, info, target)
593 def perspective_instance_finalize_migration_dst(params):
594 """Finalize the instance migration on the destination node.
597 instance, info, success = params
598 instance = objects.Instance.FromDict(instance)
599 return backend.FinalizeMigrationDst(instance, info, success)
602 def perspective_instance_migrate(params):
603 """Migrates an instance.
606 instance, target, live = params
607 instance = objects.Instance.FromDict(instance)
608 return backend.MigrateInstance(instance, target, live)
611 def perspective_instance_finalize_migration_src(params):
612 """Finalize the instance migration on the source node.
615 instance, success, live = params
616 instance = objects.Instance.FromDict(instance)
617 return backend.FinalizeMigrationSource(instance, success, live)
620 def perspective_instance_get_migration_status(params):
621 """Reports migration status.
624 instance = objects.Instance.FromDict(params[0])
625 return backend.GetMigrationStatus(instance).ToDict()
628 def perspective_instance_reboot(params):
629 """Reboot an instance.
632 instance = objects.Instance.FromDict(params[0])
633 reboot_type = params[1]
634 shutdown_timeout = params[2]
635 return backend.InstanceReboot(instance, reboot_type, shutdown_timeout)
638 def perspective_instance_balloon_memory(params):
639 """Modify instance runtime memory.
642 instance_dict, memory = params
643 instance = objects.Instance.FromDict(instance_dict)
644 return backend.InstanceBalloonMemory(instance, memory)
647 def perspective_instance_info(params):
648 """Query instance information.
651 return backend.GetInstanceInfo(params[0], params[1])
654 def perspective_instance_migratable(params):
655 """Query whether the specified instance can be migrated.
658 instance = objects.Instance.FromDict(params[0])
659 return backend.GetInstanceMigratable(instance)
662 def perspective_all_instances_info(params):
663 """Query information about all instances.
666 return backend.GetAllInstancesInfo(params[0])
669 def perspective_instance_list(params):
670 """Query the list of running instances.
673 return backend.GetInstanceList(params[0])
675 # node --------------------------
678 def perspective_node_has_ip_address(params):
679 """Checks if a node has the given ip address.
682 return netutils.IPAddress.Own(params[0])
685 def perspective_node_info(params):
686 """Query node information.
689 (vg_names, hv_names) = params
690 return backend.GetNodeInfo(vg_names, hv_names)
693 def perspective_etc_hosts_modify(params):
694 """Modify a node entry in /etc/hosts.
697 backend.EtcHostsModify(params[0], params[1], params[2])
702 def perspective_node_verify(params):
703 """Run a verify sequence on this node.
706 return backend.VerifyNode(params[0], params[1])
709 def perspective_node_start_master_daemons(params):
710 """Start the master daemons on this node.
713 return backend.StartMasterDaemons(params[0])
716 def perspective_node_activate_master_ip(params):
717 """Activate the master IP on this node.
720 master_params = objects.MasterNetworkParameters.FromDict(params[0])
721 return backend.ActivateMasterIp(master_params, params[1])
724 def perspective_node_deactivate_master_ip(params):
725 """Deactivate the master IP on this node.
728 master_params = objects.MasterNetworkParameters.FromDict(params[0])
729 return backend.DeactivateMasterIp(master_params, params[1])
732 def perspective_node_stop_master(params):
733 """Stops master daemons on this node.
736 return backend.StopMasterDaemons()
739 def perspective_node_change_master_netmask(params):
740 """Change the master IP netmask.
743 return backend.ChangeMasterNetmask(params[0], params[1], params[2],
747 def perspective_node_leave_cluster(params):
748 """Cleanup after leaving a cluster.
751 return backend.LeaveCluster(params[0])
754 def perspective_node_volumes(params):
755 """Query the list of all logical volume groups.
758 return backend.NodeVolumes()
761 def perspective_node_demote_from_mc(params):
762 """Demote a node from the master candidate role.
765 return backend.DemoteFromMC()
768 def perspective_node_powercycle(params):
769 """Tries to powercycle the nod.
772 hypervisor_type = params[0]
773 return backend.PowercycleNode(hypervisor_type)
775 # cluster --------------------------
778 def perspective_version(params):
779 """Query version information.
782 return constants.PROTOCOL_VERSION
785 def perspective_upload_file(params):
788 Note that the backend implementation imposes strict rules on which
792 return backend.UploadFile(*(params[0]))
795 def perspective_master_info(params):
796 """Query master information.
799 return backend.GetMasterInfo()
802 def perspective_run_oob(params):
806 output = backend.RunOob(params[0], params[1], params[2], params[3])
808 result = serializer.LoadJson(output)
814 def perspective_restricted_command(params):
815 """Runs a restricted command.
820 return backend.RunRestrictedCmd(cmd)
823 def perspective_write_ssconf_files(params):
824 """Write ssconf files.
828 return ssconf.WriteSsconfFiles(values)
830 # os -----------------------
833 def perspective_os_diagnose(params):
834 """Query detailed information about existing OSes.
837 return backend.DiagnoseOS()
840 def perspective_os_get(params):
841 """Query information about a given OS.
845 os_obj = backend.OSFromDisk(name)
846 return os_obj.ToDict()
849 def perspective_os_validate(params):
850 """Run a given OS' validation routine.
853 required, name, checks, params = params
854 return backend.ValidateOS(required, name, checks, params)
856 # hooks -----------------------
859 def perspective_hooks_runner(params):
863 hpath, phase, env = params
864 hr = backend.HooksRunner()
865 return hr.RunHooks(hpath, phase, env)
867 # iallocator -----------------
870 def perspective_iallocator_runner(params):
871 """Run an iallocator script.
875 iar = backend.IAllocatorRunner()
876 return iar.Run(name, idata)
878 # test -----------------------
881 def perspective_test_delay(params):
886 status, rval = utils.TestDelay(duration)
888 raise backend.RPCFail(rval)
891 # file storage ---------------
894 def perspective_file_storage_dir_create(params):
895 """Create the file storage directory.
898 file_storage_dir = params[0]
899 return backend.CreateFileStorageDir(file_storage_dir)
902 def perspective_file_storage_dir_remove(params):
903 """Remove the file storage directory.
906 file_storage_dir = params[0]
907 return backend.RemoveFileStorageDir(file_storage_dir)
910 def perspective_file_storage_dir_rename(params):
911 """Rename the file storage directory.
914 old_file_storage_dir = params[0]
915 new_file_storage_dir = params[1]
916 return backend.RenameFileStorageDir(old_file_storage_dir,
917 new_file_storage_dir)
919 # jobs ------------------------
922 @_RequireJobQueueLock
923 def perspective_jobqueue_update(params):
927 (file_name, content) = params
928 return backend.JobQueueUpdate(file_name, content)
931 @_RequireJobQueueLock
932 def perspective_jobqueue_purge(params):
936 return backend.JobQueuePurge()
939 @_RequireJobQueueLock
940 def perspective_jobqueue_rename(params):
941 """Rename a job queue file.
944 # TODO: What if a file fails to rename?
945 return [backend.JobQueueRename(old, new) for old, new in params[0]]
948 @_RequireJobQueueLock
949 def perspective_jobqueue_set_drain_flag(params):
950 """Set job queue's drain flag.
955 return jstore.SetDrainFlag(flag)
957 # hypervisor ---------------
960 def perspective_hypervisor_validate_params(params):
961 """Validate the hypervisor parameters.
964 (hvname, hvparams) = params
965 return backend.ValidateHVParams(hvname, hvparams)
970 def perspective_x509_cert_create(params):
971 """Creates a new X509 certificate for SSL/TLS.
974 (validity, ) = params
975 return backend.CreateX509Certificate(validity)
978 def perspective_x509_cert_remove(params):
979 """Removes a X509 certificate.
983 return backend.RemoveX509Certificate(name)
988 def perspective_import_start(params):
989 """Starts an import daemon.
992 (opts_s, instance, component, (dest, dest_args)) = params
994 opts = objects.ImportExportOptions.FromDict(opts_s)
996 return backend.StartImportExportDaemon(constants.IEM_IMPORT, opts,
998 objects.Instance.FromDict(instance),
1000 _DecodeImportExportIO(dest,
1004 def perspective_export_start(params):
1005 """Starts an export daemon.
1008 (opts_s, host, port, instance, component, (source, source_args)) = params
1010 opts = objects.ImportExportOptions.FromDict(opts_s)
1012 return backend.StartImportExportDaemon(constants.IEM_EXPORT, opts,
1014 objects.Instance.FromDict(instance),
1016 _DecodeImportExportIO(source,
1020 def perspective_impexp_status(params):
1021 """Retrieves the status of an import or export daemon.
1024 return backend.GetImportExportStatus(params[0])
1027 def perspective_impexp_abort(params):
1028 """Aborts an import or export.
1031 return backend.AbortImportExport(params[0])
1034 def perspective_impexp_cleanup(params):
1035 """Cleans up after an import or export.
1038 return backend.CleanupImportExport(params[0])
def CheckNoded(_, args):
  """Initial checks whether to run or exit with a failure.

  """
  if args: # noded doesn't take any arguments
    print >> sys.stderr, ("Usage: %s [-f] [-d] [-p port] [-b ADDRESS]" %
                          sys.argv[0])
    sys.exit(constants.EXIT_FAILURE)
  try:
    codecs.lookup("string-escape")
  except LookupError:
    # string-escape is needed for the RPC payload decoding; abort early
    # if the Python installation is incomplete
    print >> sys.stderr, ("Can't load the string-escape code which is part"
                          " of the Python installation. Is your installation"
                          " complete/correct? Aborting.")
    sys.exit(constants.EXIT_FAILURE)
def PrepNoded(options, _):
  """Preparation node daemon function, executed with the PID file held.

  """
  if options.mlock:
    request_executor_class = MlockallRequestExecutor
    try:
      utils.Mlockall()
    except errors.NoCtypesError:
      logging.warning("Cannot set memory lock, ctypes module not found")
      request_executor_class = http.server.HttpServerRequestExecutor
  else:
    request_executor_class = http.server.HttpServerRequestExecutor

  # Read SSL certificate
  if options.ssl:
    ssl_params = http.HttpSslParams(ssl_key_path=options.ssl_key,
                                    ssl_cert_path=options.ssl_cert)
  else:
    ssl_params = None

  err = _PrepareQueueLock()
  if err is not None:
    # this might be some kind of file-system/permission error; while
    # this breaks the job queue functionality, we shouldn't prevent
    # startup of the whole node daemon because of this
    logging.critical("Can't init/verify the queue, proceeding anyway: %s", err)

  handler = NodeRequestHandler()

  mainloop = daemon.Mainloop()
  server = \
    http.server.HttpServer(mainloop, options.bind_address, options.port,
                           handler, ssl_params=ssl_params, ssl_verify_peer=True,
                           request_executor_class=request_executor_class)
  server.Start()

  return (mainloop, server)
def ExecNoded(options, args, prep_data): # pylint: disable=W0613
  """Main node daemon function, executed with the PID file held.

  """
  (mainloop, server) = prep_data
  try:
    mainloop.Run()
  finally:
    # make sure the listening socket is closed even if the loop raised
    server.Stop()
def Main():
  """Main function for the node daemon.

  """
  parser = OptionParser(description="Ganeti node daemon",
                        usage="%prog [-f] [-d] [-p port] [-b ADDRESS]",
                        version="%%prog (ganeti) %s" %
                        constants.RELEASE_VERSION)
  parser.add_option("--no-mlock", dest="mlock",
                    help="Do not mlock the node memory in ram",
                    default=True, action="store_false")

  daemon.GenericMain(constants.NODED, parser, CheckNoded, PrepNoded, ExecNoded,
                     default_ssl_cert=pathutils.NODED_CERT_FILE,
                     default_ssl_key=pathutils.NODED_CERT_FILE,
                     console_logging=True)