from optparse import OptionParser
from ganeti import backend
-from ganeti import logger
from ganeti import constants
from ganeti import objects
from ganeti import errors
from ganeti import http
from ganeti import utils
+import ganeti.http.server
+
queue_lock = None
return wrapper
-class NodeHttpServer(http.HttpServer):
+class NodeHttpServer(http.server.HttpServer):
"""The server implementation.
This class holds all methods exposed over the RPC interface.
"""
def __init__(self, *args, **kwargs):
- http.HttpServer.__init__(self, *args, **kwargs)
+ http.server.HttpServer.__init__(self, *args, **kwargs)
self.noded_pid = os.getpid()
def HandleRequest(self, req):
"""Handle a request.
"""
- if req.request_method.upper() != "PUT":
- raise http.HTTPBadRequest()
+ if req.request_method.upper() != http.HTTP_PUT:
+ raise http.HttpBadRequest()
path = req.request_path
if path.startswith("/"):
method = getattr(self, "perspective_%s" % path, None)
if method is None:
- raise http.HTTPNotFound()
+ raise http.HttpNotFound()
try:
try:
- return method(req.request_post_data)
+ return method(req.request_body)
except:
logging.exception("Error in RPC call")
raise
"""Closes the given block devices.
"""
- disks = [objects.Disk.FromDict(cf) for cf in params]
- return backend.CloseBlockDevices(disks)
+ disks = [objects.Disk.FromDict(cf) for cf in params[1]]
+ return backend.CloseBlockDevices(params[0], disks)
+
+ # blockdev/drbd specific methods ----------
+
+  @staticmethod
+  def perspective_drbd_disconnect_net(params):
+    """Disconnects the network connection of drbd disks.
+
+    Note that this is only valid for drbd disks, so the members of the
+    disk list must all be drbd devices.
+
+    @type params: sequence
+    @param params: two-element sequence of (nodes_ip, disks), where
+        disks is a list of serialized Disk dicts, each deserialized
+        here via objects.Disk.FromDict
+    @return: the result of backend.DrbdDisconnectNet
+
+    """
+    nodes_ip, disks = params
+    disks = [objects.Disk.FromDict(cf) for cf in disks]
+    return backend.DrbdDisconnectNet(nodes_ip, disks)
+
+  @staticmethod
+  def perspective_drbd_attach_net(params):
+    """Attaches the network connection of drbd disks.
+
+    Note that this is only valid for drbd disks, so the members of the
+    disk list must all be drbd devices.
+
+    @type params: sequence
+    @param params: four-element sequence of (nodes_ip, disks,
+        instance_name, multimaster); disks is a list of serialized
+        Disk dicts, deserialized here via objects.Disk.FromDict
+    @return: the result of backend.DrbdAttachNet
+
+    """
+    nodes_ip, disks, instance_name, multimaster = params
+    disks = [objects.Disk.FromDict(cf) for cf in disks]
+    return backend.DrbdAttachNet(nodes_ip, disks, instance_name, multimaster)
+
+  @staticmethod
+  def perspective_drbd_wait_sync(params):
+    """Wait until DRBD disks are synced.
+
+    Note that this is only valid for drbd disks, so the members of the
+    disk list must all be drbd devices.
+
+    @type params: sequence
+    @param params: two-element sequence of (nodes_ip, disks), where
+        disks is a list of serialized Disk dicts, each deserialized
+        here via objects.Disk.FromDict
+    @return: the result of backend.DrbdWaitSync
+
+    """
+    nodes_ip, disks = params
+    disks = [objects.Disk.FromDict(cf) for cf in disks]
+    return backend.DrbdWaitSync(nodes_ip, disks)
# export/import --------------------------
dest_node = params[1]
instance = objects.Instance.FromDict(params[2])
cluster_name = params[3]
- return backend.ExportSnapshot(disk, dest_node, instance, cluster_name)
+ dev_idx = params[4]
+ return backend.ExportSnapshot(disk, dest_node, instance,
+ cluster_name, dev_idx)
@staticmethod
def perspective_finalize_export(params):
"""Run the import function of an OS onto a given instance.
"""
- inst_s, os_disk, swap_disk, src_node, src_image, cluster_name = params
+ inst_s, src_node, src_images, cluster_name = params
inst = objects.Instance.FromDict(inst_s)
- return backend.ImportOSIntoInstance(inst, os_disk, swap_disk,
- src_node, src_image, cluster_name)
+ return backend.ImportOSIntoInstance(inst, src_node, src_images,
+ cluster_name)
@staticmethod
def perspective_instance_shutdown(params):
return backend.GetInstanceInfo(params[0], params[1])
@staticmethod
+  def perspective_instance_migratable(params):
+    """Query whether the specified instance can be migrated.
+
+    @type params: sequence
+    @param params: single-element sequence holding the serialized
+        instance (dict), deserialized via objects.Instance.FromDict
+    @return: the result of backend.GetInstanceMigratable
+
+    """
+    instance = objects.Instance.FromDict(params[0])
+    return backend.GetInstanceMigratable(instance)
+
+ @staticmethod
def perspective_all_instances_info(params):
"""Query information about all instances.
"""
return backend.NodeVolumes()
+  @staticmethod
+  def perspective_node_demote_from_mc(params):
+    """Demote a node from the master candidate role.
+
+    The params argument is not used; this delegates directly to
+    backend.DemoteFromMC and returns its result.
+
+    """
+    return backend.DemoteFromMC()
+
+
# cluster --------------------------
@staticmethod
"""
return backend.GetMasterInfo()
+  @staticmethod
+  def perspective_write_ssconf_files(params):
+    """Write ssconf files.
+
+    @type params: sequence
+    @param params: single-element sequence holding the values mapping
+        to pass to backend.WriteSsconfFiles
+    @return: the result of backend.WriteSsconfFiles
+
+    """
+    (values,) = params
+    return backend.WriteSsconfFiles(values)
+
# os -----------------------
@staticmethod
"""Rename a job queue file.
"""
- (old, new) = params
-
- return backend.JobQueueRename(old, new)
+ # TODO: What if a file fails to rename?
+ return [backend.JobQueueRename(old, new) for old, new in params]
@staticmethod
def perspective_jobqueue_set_drain(params):
def ParseOptions():
"""Parse the command line options.
- Returns:
- (options, args) as from OptionParser.parse_args()
+ @return: (options, args) as from OptionParser.parse_args()
"""
parser = OptionParser(description="Ganeti node daemon",
return options, args
+def EnsureRuntimeEnvironment():
+  """Ensure our run-time environment is complete.
+
+  Currently this creates directories which could be missing, either
+  due to directories being on a tmpfs mount, or due to incomplete
+  packaging.
+
+  Exits the process with status 5 if a required directory cannot be
+  created or exists but is not a directory.
+
+  """
+  # run-time dirs get the standard mode; the OS log dir is stricter
+  dirs = [(val, constants.RUN_DIRS_MODE) for val in constants.SUB_RUN_DIRS]
+  dirs.append((constants.LOG_OS_DIR, 0750))
+  for dir_name, dir_mode in dirs:
+    if not os.path.exists(dir_name):
+      try:
+        os.mkdir(dir_name, dir_mode)
+      except EnvironmentError, err:
+        # tolerate a concurrent creator: EEXIST between the exists()
+        # check and mkdir() is not an error
+        if err.errno != errno.EEXIST:
+          print ("Node setup wrong, cannot create directory '%s': %s" %
+                 (dir_name, err))
+          sys.exit(5)
+    if not os.path.isdir(dir_name):
+      print ("Node setup wrong, '%s' is not a directory" % dir_name)
+      sys.exit(5)
+
+
def main():
"""Main function for the node daemon.
options, args = ParseOptions()
utils.debug = options.debug
+
+ if options.fork:
+ utils.CloseFDs()
+
for fname in (constants.SSL_CERT_FILE,):
if not os.path.isfile(fname):
print "config %s not there, will not run." % fname
try:
port = utils.GetNodeDaemonPort()
- pwdata = utils.GetNodeDaemonPassword()
except errors.ConfigurationError, err:
print "Cluster configuration incomplete: '%s'" % str(err)
sys.exit(5)
- # create the various SUB_RUN_DIRS, if not existing, so that we handle the
- # situation where RUN_DIR is tmpfs
- for dir_name in constants.SUB_RUN_DIRS:
- if not os.path.exists(dir_name):
- try:
- os.mkdir(dir_name, 0755)
- except EnvironmentError, err:
- if err.errno != errno.EEXIST:
- print ("Node setup wrong, cannot create directory %s: %s" %
- (dir_name, err))
- sys.exit(5)
- if not os.path.isdir(dir_name):
- print ("Node setup wrong, %s is not a directory" % dir_name)
- sys.exit(5)
+ EnsureRuntimeEnvironment()
# become a daemon
if options.fork:
utils.WritePidFile(constants.NODED_PID)
try:
- logger.SetupLogging(logfile=constants.LOG_NODESERVER, debug=options.debug,
- stderr_logging=not options.fork)
+ utils.SetupLogging(logfile=constants.LOG_NODESERVER, debug=options.debug,
+ stderr_logging=not options.fork)
logging.info("ganeti node daemon startup")
+ # Read SSL certificate
+ ssl_params = http.HttpSslParams(ssl_key_path=constants.SSL_CERT_FILE,
+ ssl_cert_path=constants.SSL_CERT_FILE)
+
# Prepare job queue
queue_lock = jstore.InitAndVerifyQueue(must_lock=False)
mainloop = daemon.Mainloop()
- server = NodeHttpServer(mainloop, ("", port))
+ server = NodeHttpServer(mainloop, "", port,
+ ssl_params=ssl_params, ssl_verify_peer=True)
server.Start()
try:
mainloop.Run()