Forward-port DrbdNetReconfig
daemons/ganeti-noded
index 66e98dd..c146e8b 100755
 import os
 import sys
 import traceback
-import BaseHTTPServer
-import simplejson
+import SocketServer
 import errno
+import logging
+import signal
 
 from optparse import OptionParser
 
-
 from ganeti import backend
-from ganeti import logger
 from ganeti import constants
 from ganeti import objects
 from ganeti import errors
-from ganeti import ssconf
+from ganeti import jstore
+from ganeti import daemon
+from ganeti import http
 from ganeti import utils
 
+import ganeti.http.server
+
+
+queue_lock = None
+
+
+def _RequireJobQueueLock(fn):
+  """Decorator for job queue manipulating functions.
+
+  """
+  QUEUE_LOCK_TIMEOUT = 10
+
+  def wrapper(*args, **kwargs):
+    # Locking in exclusive, blocking mode because there could be several
+    # children running at the same time. Waiting up to 10 seconds.
+    queue_lock.Exclusive(blocking=True, timeout=QUEUE_LOCK_TIMEOUT)
+    try:
+      return fn(*args, **kwargs)
+    finally:
+      queue_lock.Unlock()
+
+  return wrapper
+
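
The decorator above serializes job-queue RPCs behind one exclusive lock and releases it even when the wrapped handler raises. A minimal sketch of the same wrap-and-release pattern, using a plain threading lock as a stand-in for the jstore queue lock (all names below are illustrative, not part of this change):

    import functools
    import threading

    _queue_lock = threading.Lock()             # stand-in for jstore's queue lock

    def require_queue_lock(fn):
        """Run fn only while holding the (illustrative) queue lock."""
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            _queue_lock.acquire()              # block until the lock is free
            try:
                return fn(*args, **kwargs)
            finally:
                _queue_lock.release()          # released even if fn raises
        return wrapper
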
 
-class ServerObject(BaseHTTPServer.BaseHTTPRequestHandler):
+class NodeHttpServer(http.server.HttpServer):
   """The server implementation.
 
   This class holds all methods exposed over the RPC interface.
 
   """
-  def do_PUT(self):
-    """Handle a post request.
+  def __init__(self, *args, **kwargs):
+    http.server.HttpServer.__init__(self, *args, **kwargs)
+    self.noded_pid = os.getpid()
+
+  def HandleRequest(self, req):
+    """Handle a request.
 
     """
-    path = self.path
+    if req.request_method.upper() != http.HTTP_PUT:
+      raise http.HttpBadRequest()
+
+    path = req.request_path
     if path.startswith("/"):
       path = path[1:]
-    mname = "perspective_%s" % path
-    if not hasattr(self, mname):
-      self.send_error(404)
-      return False
 
-    method = getattr(self, mname)
-    try:
-      body_length = int(self.headers.get('Content-Length', '0'))
-    except ValueError:
-      self.send_error(400, 'No Content-Length header or invalid format')
-      return False
+    method = getattr(self, "perspective_%s" % path, None)
+    if method is None:
+      raise http.HttpNotFound()
 
     try:
-      body = self.rfile.read(body_length)
-    except socket.error, err:
-      logger.Error("Socket error while reading: %s" % str(err))
-      return
-    try:
-      params = simplejson.loads(body)
-      result = method(params)
-      payload = simplejson.dumps(result)
-    except Exception, err:
-      self.send_error(500, "Error: %s" % str(err))
-      return False
-    self.send_response(200)
-    self.send_header('Content-Length', str(len(payload)))
-    self.end_headers()
-    self.wfile.write(payload)
-    return True
-
-  def log_message(self, format, *args):
-    """Log a request to the log.
-
-    This is the same as the parent, we just log somewhere else.
-
-    """
-    msg = ("%s - - [%s] %s" %
-           (self.address_string(),
-            self.log_date_time_string(),
-            format % args))
-    logger.Debug(msg)
+      try:
+        return method(req.request_body)
+      except:
+        logging.exception("Error in RPC call")
+        raise
+    except errors.QuitGanetiException, err:
+      # Tell parent to quit
+      os.kill(self.noded_pid, signal.SIGTERM)
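
HandleRequest maps a PUT to /<name> onto the method perspective_<name>(request_body) and reports 404 when no such method exists. A self-contained sketch of that lookup rule (class and handler below are hypothetical):

    class Dispatcher(object):
        """Toy dispatcher mirroring the perspective_* lookup above."""

        def perspective_version(self, params):
            return 1

        def dispatch(self, path, params):
            method = getattr(self, "perspective_%s" % path.lstrip("/"), None)
            if method is None:
                raise KeyError("no handler for %r" % path)   # noded raises HttpNotFound here
            return method(params)

    assert Dispatcher().dispatch("/version", []) == 1
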
 
   # the new block devices  --------------------------
 
@@ -211,6 +215,61 @@ class ServerObject(BaseHTTPServer.BaseHTTPRequestHandler):
     cfbd = objects.Disk.FromDict(params[0])
     return backend.SnapshotBlockDevice(cfbd)
 
+  @staticmethod
+  def perspective_blockdev_grow(params):
+    """Grow a stack of devices.
+
+    """
+    cfbd = objects.Disk.FromDict(params[0])
+    amount = params[1]
+    return backend.GrowBlockDevice(cfbd, amount)
+
+  @staticmethod
+  def perspective_blockdev_close(params):
+    """Closes the given block devices.
+
+    """
+    disks = [objects.Disk.FromDict(cf) for cf in params[1]]
+    return backend.CloseBlockDevices(params[0], disks)
+
+  # blockdev/drbd specific methods ----------
+
+  @staticmethod
+  def perspective_drbd_disconnect_net(params):
+    """Disconnects the network connection of drbd disks.
+
+    Note that this is only valid for drbd disks, so the members of the
+    disk list must all be drbd devices.
+
+    """
+    nodes_ip, disks = params
+    disks = [objects.Disk.FromDict(cf) for cf in disks]
+    return backend.DrbdDisconnectNet(nodes_ip, disks)
+
+  @staticmethod
+  def perspective_drbd_attach_net(params):
+    """Attaches the network connection of drbd disks.
+
+    Note that this is only valid for drbd disks, so the members of the
+    disk list must all be drbd devices.
+
+    """
+    nodes_ip, disks, instance_name, multimaster = params
+    disks = [objects.Disk.FromDict(cf) for cf in disks]
+    return backend.DrbdAttachNet(nodes_ip, disks, instance_name, multimaster)
+
+  @staticmethod
+  def perspective_drbd_wait_sync(params):
+    """Wait until DRBD disks are synched.
+
+    Note that this is only valid for drbd disks, so the members of the
+    disk list must all be drbd devices.
+
+    """
+    nodes_ip, disks = params
+    disks = [objects.Disk.FromDict(cf) for cf in disks]
+    return backend.DrbdWaitSync(nodes_ip, disks)
+
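
These three handlers are the node-side of the forward-ported DRBD network reconfiguration: the caller sends a node-IP map plus serialized Disk objects, and each handler rebuilds the Disk objects with FromDict() before calling into backend. A hedged caller-side sketch of that packing (the helper name is made up; ToDict() is assumed to be the serialization counterpart of the FromDict() calls above):

    def pack_drbd_attach_net(nodes_ip, disks, instance_name, multimaster):
        """Build the params list perspective_drbd_attach_net unpacks above.

        disks is assumed to be a list of objects.Disk instances.
        """
        return [nodes_ip, [d.ToDict() for d in disks], instance_name, multimaster]
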
   # export/import  --------------------------
 
   @staticmethod
@@ -221,7 +280,10 @@ class ServerObject(BaseHTTPServer.BaseHTTPRequestHandler):
     disk = objects.Disk.FromDict(params[0])
     dest_node = params[1]
     instance = objects.Instance.FromDict(params[2])
-    return backend.ExportSnapshot(disk, dest_node, instance)
+    cluster_name = params[3]
+    dev_idx = params[4]
+    return backend.ExportSnapshot(disk, dest_node, instance,
+                                  cluster_name, dev_idx)
 
   @staticmethod
   def perspective_finalize_export(params):
@@ -300,28 +362,28 @@ class ServerObject(BaseHTTPServer.BaseHTTPRequestHandler):
     """Install an OS on a given instance.
 
     """
-    inst_s, os_disk, swap_disk = params
+    inst_s = params[0]
     inst = objects.Instance.FromDict(inst_s)
-    return backend.AddOSToInstance(inst, os_disk, swap_disk)
+    return backend.AddOSToInstance(inst)
 
   @staticmethod
   def perspective_instance_run_rename(params):
     """Runs the OS rename script for an instance.
 
     """
-    inst_s, old_name, os_disk, swap_disk = params
+    inst_s, old_name = params
     inst = objects.Instance.FromDict(inst_s)
-    return backend.RunRenameInstance(inst, old_name, os_disk, swap_disk)
+    return backend.RunRenameInstance(inst, old_name)
 
   @staticmethod
   def perspective_instance_os_import(params):
     """Run the import function of an OS onto a given instance.
 
     """
-    inst_s, os_disk, swap_disk, src_node, src_image = params
+    inst_s, src_node, src_images, cluster_name = params
     inst = objects.Instance.FromDict(inst_s)
-    return backend.ImportOSIntoInstance(inst, os_disk, swap_disk,
-                                        src_node, src_image)
+    return backend.ImportOSIntoInstance(inst, src_node, src_images,
+                                        cluster_name)
 
   @staticmethod
   def perspective_instance_shutdown(params):
@@ -341,6 +403,15 @@ class ServerObject(BaseHTTPServer.BaseHTTPRequestHandler):
     return backend.StartInstance(instance, extra_args)
 
   @staticmethod
+  def perspective_instance_migrate(params):
+    """Migrates an instance.
+
+    """
+    instance, target, live = params
+    instance = objects.Instance.FromDict(instance)
+    return backend.MigrateInstance(instance, target, live)
+
+  @staticmethod
   def perspective_instance_reboot(params):
     """Reboot an instance.
 
@@ -355,21 +426,29 @@ class ServerObject(BaseHTTPServer.BaseHTTPRequestHandler):
     """Query instance information.
 
     """
-    return backend.GetInstanceInfo(params[0])
+    return backend.GetInstanceInfo(params[0], params[1])
+
+  @staticmethod
+  def perspective_instance_migratable(params):
+    """Query whether the specified instance can be migrated.
+
+    """
+    instance = objects.Instance.FromDict(params[0])
+    return backend.GetInstanceMigratable(instance)
 
   @staticmethod
   def perspective_all_instances_info(params):
     """Query information about all instances.
 
     """
-    return backend.GetAllInstancesInfo()
+    return backend.GetAllInstancesInfo(params[0])
 
   @staticmethod
   def perspective_instance_list(params):
     """Query the list of running instances.
 
     """
-    return backend.GetInstanceList()
+    return backend.GetInstanceList(params[0])
 
   # node --------------------------
 
@@ -382,12 +461,19 @@ class ServerObject(BaseHTTPServer.BaseHTTPRequestHandler):
                          live_port_needed=params[4], source=params[0])
 
   @staticmethod
+  def perspective_node_has_ip_address(params):
+    """Checks if a node has the given ip address.
+
+    """
+    return utils.OwnIpAddress(params[0])
+
+  @staticmethod
   def perspective_node_info(params):
     """Query node information.
 
     """
-    vgname = params[0]
-    return backend.GetNodeInfo(vgname)
+    vgname, hypervisor_type = params
+    return backend.GetNodeInfo(vgname, hypervisor_type)
 
   @staticmethod
   def perspective_node_add(params):
@@ -402,21 +488,21 @@ class ServerObject(BaseHTTPServer.BaseHTTPRequestHandler):
     """Run a verify sequence on this node.
 
     """
-    return backend.VerifyNode(params[0])
+    return backend.VerifyNode(params[0], params[1])
 
   @staticmethod
   def perspective_node_start_master(params):
     """Promote this node to master status.
 
     """
-    return backend.StartMaster()
+    return backend.StartMaster(params[0])
 
   @staticmethod
   def perspective_node_stop_master(params):
     """Demote this node from master status.
 
     """
-    return backend.StopMaster()
+    return backend.StopMaster(params[0])
 
   @staticmethod
   def perspective_node_leave_cluster(params):
@@ -432,6 +518,14 @@ class ServerObject(BaseHTTPServer.BaseHTTPRequestHandler):
     """
     return backend.NodeVolumes()
 
+  @staticmethod
+  def perspective_node_demote_from_mc(params):
+    """Demote a node from the master candidate role.
+
+    """
+    return backend.DemoteFromMC()
+
+
   # cluster --------------------------
 
   @staticmethod
@@ -451,6 +545,20 @@ class ServerObject(BaseHTTPServer.BaseHTTPRequestHandler):
     """
     return backend.UploadFile(*params)
 
+  @staticmethod
+  def perspective_master_info(params):
+    """Query master information.
+
+    """
+    return backend.GetMasterInfo()
+
+  @staticmethod
+  def perspective_write_ssconf_files(params):
+    """Write ssconf files.
+
+    """
+    (values,) = params
+    return backend.WriteSsconfFiles(values)
 
   # os -----------------------
 
@@ -484,6 +592,17 @@ class ServerObject(BaseHTTPServer.BaseHTTPRequestHandler):
     hr = backend.HooksRunner()
     return hr.RunHooks(hpath, phase, env)
 
+  # iallocator -----------------
+
+  @staticmethod
+  def perspective_iallocator_runner(params):
+    """Run an iallocator script.
+
+    """
+    name, idata = params
+    iar = backend.IAllocatorRunner()
+    return iar.Run(name, idata)
+
   # test -----------------------
 
   @staticmethod
@@ -494,6 +613,8 @@ class ServerObject(BaseHTTPServer.BaseHTTPRequestHandler):
     duration = params[0]
     return utils.TestDelay(duration)
 
+  # file storage ---------------
+
   @staticmethod
   def perspective_file_storage_dir_create(params):
     """Create the file storage directory.
@@ -520,12 +641,58 @@ class ServerObject(BaseHTTPServer.BaseHTTPRequestHandler):
     return backend.RenameFileStorageDir(old_file_storage_dir,
                                         new_file_storage_dir)
 
+  # jobs ------------------------
+
+  @staticmethod
+  @_RequireJobQueueLock
+  def perspective_jobqueue_update(params):
+    """Update job queue.
+
+    """
+    (file_name, content) = params
+    return backend.JobQueueUpdate(file_name, content)
+
+  @staticmethod
+  @_RequireJobQueueLock
+  def perspective_jobqueue_purge(params):
+    """Purge job queue.
+
+    """
+    return backend.JobQueuePurge()
+
+  @staticmethod
+  @_RequireJobQueueLock
+  def perspective_jobqueue_rename(params):
+    """Rename a job queue file.
+
+    """
+    # TODO: What if a file fails to rename?
+    return [backend.JobQueueRename(old, new) for old, new in params]
+
+  @staticmethod
+  def perspective_jobqueue_set_drain(params):
+    """Set/unset the queue drain flag.
+
+    """
+    drain_flag = params[0]
+    return backend.JobQueueSetDrainFlag(drain_flag)
+
+
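
perspective_jobqueue_rename above expects params to be a sequence of (old, new) path pairs and returns one result per pair. An illustrative payload (the queue file names below are made up):

    rename_params = [
        ("/var/lib/ganeti/queue/job-12", "/var/lib/ganeti/queue/archive/job-12"),
        ("/var/lib/ganeti/queue/job-13", "/var/lib/ganeti/queue/archive/job-13"),
    ]
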
+  # hypervisor ---------------
+
+  @staticmethod
+  def perspective_hypervisor_validate_params(params):
+    """Validate the hypervisor parameters.
+
+    """
+    (hvname, hvparams) = params
+    return backend.ValidateHVParams(hvname, hvparams)
+
 
 def ParseOptions():
   """Parse the command line options.
 
-  Returns:
-    (options, args) as from OptionParser.parse_args()
+  @return: (options, args) as from OptionParser.parse_args()
 
   """
   parser = OptionParser(description="Ganeti node daemon",
@@ -543,49 +710,82 @@ def ParseOptions():
   return options, args
 
 
+def EnsureRuntimeEnvironment():
+  """Ensure our run-time environment is complete.
+
+  Currently this creates directories which could be missing, either
+  due to directories being on a tmpfs mount, or due to incomplete
+  packaging.
+
+  """
+  dirs = [(val, constants.RUN_DIRS_MODE) for val in constants.SUB_RUN_DIRS]
+  dirs.append((constants.LOG_OS_DIR, 0750))
+  for dir_name, dir_mode in dirs:
+    if not os.path.exists(dir_name):
+      try:
+        os.mkdir(dir_name, dir_mode)
+      except EnvironmentError, err:
+        if err.errno != errno.EEXIST:
+          print ("Node setup wrong, cannot create directory '%s': %s" %
+                 (dir_name, err))
+          sys.exit(5)
+    if not os.path.isdir(dir_name):
+      print ("Node setup wrong, '%s' is not a directory" % dir_name)
+      sys.exit(5)
+
+
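
EnsureRuntimeEnvironment tolerates a directory that already exists (EEXIST) but aborts if the path exists and is not a directory. A small sketch of that create-or-verify pattern outside the daemon (function name is illustrative):

    import errno
    import os

    def ensure_dir(path, mode):
        """Create path with mode, tolerating a concurrent creator (EEXIST)."""
        try:
            os.mkdir(path, mode)
        except OSError as err:
            if err.errno != errno.EEXIST:
                raise
        if not os.path.isdir(path):
            raise RuntimeError("%s exists but is not a directory" % path)
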
 def main():
   """Main function for the node daemon.
 
   """
+  global queue_lock
+
   options, args = ParseOptions()
   utils.debug = options.debug
+
+  if options.fork:
+    utils.CloseFDs()
+
   for fname in (constants.SSL_CERT_FILE,):
     if not os.path.isfile(fname):
       print "config %s not there, will not run." % fname
       sys.exit(5)
 
   try:
-    ss = ssconf.SimpleStore()
-    port = ss.GetNodeDaemonPort()
-    pwdata = ss.GetNodeDaemonPassword()
+    port = utils.GetNodeDaemonPort()
   except errors.ConfigurationError, err:
     print "Cluster configuration incomplete: '%s'" % str(err)
     sys.exit(5)
 
-  # create /var/run/ganeti if not existing, in order to take care of
-  # tmpfs /var/run
-  if not os.path.exists(constants.BDEV_CACHE_DIR):
-    try:
-      os.mkdir(constants.BDEV_CACHE_DIR, 0755)
-    except EnvironmentError, err:
-      if err.errno != errno.EEXIST:
-        print ("Node setup wrong, cannot create directory %s: %s" %
-               (constants.BDEV_CACHE_DIR, err))
-        sys.exit(5)
-  if not os.path.isdir(constants.BDEV_CACHE_DIR):
-    print ("Node setup wrong, %s is not a directory" %
-           constants.BDEV_CACHE_DIR)
-    sys.exit(5)
+  EnsureRuntimeEnvironment()
 
   # become a daemon
   if options.fork:
     utils.Daemonize(logfile=constants.LOG_NODESERVER)
 
-  logger.SetupLogging(twisted_workaround=True, debug=options.debug,
-                      program="ganeti-noded")
+  utils.WritePidFile(constants.NODED_PID)
+  try:
+    utils.SetupLogging(logfile=constants.LOG_NODESERVER, debug=options.debug,
+                       stderr_logging=not options.fork)
+    logging.info("ganeti node daemon startup")
+
+    # Read SSL certificate
+    ssl_params = http.HttpSslParams(ssl_key_path=constants.SSL_CERT_FILE,
+                                    ssl_cert_path=constants.SSL_CERT_FILE)
+
+    # Prepare job queue
+    queue_lock = jstore.InitAndVerifyQueue(must_lock=False)
 
-  httpd = BaseHTTPServer.HTTPServer(('', port), ServerObject)
-  httpd.serve_forever()
+    mainloop = daemon.Mainloop()
+    server = NodeHttpServer(mainloop, "", port,
+                            ssl_params=ssl_params, ssl_verify_peer=True)
+    server.Start()
+    try:
+      mainloop.Run()
+    finally:
+      server.Stop()
+  finally:
+    utils.RemovePidFile(constants.NODED_PID)
 
 
 if __name__ == '__main__':
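
The reworked main() follows a write-pidfile / start-server / run-mainloop / clean-up sequence, with nested try/finally blocks guaranteeing that the server is stopped and the pidfile removed on any exit. A stripped-down sketch of that lifecycle with stand-in classes (nothing below is ganeti API; it only mirrors the nesting above):

    import os

    class ToyServer(object):
        def Start(self): pass                  # bind and listen would happen here
        def Stop(self): pass                   # close the listening socket

    class ToyMainloop(object):
        def Run(self): pass                    # would block, dispatching I/O until a signal

    def run_daemon(pidfile):
        with open(pidfile, "w") as fd:         # WritePidFile stand-in
            fd.write("%d\n" % os.getpid())
        try:
            server, loop = ToyServer(), ToyMainloop()
            server.Start()
            try:
                loop.Run()
            finally:
                server.Stop()                  # always stop listening first
        finally:
            os.unlink(pidfile)                 # RemovePidFile stand-in, even on errors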