Fix broken commit 9e302a8
[ganeti-local] / lib / bootstrap.py
index ae0fae7..cc89bee 100644
@@ -100,37 +100,144 @@ def GenerateSelfSignedSslCert(file_name, validity=(365 * 5)):
     os.close(fd)
 
 
-def _InitGanetiServerSetup():
-  """Setup the necessary configuration for the initial node daemon.
+def GenerateHmacKey(file_name):
+  """Writes a new HMAC key.
 
-  This creates the nodepass file containing the shared password for
-  the cluster and also generates the SSL certificate.
+  @type file_name: str
+  @param file_name: Path to output file
+
+  """
+  utils.WriteFile(file_name, data="%s\n" % utils.GenerateSecret(), mode=0400,
+                  backup=True)
+
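For illustration, a minimal stdlib-only sketch of what GenerateHmacKey ends up producing, assuming utils.GenerateSecret returns a random hexadecimal string and leaving out the backup handling that utils.WriteFile(backup=True) provides (both are assumptions about ganeti internals, not a drop-in replacement):

    import os

    def generate_hmac_key_sketch(file_name):
      # Random hex secret, standing in for utils.GenerateSecret()
      secret = os.urandom(20).encode("hex")
      tmp = file_name + ".new"
      f = open(tmp, "w")
      try:
        f.write("%s\n" % secret)
      finally:
        f.close()
      os.chmod(tmp, 0400)        # owner read-only, like mode=0400 above
      os.rename(tmp, file_name)  # note: no backup of an existing key is kept here
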
+
+def GenerateClusterCrypto(new_cluster_cert, new_rapi_cert, new_confd_hmac_key,
+                          rapi_cert_pem=None,
+                          nodecert_file=constants.NODED_CERT_FILE,
+                          rapicert_file=constants.RAPI_CERT_FILE,
+                          hmackey_file=constants.CONFD_HMAC_KEY):
+  """Updates the cluster certificates, keys and secrets.
+
+  @type new_cluster_cert: bool
+  @param new_cluster_cert: Whether to generate a new cluster certificate
+  @type new_rapi_cert: bool
+  @param new_rapi_cert: Whether to generate a new RAPI certificate
+  @type new_confd_hmac_key: bool
+  @param new_confd_hmac_key: Whether to generate a new HMAC key
+  @type rapi_cert_pem: string
+  @param rapi_cert_pem: New RAPI certificate in PEM format
+  @type nodecert_file: string
+  @param nodecert_file: optional override of the node cert file path
+  @type rapicert_file: string
+  @param rapicert_file: optional override of the rapi cert file path
+  @type hmackey_file: string
+  @param hmackey_file: optional override of the hmac key file path
 
   """
-  GenerateSelfSignedSslCert(constants.SSL_CERT_FILE)
+  # noded SSL certificate
+  cluster_cert_exists = os.path.exists(nodecert_file)
+  if new_cluster_cert or not cluster_cert_exists:
+    if cluster_cert_exists:
+      utils.CreateBackup(nodecert_file)
+
+    logging.debug("Generating new cluster certificate at %s", nodecert_file)
+    GenerateSelfSignedSslCert(nodecert_file)
 
-  # Don't overwrite existing file
-  if not os.path.exists(constants.RAPI_CERT_FILE):
-    GenerateSelfSignedSslCert(constants.RAPI_CERT_FILE)
+  # confd HMAC key
+  if new_confd_hmac_key or not os.path.exists(hmackey_file):
+    logging.debug("Writing new confd HMAC key to %s", hmackey_file)
+    GenerateHmacKey(hmackey_file)
+
+  # RAPI
+  rapi_cert_exists = os.path.exists(rapicert_file)
+
+  if rapi_cert_pem:
+    # Assume rapi_cert_pem contains a valid PEM-formatted certificate and key
+    logging.debug("Writing RAPI certificate at %s", rapicert_file)
+    utils.WriteFile(rapicert_file, data=rapi_cert_pem, backup=True)
+
+  elif new_rapi_cert or not rapi_cert_exists:
+    if rapi_cert_exists:
+      utils.CreateBackup(rapicert_file)
+
+    logging.debug("Generating new RAPI certificate at %s", rapicert_file)
+    GenerateSelfSignedSslCert(rapicert_file)
+
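A hedged usage sketch of the new entry point: the boolean flags only force regeneration (missing files are created either way), and the *_file keyword arguments mainly let tests redirect output. The import path and the /tmp paths below are illustrative assumptions:

    from ganeti import bootstrap

    # Force a new noded certificate, leave RAPI cert and confd HMAC key alone
    bootstrap.GenerateClusterCrypto(True, False, False)

    # Regenerate everything under scratch paths, e.g. from a unit test
    bootstrap.GenerateClusterCrypto(True, True, True,
                                    nodecert_file="/tmp/test-noded.pem",
                                    rapicert_file="/tmp/test-rapi.pem",
                                    hmackey_file="/tmp/test-hmac.key")
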
+
+def _InitGanetiServerSetup(master_name):
+  """Setup the necessary configuration for the initial node daemon.
 
-  if not os.path.exists(constants.HMAC_CLUSTER_KEY):
-    utils.WriteFile(constants.HMAC_CLUSTER_KEY,
-                    data=utils.GenerateSecret(),
-                    mode=0400)
+  This generates the cluster's shared certificates and secrets, starts
+  the node daemon and waits for it to become responsive.
 
-  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])
+  """
+  # Generate cluster secrets
+  GenerateClusterCrypto(True, False, False)
 
+  result = utils.RunCmd([constants.DAEMON_UTIL, "start", constants.NODED])
   if result.failed:
     raise errors.OpExecError("Could not start the node daemon, command %s"
                              " had exitcode %s and error %s" %
                              (result.cmd, result.exit_code, result.output))
 
+  _WaitForNodeDaemon(master_name)
+
+
+def _WaitForNodeDaemon(node_name):
+  """Wait for node daemon to become responsive.
+
+  """
+  def _CheckNodeDaemon():
+    result = rpc.RpcRunner.call_version([node_name])[node_name]
+    if result.fail_msg:
+      raise utils.RetryAgain()
+
+  try:
+    utils.Retry(_CheckNodeDaemon, 1.0, 10.0)
+  except utils.RetryTimeout:
+    raise errors.OpExecError("Node daemon on %s didn't answer queries within"
+                             " 10 seconds" % node_name)
+
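utils.Retry is a ganeti-internal helper; below is a stdlib-only sketch of the same poll-with-timeout idea, with the RPC version call replaced by an arbitrary caller-supplied probe (the 1.0s delay and 10.0s timeout mirror the call above):

    import time

    def wait_for(check_fn, delay=1.0, timeout=10.0):
      # Poll check_fn until it returns True, or give up after `timeout` seconds
      deadline = time.time() + timeout
      while True:
        if check_fn():
          return
        if time.time() + delay > deadline:
          raise RuntimeError("check did not succeed within %.1f seconds" % timeout)
        time.sleep(delay)
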
+
+def _InitFileStorage(file_storage_dir):
+  """Initialize if needed the file storage.
+
+  @param file_storage_dir: the user-supplied value
+  @return: either empty string (if file storage was disabled at build
+      time) or the normalized path to the storage directory
+
+  """
+  if not constants.ENABLE_FILE_STORAGE:
+    return ""
+
+  file_storage_dir = os.path.normpath(file_storage_dir)
+
+  if not os.path.isabs(file_storage_dir):
+    raise errors.OpPrereqError("The file storage directory you passed is"
+                               " not an absolute path.", errors.ECODE_INVAL)
+
+  if not os.path.exists(file_storage_dir):
+    try:
+      os.makedirs(file_storage_dir, 0750)
+    except OSError, err:
+      raise errors.OpPrereqError("Cannot create file storage directory"
+                                 " '%s': %s" % (file_storage_dir, err),
+                                 errors.ECODE_ENVIRON)
+
+  if not os.path.isdir(file_storage_dir):
+    raise errors.OpPrereqError("The file storage directory '%s' is not"
+                               " a directory." % file_storage_dir,
+                               errors.ECODE_ENVIRON)
+  return file_storage_dir
+
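Assuming file storage was enabled at build time, a call looks like this (path purely illustrative); note that the return value is the normalized form of the input:

    >>> _InitFileStorage("/srv/ganeti/file-storage/")
    '/srv/ganeti/file-storage'
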
 
 def InitCluster(cluster_name, mac_prefix,
                 master_netdev, file_storage_dir, candidate_pool_size,
                 secondary_ip=None, vg_name=None, beparams=None,
                 nicparams=None, hvparams=None, enabled_hypervisors=None,
-                modify_etc_hosts=True):
+                modify_etc_hosts=True, modify_ssh_setup=True,
+                maintain_node_health=False,
+                uid_pool=None):
   """Initialise the cluster.
 
   @type candidate_pool_size: int
@@ -139,43 +246,48 @@ def InitCluster(cluster_name, mac_prefix,
   """
   # TODO: complete the docstring
   if config.ConfigWriter.IsCluster():
-    raise errors.OpPrereqError("Cluster is already initialised")
+    raise errors.OpPrereqError("Cluster is already initialised",
+                               errors.ECODE_STATE)
 
   if not enabled_hypervisors:
     raise errors.OpPrereqError("Enabled hypervisors list must contain at"
-                               " least one member")
+                               " least one member", errors.ECODE_INVAL)
   invalid_hvs = set(enabled_hypervisors) - constants.HYPER_TYPES
   if invalid_hvs:
     raise errors.OpPrereqError("Enabled hypervisors contains invalid"
-                               " entries: %s" % invalid_hvs)
+                               " entries: %s" % invalid_hvs,
+                               errors.ECODE_INVAL)
 
-  hostname = utils.HostInfo()
+  hostname = utils.GetHostInfo()
 
   if hostname.ip.startswith("127."):
     raise errors.OpPrereqError("This host's IP resolves to the private"
                                " range (%s). Please fix DNS or %s." %
-                               (hostname.ip, constants.ETC_HOSTS))
+                               (hostname.ip, constants.ETC_HOSTS),
+                               errors.ECODE_ENVIRON)
 
   if not utils.OwnIpAddress(hostname.ip):
     raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                                " to %s,\nbut this ip address does not"
-                               " belong to this host."
-                               " Aborting." % hostname.ip)
+                               " belong to this host. Aborting." %
+                               hostname.ip, errors.ECODE_ENVIRON)
 
-  clustername = utils.HostInfo(cluster_name)
+  clustername = utils.GetHostInfo(utils.HostInfo.NormalizeName(cluster_name))
 
   if utils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT,
                    timeout=5):
-    raise errors.OpPrereqError("Cluster IP already active. Aborting.")
+    raise errors.OpPrereqError("Cluster IP already active. Aborting.",
+                               errors.ECODE_NOTUNIQUE)
 
   if secondary_ip:
     if not utils.IsValidIP(secondary_ip):
-      raise errors.OpPrereqError("Invalid secondary ip given")
+      raise errors.OpPrereqError("Invalid secondary ip given",
+                                 errors.ECODE_INVAL)
     if (secondary_ip != hostname.ip and
         not utils.OwnIpAddress(secondary_ip)):
       raise errors.OpPrereqError("You gave %s as secondary IP,"
                                  " but it does not belong to this host." %
-                                 secondary_ip)
+                                 secondary_ip, errors.ECODE_ENVIRON)
   else:
     secondary_ip = hostname.ip
 
@@ -185,39 +297,20 @@ def InitCluster(cluster_name, mac_prefix,
                                           constants.MIN_VG_SIZE)
     if vgstatus:
       raise errors.OpPrereqError("Error: %s\nspecify --no-lvm-storage if"
-                                 " you are not using lvm" % vgstatus)
-
-  file_storage_dir = os.path.normpath(file_storage_dir)
+                                 " you are not using lvm" % vgstatus,
+                                 errors.ECODE_INVAL)
 
-  if not os.path.isabs(file_storage_dir):
-    raise errors.OpPrereqError("The file storage directory you passed is"
-                               " not an absolute path.")
-
-  if not os.path.exists(file_storage_dir):
-    try:
-      os.makedirs(file_storage_dir, 0750)
-    except OSError, err:
-      raise errors.OpPrereqError("Cannot create file storage directory"
-                                 " '%s': %s" %
-                                 (file_storage_dir, err))
-
-  if not os.path.isdir(file_storage_dir):
-    raise errors.OpPrereqError("The file storage directory '%s' is not"
-                               " a directory." % file_storage_dir)
+  file_storage_dir = _InitFileStorage(file_storage_dir)
 
   if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
-    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix)
+    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix,
+                               errors.ECODE_INVAL)
 
   result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
   if result.failed:
     raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                                (master_netdev,
-                                result.output.strip()))
-
-  if not (os.path.isfile(constants.NODE_INITD_SCRIPT) and
-          os.access(constants.NODE_INITD_SCRIPT, os.X_OK)):
-    raise errors.OpPrereqError("Init.d script '%s' missing or not"
-                               " executable." % constants.NODE_INITD_SCRIPT)
+                                result.output.strip()), errors.ECODE_INVAL)
 
   dirs = [(constants.RUN_GANETI_DIR, constants.RUN_DIRS_MODE)]
   utils.EnsureDirs(dirs)
@@ -233,7 +326,7 @@ def InitCluster(cluster_name, mac_prefix,
     hv_class.CheckParameterSyntax(hv_params)
 
   # set up the inter-node password and certificate
-  _InitGanetiServerSetup()
+  _InitGanetiServerSetup(hostname.name)
 
   # set up ssh config and /etc/hosts
   sshline = utils.ReadFile(constants.SSH_HOST_RSA_PUB)
@@ -242,7 +335,8 @@ def InitCluster(cluster_name, mac_prefix,
   if modify_etc_hosts:
     utils.AddHostToEtcHosts(hostname.name)
 
-  _InitSSHSetup()
+  if modify_ssh_setup:
+    _InitSSHSetup()
 
   now = time.time()
 
@@ -265,9 +359,12 @@ def InitCluster(cluster_name, mac_prefix,
     hvparams=hvparams,
     candidate_pool_size=candidate_pool_size,
     modify_etc_hosts=modify_etc_hosts,
+    modify_ssh_setup=modify_ssh_setup,
+    uid_pool=uid_pool,
     ctime=now,
     mtime=now,
     uuid=utils.NewUUID(),
+    maintain_node_health=maintain_node_health,
     )
   master_node_config = objects.Node(name=hostname.name,
                                     primary_ip=hostname.ip,
@@ -279,7 +376,7 @@ def InitCluster(cluster_name, mac_prefix,
   InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
   cfg = config.ConfigWriter()
   ssh.WriteKnownHostsFile(cfg, constants.SSH_KNOWN_HOSTS_FILE)
-  cfg.Update(cfg.GetClusterInfo())
+  cfg.Update(cfg.GetClusterInfo(), logging.error)
 
   # start the master ip
   # TODO: Review rpc call from bootstrap
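
cfg.Update now takes a feedback callable as its second argument, used to report warnings raised while writing the configuration; bootstrap simply passes logging.error. A hedged sketch of supplying a custom callback instead (assuming the callback receives a message string, which is how logging.error is used here):

    import logging

    def _config_feedback(msg):
      # Surface configuration warnings in the node's logs
      logging.error("while updating cluster config: %s", msg)

    cfg.Update(cfg.GetClusterInfo(), _config_feedback)
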
@@ -327,11 +424,13 @@ def FinalizeClusterDestroy(master):
   begun in cmdlib.LUDestroyOpcode.
 
   """
+  cfg = config.ConfigWriter()
+  modify_ssh_setup = cfg.GetClusterInfo().modify_ssh_setup
   result = rpc.RpcRunner.call_node_stop_master(master, True)
   msg = result.fail_msg
   if msg:
-    logging.warning("Could not disable the master role: %s" % msg)
-  result = rpc.RpcRunner.call_node_leave_cluster(master)
+    logging.warning("Could not disable the master role: %s", msg)
+  result = rpc.RpcRunner.call_node_leave_cluster(master, modify_ssh_setup)
   msg = result.fail_msg
   if msg:
     logging.warning("Could not shutdown the node daemon and cleanup"
@@ -352,16 +451,16 @@ def SetupNodeDaemon(cluster_name, node, ssh_key_check):
   """
   sshrunner = ssh.SshRunner(cluster_name)
 
-  noded_cert = utils.ReadFile(constants.SSL_CERT_FILE)
+  noded_cert = utils.ReadFile(constants.NODED_CERT_FILE)
   rapi_cert = utils.ReadFile(constants.RAPI_CERT_FILE)
-  hmac_key = utils.ReadFile(constants.HMAC_CLUSTER_KEY)
+  confd_hmac_key = utils.ReadFile(constants.CONFD_HMAC_KEY)
 
   # in the base64 pem encoding, neither '!' nor '.' are valid chars,
   # so we use this to detect an invalid certificate; as long as the
   # cert doesn't contain this, the here-document will be correctly
   # parsed by the shell sequence below. HMAC keys are hexadecimal strings,
   # so the same restrictions apply.
-  for content in (noded_cert, rapi_cert, hmac_key):
+  for content in (noded_cert, rapi_cert, confd_hmac_key):
     if re.search('^!EOF\.', content, re.MULTILINE):
       raise errors.OpExecError("invalid SSL certificate or HMAC key")
 
@@ -369,8 +468,8 @@ def SetupNodeDaemon(cluster_name, node, ssh_key_check):
     noded_cert += "\n"
   if not rapi_cert.endswith("\n"):
     rapi_cert += "\n"
-  if not hmac_key.endswith("\n"):
-    hmac_key += "\n"
+  if not confd_hmac_key.endswith("\n"):
+    confd_hmac_key += "\n"
 
   # set up inter-node password and certificate and restarts the node daemon
   # and then connect with ssh to set password and start ganeti-noded
@@ -384,13 +483,13 @@ def SetupNodeDaemon(cluster_name, node, ssh_key_check):
                "cat > '%s' << '!EOF.' && \n"
                "%s!EOF.\n"
                "chmod 0400 %s %s %s && "
-               "%s restart" %
-               (constants.SSL_CERT_FILE, noded_cert,
+               "%s start %s" %
+               (constants.NODED_CERT_FILE, noded_cert,
                 constants.RAPI_CERT_FILE, rapi_cert,
-                constants.HMAC_CLUSTER_KEY, hmac_key,
-                constants.SSL_CERT_FILE, constants.RAPI_CERT_FILE,
-                constants.HMAC_CLUSTER_KEY,
-                constants.NODE_INITD_SCRIPT))
+                constants.CONFD_HMAC_KEY, confd_hmac_key,
+                constants.NODED_CERT_FILE, constants.RAPI_CERT_FILE,
+                constants.CONFD_HMAC_KEY,
+                constants.DAEMON_UTIL, constants.NODED))
 
   result = sshrunner.Run(node, 'root', mycommand, batch=False,
                          ask_key=ssh_key_check,
@@ -401,6 +500,8 @@ def SetupNodeDaemon(cluster_name, node, ssh_key_check):
                              " output: %s" %
                              (node, result.fail_reason, result.output))
 
+  _WaitForNodeDaemon(node)
+
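The command pushed over SSH relies on the literal marker '!EOF.' never appearing at the start of a line in the transferred data, which is exactly what the re.search() guard above checks. A stripped-down, single-file sketch of the same here-document idiom (path and PEM content are illustrative):

    import re

    pem_data = "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n"
    target = "/var/lib/ganeti/example.pem"   # illustrative remote path

    # Refuse content that would terminate the here-document early
    if re.search('^!EOF\.', pem_data, re.MULTILINE):
      raise ValueError("content contains the here-document terminator")

    mycommand = ("cat > '%s' << '!EOF.' && \n"
                 "%s!EOF.\n"
                 "chmod 0400 %s" % (target, pem_data, target))
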
 
 def MasterFailover(no_voting=False):
   """Failover the master node.
@@ -424,7 +525,7 @@ def MasterFailover(no_voting=False):
     raise errors.OpPrereqError("This commands must be run on the node"
                                " where you want the new master to be."
                                " %s is already the master" %
-                               old_master)
+                               old_master, errors.ECODE_INVAL)
 
   if new_master not in mc_list:
     mc_no_master = [name for name in mc_list if name != old_master]
@@ -432,7 +533,8 @@ def MasterFailover(no_voting=False):
                                " as master candidates. Only these nodes"
                                " can become masters. Current list of"
                                " master candidates is:\n"
-                               "%s" % ('\n'.join(mc_no_master)))
+                               "%s" % ('\n'.join(mc_no_master)),
+                               errors.ECODE_STATE)
 
   if not no_voting:
     vote_list = GatherMasterVotes(node_list)
@@ -441,13 +543,14 @@ def MasterFailover(no_voting=False):
       voted_master = vote_list[0][0]
       if voted_master is None:
         raise errors.OpPrereqError("Cluster is inconsistent, most nodes did"
-                                   " not respond.")
+                                   " not respond.", errors.ECODE_ENVIRON)
       elif voted_master != old_master:
         raise errors.OpPrereqError("I have a wrong configuration, I believe"
                                    " the master is %s but the other nodes"
                                    " voted %s. Please resync the configuration"
                                    " of this node." %
-                                   (old_master, voted_master))
+                                   (old_master, voted_master),
+                                   errors.ECODE_STATE)
   # end checks
 
   rcode = 0
@@ -470,7 +573,7 @@ def MasterFailover(no_voting=False):
   cluster_info.master_node = new_master
   # this will also regenerate the ssconf files, since we updated the
   # cluster info
-  cfg.Update(cluster_info)
+  cfg.Update(cluster_info, logging.error)
 
   result = rpc.RpcRunner.call_node_start_master(new_master, True, no_voting)
   msg = result.fail_msg