Rename OpRedistributeConfig and LURedistributeConfig
diff --git a/lib/bootstrap.py b/lib/bootstrap.py
index eca3349..2038544 100644
--- a/lib/bootstrap.py
+++ b/lib/bootstrap.py
@@ -223,11 +223,11 @@ def _InitFileStorage(file_storage_dir):
 def InitCluster(cluster_name, mac_prefix,
                 master_netdev, file_storage_dir, candidate_pool_size,
                 secondary_ip=None, vg_name=None, beparams=None,
-                nicparams=None, hvparams=None, enabled_hypervisors=None,
-                modify_etc_hosts=True, modify_ssh_setup=True,
-                maintain_node_health=False, drbd_helper=None,
-                uid_pool=None, default_iallocator=None,
-                primary_ip_version=None):
+                nicparams=None, ndparams=None, hvparams=None,
+                enabled_hypervisors=None, modify_etc_hosts=True,
+                modify_ssh_setup=True, maintain_node_health=False,
+                drbd_helper=None, uid_pool=None, default_iallocator=None,
+                primary_ip_version=None, prealloc_wipe_disks=False):
   """Initialise the cluster.
 
   @type candidate_pool_size: int
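
The widened signature threads two new cluster-wide settings through InitCluster: ndparams (default node parameters) and prealloc_wipe_disks. A minimal sketch of a caller passing them, assuming an installed ganeti tree; the literal values are hypothetical, and the real caller is "gnt-cluster init" building these keywords from command-line options:

    from ganeti import bootstrap, constants

    bootstrap.InitCluster(cluster_name="cluster.example.com",
                          mac_prefix="aa:00:00",
                          master_netdev="eth0",
                          file_storage_dir="/srv/ganeti/file-storage",
                          candidate_pool_size=10,
                          enabled_hypervisors=[constants.HT_XEN_PVM],
                          hvparams={constants.HT_XEN_PVM: {}},
                          beparams=constants.BEC_DEFAULTS,
                          nicparams=constants.NICC_DEFAULTS,
                          primary_ip_version=constants.IP4_VERSION,
                          ndparams=None,              # None: use NDC_DEFAULTS
                          prealloc_wipe_disks=False)  # new flag, off by default
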
@@ -340,6 +340,11 @@ def InitCluster(cluster_name, mac_prefix,
   utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)
   objects.NIC.CheckParameterSyntax(nicparams)
 
+  if ndparams is not None:
+    utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
+  else:
+    ndparams = dict(constants.NDC_DEFAULTS)
+
   # hvparams is a mapping of hypervisor->hvparams dict
   for hv_name, hv_params in hvparams.iteritems():
     utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
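
The new ndparams block applies the same validate-or-default pattern used for the other parameter dicts: a caller-supplied dict is type-checked in place, while None selects a fresh copy of the cluster defaults. A simplified, self-contained sketch of the pattern (the real type table and defaults live in constants.py, and the real checker is utils.ForceDictType; the keys and types here are illustrative):

    _ND_TYPES = {"oob_program": str}      # illustrative stand-in
    _ND_DEFAULTS = {"oob_program": ""}    # illustrative stand-in

    def _CheckNdParams(ndparams):
      if ndparams is None:
        # copy the defaults so later per-cluster edits cannot mutate them
        return dict(_ND_DEFAULTS)
      for key, value in ndparams.items():
        if key not in _ND_TYPES:
          raise ValueError("unknown node parameter %r" % key)
        if not isinstance(value, _ND_TYPES[key]):
          raise ValueError("parameter %r must be %s" % (key, _ND_TYPES[key]))
      return ndparams
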
@@ -383,6 +388,7 @@ def InitCluster(cluster_name, mac_prefix,
     enabled_hypervisors=enabled_hypervisors,
     beparams={constants.PP_DEFAULT: beparams},
     nicparams={constants.PP_DEFAULT: nicparams},
+    ndparams=ndparams,
     hvparams=hvparams,
     candidate_pool_size=candidate_pool_size,
     modify_etc_hosts=modify_etc_hosts,
@@ -394,6 +400,7 @@ def InitCluster(cluster_name, mac_prefix,
     drbd_usermode_helper=drbd_helper,
     default_iallocator=default_iallocator,
     primary_ip_family=ipcls.family,
+    prealloc_wipe_disks=prealloc_wipe_disks,
     )
   master_node_config = objects.Node(name=hostname.name,
                                     primary_ip=hostname.ip,
@@ -401,6 +408,7 @@ def InitCluster(cluster_name, mac_prefix,
                                     serial_no=1,
                                     master_candidate=True,
                                     offline=False, drained=False,
+                                    ctime=now, mtime=now,
                                     )
   InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
   cfg = config.ConfigWriter(offline=True)
@@ -444,7 +452,7 @@ def InitConfig(version, cluster_config, master_node_config,
     }
   default_nodegroup = objects.NodeGroup(
     uuid=uuid_generator.Generate([], utils.NewUUID, _INITCONF_ECID),
-    name="default",
+    name=constants.INITIAL_NODE_GROUP_NAME,
     members=[master_node_config.name],
     )
   nodegroups = {
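
Replacing the bare "default" literal with constants.INITIAL_NODE_GROUP_NAME lets bootstrap and every later consumer of the configuration agree on the initial group's name by construction. For illustration only, a hypothetical lookup that diverging literals would silently break:

    from ganeti import constants

    # hypothetical helper; real lookups go through ConfigWriter
    def _FindInitialGroupUuid(nodegroups):
      for uuid, group in nodegroups.items():
        if group.name == constants.INITIAL_NODE_GROUP_NAME:
          return uuid
      raise KeyError("initial node group not found")
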
@@ -497,27 +505,7 @@ def SetupNodeDaemon(cluster_name, node, ssh_key_check):
   """
   family = ssconf.SimpleStore().GetPrimaryIPFamily()
   sshrunner = ssh.SshRunner(cluster_name,
-                            ipv6=family==netutils.IP6Address.family)
-
-  noded_cert = utils.ReadFile(constants.NODED_CERT_FILE)
-  rapi_cert = utils.ReadFile(constants.RAPI_CERT_FILE)
-  confd_hmac_key = utils.ReadFile(constants.CONFD_HMAC_KEY)
-
-  # in the base64 pem encoding, neither '!' nor '.' are valid chars,
-  # so we use this to detect an invalid certificate; as long as the
-  # cert doesn't contain this, the here-document will be correctly
-  # parsed by the shell sequence below. HMAC keys are hexadecimal strings,
-  # so the same restrictions apply.
-  for content in (noded_cert, rapi_cert, confd_hmac_key):
-    if re.search('^!EOF\.', content, re.MULTILINE):
-      raise errors.OpExecError("invalid SSL certificate or HMAC key")
-
-  if not noded_cert.endswith("\n"):
-    noded_cert += "\n"
-  if not rapi_cert.endswith("\n"):
-    rapi_cert += "\n"
-  if not confd_hmac_key.endswith("\n"):
-    confd_hmac_key += "\n"
+                            ipv6=(family == netutils.IP6Address.family))
 
   bind_address = constants.IP4_ADDRESS_ANY
   if family == netutils.IP6Address.family:
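
The deleted block dates from when the certificates and the HMAC key were streamed to the new node through a shell here-document, so the payload had to be checked against the terminator and forced to end in a newline. A reconstruction of that older pattern, for illustration only (the current code simply copies the files with CopyFileToNode, as shown below):

    import re

    def _BuildHereDoc(path, content):
      # "!EOF." is a safe terminator: neither "!" nor "." occurs in
      # base64 PEM data, and HMAC keys are plain hexadecimal strings
      if re.search(r"^!EOF\.", content, re.MULTILINE):
        raise ValueError("payload collides with here-document terminator")
      if not content.endswith("\n"):
        content += "\n"
      return "cat > %s <<'!EOF.'\n%s!EOF.\n" % (path, content)
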
@@ -530,8 +518,9 @@ def SetupNodeDaemon(cluster_name, node, ssh_key_check):
   sshrunner.CopyFileToNode(node, constants.NODED_CERT_FILE)
   sshrunner.CopyFileToNode(node, constants.RAPI_CERT_FILE)
   sshrunner.CopyFileToNode(node, constants.CONFD_HMAC_KEY)
-  mycommand = ("%s start %s -b '%s'" % (constants.DAEMON_UTIL, constants.NODED,
-                                        bind_address))
+  mycommand = ("%s stop-all; %s start %s -b %s" %
+               (constants.DAEMON_UTIL, constants.DAEMON_UTIL, constants.NODED,
+                utils.ShellQuote(bind_address)))
 
   result = sshrunner.Run(node, 'root', mycommand, batch=False,
                          ask_key=ssh_key_check,
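
Two changes to the remote command: "stop-all" runs first so no stray daemons survive the restart, and the bind address goes through utils.ShellQuote rather than hard-coded single quotes, which stays correct even for values containing quote characters. A rough stdlib equivalent of that quoting (ganeti ships its own ShellQuote implementation; this sketch is only illustrative):

    try:
      from shlex import quote   # Python 3
    except ImportError:
      from pipes import quote   # Python 2, matching this codebase's era

    bind_address = "::"         # IPv6 any-address, as selected above
    mycommand = "%s stop-all; %s start %s -b %s" % (
        "daemon-util", "daemon-util", "ganeti-noded", quote(bind_address))
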
@@ -599,12 +588,36 @@ def MasterFailover(no_voting=False):
 
   logging.info("Setting master to %s, old master: %s", new_master, old_master)
 
+  try:
+    # instantiate a real config writer, as we now know we have the
+    # configuration data
+    cfg = config.ConfigWriter(accept_foreign=True)
+
+    cluster_info = cfg.GetClusterInfo()
+    cluster_info.master_node = new_master
+    # this will also regenerate the ssconf files, since we updated the
+    # cluster info
+    cfg.Update(cluster_info, logging.error)
+  except errors.ConfigurationError, err:
+    logging.error("Error while trying to set the new master: %s",
+                  str(err))
+    return 1
+
+  # if cfg.Update worked, then the old master daemon will no longer be
+  # able to write its own config file (we rely on locking in both
+  # backend.UploadFile() and ConfigWriter._Write()); hence the next
+  # step is to kill the old master
+
+  logging.info("Stopping the master daemon on node %s", old_master)
+
   result = rpc.RpcRunner.call_node_stop_master(old_master, True)
   msg = result.fail_msg
   if msg:
     logging.error("Could not disable the master role on the old master"
                  " %s, please disable manually: %s", old_master, msg)
 
+  logging.info("Checking master IP non-reachability...")
+
   master_ip = sstore.GetMasterIP()
   total_timeout = 30
   # Here we have a phase where no master should be running
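
The reordering matters: the configuration is rewritten first (with accept_foreign=True, since this node is not yet recorded as the master), and the locking in backend.UploadFile() and ConfigWriter._Write() then keeps the still-running old master daemon from clobbering it before it is stopped. After that the code waits up to total_timeout seconds for the master IP to fall silent. A sketch of such a bounded wait; the real probe in this file is netutils.TcpPing, and the helper name here is illustrative:

    import time

    def _WaitForIpSilence(probe, total_timeout=30, interval=1.0):
      # `probe` is any callable returning True while the address still
      # answers, e.g. a TCP ping against the master IP
      deadline = time.time() + total_timeout
      while time.time() < deadline:
        if not probe():
          return True
        time.sleep(interval)
      return False
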
@@ -619,15 +632,7 @@ def MasterFailover(no_voting=False):
                     " continuing but activating the master on the current"
                     " node will probably fail", total_timeout)
 
-  # instantiate a real config writer, as we now know we have the
-  # configuration data
-  cfg = config.ConfigWriter()
-
-  cluster_info = cfg.GetClusterInfo()
-  cluster_info.master_node = new_master
-  # this will also regenerate the ssconf files, since we updated the
-  # cluster info
-  cfg.Update(cluster_info, logging.error)
+  logging.info("Starting the master daemons on the new master")
 
   result = rpc.RpcRunner.call_node_start_master(new_master, True, no_voting)
   msg = result.fail_msg
@@ -636,6 +641,7 @@ def MasterFailover(no_voting=False):
                   " %s, please check: %s", new_master, msg)
     rcode = 1
 
+  logging.info("Master failed over from %s to %s", old_master, new_master)
   return rcode