- Reimplemented the bash completion script to make it more complete
- Improved burnin
- Added option to specify maximum timeout on instance shutdown
+- Added ``--no-ssh-init`` option to ``gnt-cluster init``
Version 2.0.4
"""Cleanup after leaving a cluster.
"""
- return backend.LeaveCluster()
+ return backend.LeaveCluster(params[0])
@staticmethod
def perspective_node_volumes(params):
utils.RunCmd([constants.SSH_INITD_SCRIPT, "restart"])
-def LeaveCluster():
+def LeaveCluster(modify_ssh_setup):
"""Cleans up and remove the current node.
This function cleans up and prepares the current node to be removed
L{errors.QuitGanetiException} which is used as a special case to
shutdown the node daemon.
+ @type modify_ssh_setup: boolean
+ @param modify_ssh_setup: whether the SSH-related files should be cleaned up too
+
"""
_CleanDirectory(constants.DATA_DIR)
JobQueuePurge()
- try:
- priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)
+ if modify_ssh_setup:
+ try:
+ priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)
- utils.RemoveAuthorizedKey(auth_keys, utils.ReadFile(pub_key))
+ utils.RemoveAuthorizedKey(auth_keys, utils.ReadFile(pub_key))
- utils.RemoveFile(priv_key)
- utils.RemoveFile(pub_key)
- except errors.OpExecError:
- logging.exception("Error while processing ssh files")
+ utils.RemoveFile(priv_key)
+ utils.RemoveFile(pub_key)
+ except errors.OpExecError:
+ logging.exception("Error while processing ssh files")
try:
utils.RemoveFile(constants.HMAC_CLUSTER_KEY)
master_netdev, file_storage_dir, candidate_pool_size,
secondary_ip=None, vg_name=None, beparams=None,
nicparams=None, hvparams=None, enabled_hypervisors=None,
- modify_etc_hosts=True):
+ modify_etc_hosts=True, modify_ssh_setup=True):
"""Initialise the cluster.
@type candidate_pool_size: int
if modify_etc_hosts:
utils.AddHostToEtcHosts(hostname.name)
- _InitSSHSetup()
+ if modify_ssh_setup:
+ _InitSSHSetup()
now = time.time()
hvparams=hvparams,
candidate_pool_size=candidate_pool_size,
modify_etc_hosts=modify_etc_hosts,
+ modify_ssh_setup=modify_ssh_setup,
ctime=now,
mtime=now,
uuid=utils.NewUUID(),
begun in cmdlib.LUDestroyOpcode.
"""
+ cfg = config.ConfigWriter()
+ modify_ssh_setup = cfg.GetClusterInfo().modify_ssh_setup
result = rpc.RpcRunner.call_node_stop_master(master, True)
msg = result.fail_msg
if msg:
logging.warning("Could not disable the master role: %s" % msg)
- result = rpc.RpcRunner.call_node_leave_cluster(master)
+ result = rpc.RpcRunner.call_node_leave_cluster(master, modify_ssh_setup)
msg = result.fail_msg
if msg:
logging.warning("Could not shutdown the node daemon and cleanup"
"NOIPCHECK_OPT",
"NOLVM_STORAGE_OPT",
"NOMODIFY_ETCHOSTS_OPT",
+ "NOMODIFY_SSH_SETUP_OPT",
"NONICS_OPT",
"NONLIVE_OPT",
"NONPLUS1_OPT",
help="Don't modify /etc/hosts",
action="store_false", default=True)
+NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
+ help="Don't initialize SSH keys",
+ action="store_false", default=True)
+
ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
help="Enable parseable error messages",
action="store_true", default=False)
"""
master = self.cfg.GetMasterNode()
+ modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
# Run post hooks on master node before it's removed
hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
result = self.rpc.call_node_stop_master(master, False)
result.Raise("Could not disable the master role")
- priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
- utils.CreateBackup(priv_key)
- utils.CreateBackup(pub_key)
+
+ if modify_ssh_setup:
+ priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
+ utils.CreateBackup(priv_key)
+ utils.CreateBackup(pub_key)
+
return master
logging.info("Stopping the node daemon and removing configs from node %s",
node.name)
+ modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
+
# Promote nodes to master candidate as needed
_AdjustCandidatePool(self, exceptions=[node.name])
self.context.RemoveNode(node.name)
except:
self.LogWarning("Errors occurred running hooks on %s" % node.name)
- result = self.rpc.call_node_leave_cluster(node.name)
+ result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
msg = result.fail_msg
if msg:
self.LogWarning("Errors encountered on the remote node while leaving"
(constants.PROTOCOL_VERSION, result.payload))
# setup ssh on node
- logging.info("Copy ssh key to node %s", node)
- priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
- keyarray = []
- keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
- constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
- priv_key, pub_key]
-
- for i in keyfiles:
- keyarray.append(utils.ReadFile(i))
-
- result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
- keyarray[2],
- keyarray[3], keyarray[4], keyarray[5])
- result.Raise("Cannot transfer ssh keys to the new node")
+ if self.cfg.GetClusterInfo().modify_ssh_setup:
+ logging.info("Copy ssh key to node %s", node)
+ priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
+ keyarray = []
+ keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
+ constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
+ priv_key, pub_key]
+
+ for i in keyfiles:
+ keyarray.append(utils.ReadFile(i))
+
+ result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
+ keyarray[2], keyarray[3], keyarray[4],
+ keyarray[5])
+ result.Raise("Cannot transfer ssh keys to the new node")
# Add node to our /etc/hosts, and add key to known_hosts
if self.cfg.GetClusterInfo().modify_etc_hosts:
"nicparams",
"candidate_pool_size",
"modify_etc_hosts",
+ "modify_ssh_setup",
] + _TIMESTAMPS + _UUID
def UpgradeConfig(self):
if self.modify_etc_hosts is None:
self.modify_etc_hosts = True
+ if self.modify_ssh_setup is None:
+ self.modify_ssh_setup = True
+
# default_bridge is no longer used it 2.1. The slot is left there to
# support auto-upgrading, but will be removed in 2.2
if self.default_bridge is not None:
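
The UpgradeConfig hook above backfills the new slot when an older serialized
configuration is loaded, so clusters created before the option existed keep the
previous behaviour. A rough, self-contained sketch of that pattern follows;
FakeCluster is a stand-in for illustration, not the real objects.Cluster.

```python
# Stand-in class illustrating the UpgradeConfig default-filling pattern;
# not the real ganeti.objects.Cluster.
class FakeCluster(object):
  def __init__(self, modify_etc_hosts=None, modify_ssh_setup=None):
    self.modify_etc_hosts = modify_etc_hosts
    self.modify_ssh_setup = modify_ssh_setup

  def UpgradeConfig(self):
    # Slots added in 2.1 deserialize as None from older configs; fall back
    # to the pre-2.1 behaviour of managing /etc/hosts and SSH keys.
    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True
    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

# Config written by an older version: the slot is missing, so it defaults on.
old = FakeCluster()
old.UpgradeConfig()
assert old.modify_ssh_setup is True

# Config created with --no-ssh-init keeps its explicit False.
new = FakeCluster(modify_ssh_setup=False)
new.UpgradeConfig()
assert new.modify_ssh_setup is False
```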
return self._SingleNodeCall(node, "export_remove", [export])
@classmethod
- def call_node_leave_cluster(cls, node):
+ def call_node_leave_cluster(cls, node, modify_ssh_setup):
"""Requests a node to clean the cluster information it has.
This will remove the configuration information from the ganeti data
This is a single-node call.
"""
- return cls._StaticSingleNodeCall(node, "node_leave_cluster", [])
+ return cls._StaticSingleNodeCall(node, "node_leave_cluster",
+ [modify_ssh_setup])
def call_node_volumes(self, node_list):
"""Gets all volumes on node(s).
<sbr>
<arg>--no-lvm-storage</arg>
<sbr>
+ <arg>--no-etc-hosts</arg>
+ <sbr>
+ <arg>--no-ssh-init</arg>
+ <sbr>
<arg>--file-storage-dir <replaceable>dir</replaceable></arg>
<sbr>
<arg>--enabled-hypervisors <replaceable>hypervisors</replaceable></arg>
</para>
<para>
- The <option>--no-lvm-storage</option> allows you to initialize the
- cluster without lvm support. This means that only instances using
+ The <option>--no-lvm-storage</option> option allows you to initialize
+ the cluster without lvm support. This means that only instances using
files as their storage backend can be created. Once the cluster
is initialized you can change this setup with the
<command>modify</command> command.
</para>
<para>
+ The <option>--no-etc-hosts</option> option allows you to initialize the
+ cluster without modifying the <filename>/etc/hosts</filename> file.
+ </para>
+
+ <para>
+ The <option>--no-ssh-init</option> option allows you to initialize the
+ cluster without creating or distributing SSH key pairs.
+ </para>
+
+ <para>
The <option>--file-storage-dir</option> option allows you to
set the directory to use for storing the instance disk
files when using file storage as backend for instance disks.
nicparams=nicparams,
candidate_pool_size=opts.candidate_pool_size,
modify_etc_hosts=opts.modify_etc_hosts,
+ modify_ssh_setup=opts.modify_ssh_setup,
)
op = opcodes.OpPostInitCluster()
SubmitOpCode(op)
InitCluster, [ArgHost(min=1, max=1)],
[BACKEND_OPT, CP_SIZE_OPT, ENABLED_HV_OPT, GLOBAL_FILEDIR_OPT,
HVLIST_OPT, MAC_PREFIX_OPT, MASTER_NETDEV_OPT, NIC_PARAMS_OPT,
- NOLVM_STORAGE_OPT, NOMODIFY_ETCHOSTS_OPT, SECONDARY_IP_OPT, VG_NAME_OPT],
+ NOLVM_STORAGE_OPT, NOMODIFY_ETCHOSTS_OPT, NOMODIFY_SSH_SETUP_OPT,
+ SECONDARY_IP_OPT, VG_NAME_OPT],
"[opts...] <cluster_name>", "Initialises a new cluster configuration"),
'destroy': (
DestroyCluster, ARGS_NONE, [YES_DOIT_OPT],