return ssh.SshRunner()
-def _CleanDirectory(path):
+def _CleanDirectory(path, exclude=None):
+  """Removes all regular files in a directory.
+
+  @param exclude: List of files to be excluded, defaults
+      to the empty list
+  @type exclude: list
+
+  """
  if not os.path.isdir(path):
    return
+
+  # Normalize excluded paths; a mutable default ([]) is avoided on purpose
+  if exclude is None:
+    exclude = []
+  else:
+    exclude = [os.path.normpath(i) for i in exclude]
+
for rel_name in utils.ListVisibleFiles(path):
- full_name = os.path.join(path, rel_name)
+ full_name = os.path.normpath(os.path.join(path, rel_name))
+ if full_name in exclude:
+ continue
if os.path.isfile(full_name) and not os.path.islink(full_name):
utils.RemoveFile(full_name)
-def _GetMasterInfo():
- """Return the master ip and netdev.
+def _JobQueuePurge(keep_lock):
+ """Removes job queue files and archived jobs
+
+ """
+ if keep_lock:
+ exclude = [constants.JOB_QUEUE_LOCK_FILE]
+ else:
+ exclude = []
+
+ _CleanDirectory(constants.QUEUE_DIR, exclude=exclude)
+ _CleanDirectory(constants.JOB_QUEUE_ARCHIVE_DIR)
+
+
+def GetMasterInfo():
+ """Returns master information.
+
+ This is an utility function to compute master information, either
+ for consumption here or from the node daemon.
+
+ @rtype: tuple
+ @return: (master_netdev, master_ip, master_name)
"""
try:
ss = ssconf.SimpleStore()
master_netdev = ss.GetMasterNetdev()
master_ip = ss.GetMasterIP()
+ master_node = ss.GetMasterNode()
except errors.ConfigurationError, err:
logging.exception("Cluster configuration incomplete")
-    return (None, None)
+    # Keep arity in sync with the success path: callers now unpack a 3-tuple
+    return (None, None, None)
- return (master_netdev, master_ip)
+ return (master_netdev, master_ip, master_node)
def StartMaster(start_daemons):
"""
ok = True
- master_netdev, master_ip = _GetMasterInfo()
+ master_netdev, master_ip, _ = GetMasterInfo()
if not master_netdev:
return False
-  stop the master daemons (ganet-masterd and ganeti-rapi).
+  stop the master daemons (ganeti-masterd and ganeti-rapi).
"""
- master_netdev, master_ip = _GetMasterInfo()
+ master_netdev, master_ip, _ = GetMasterInfo()
if not master_netdev:
return False
"""
_CleanDirectory(constants.DATA_DIR)
- JobQueuePurge()
+ # The lock can be removed because we're going to quit anyway.
+ _JobQueuePurge(keep_lock=False)
try:
priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)
inst_os.path, create_script, instance.name,
real_os_dev.dev_path, real_swap_dev.dev_path,
logfile)
+ env = {'HYPERVISOR': ssconf.SimpleStore().GetHypervisorType()}
- result = utils.RunCmd(command)
+ result = utils.RunCmd(command, env=env)
if result.failed:
logging.error("os create command '%s' returned error: %s, logfile: %s,"
" output: %s", command, result.fail_reason, logfile,
logfile)
command = '|'.join([utils.ShellQuoteArgs(remotecmd), comprcmd, impcmd])
+ env = {'HYPERVISOR': ssconf.SimpleStore().GetHypervisorType()}
- result = utils.RunCmd(command)
+ result = utils.RunCmd(command, env=env)
if result.failed:
logging.error("os import command '%s' returned error: %s"
"""Removes job queue files and archived jobs
"""
- _CleanDirectory(constants.QUEUE_DIR)
- _CleanDirectory(constants.JOB_QUEUE_ARCHIVE_DIR)
+ # The lock must not be removed, otherwise another process could create
+ # it again.
+ return _JobQueuePurge(keep_lock=True)
+
+
+def JobQueueRename(old, new):
+ """Renames a job queue file.
+
+ """
+ if not (_IsJobQueueFile(old) and _IsJobQueueFile(new)):
+ return False
+
+ os.rename(old, new)
+
+ return True
def CloseBlockDevices(disks):