from ganeti import ssconf
+_BOOT_ID_PATH = "/proc/sys/kernel/random/boot_id"
+
+
class RPCFail(Exception):
"""Class denoting RPC failure.
"""
+
def _Fail(msg, *args, **kwargs):
  """Log an error and then raise an RPCFail exception.
if stop_daemons:
# stop/kill the rapi and the master daemon
- for daemon in constants.RAPI_PID, constants.MASTERD_PID:
+ for daemon in constants.RAPI, constants.MASTERD:
utils.KillProcess(utils.ReadPidFile(utils.DaemonPidFileName(daemon)))
try:
priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)
- f = open(pub_key, 'r')
- try:
- utils.RemoveAuthorizedKey(auth_keys, f.read(8192))
- finally:
- f.close()
+ utils.RemoveAuthorizedKey(auth_keys, utils.ReadFile(pub_key))
utils.RemoveFile(priv_key)
utils.RemoveFile(pub_key)
except errors.OpExecError:
logging.exception("Error while processing ssh files")
+ try:
+ utils.RemoveFile(constants.HMAC_CLUSTER_KEY)
+ utils.RemoveFile(constants.RAPI_CERT_FILE)
+ utils.RemoveFile(constants.SSL_CERT_FILE)
+ except:
+ logging.exception("Error while removing cluster secrets")
+
+ confd_pid = utils.ReadPidFile(utils.DaemonPidFileName(constants.CONFD))
+
+ if confd_pid:
+ utils.KillProcess(confd_pid, timeout=2)
+
# Raise a custom exception (handled in ganeti-noded)
raise errors.QuitGanetiException(True, 'Shutdown scheduled')
if hyp_info is not None:
outputarray.update(hyp_info)
- f = open("/proc/sys/kernel/random/boot_id", 'r')
- try:
- outputarray["bootid"] = f.read(128).rstrip("\n")
- finally:
- f.close()
+ outputarray["bootid"] = utils.ReadFile(_BOOT_ID_PATH, size=128).rstrip("\n")
return outputarray
tmp[my_name] = ("Can't find my own primary/secondary IP"
" in the node list")
else:
- port = utils.GetNodeDaemonPort()
+ port = utils.GetDaemonPort(constants.NODED)
for name, pip, sip in what[constants.NV_NODENETTEST]:
fail = []
if not utils.TcpPing(pip, port, source=my_pip):
name, size, attr = match.groups()
inactive = attr[4] == '-'
online = attr[5] == 'o'
+ virtual = attr[0] == 'v'
+ if virtual:
+ # we don't want to report such volumes as existing, since they
+ # don't really hold data
+ continue
lvs[name] = (size, inactive, online)
return lvs
rbd = _RecursiveFindBD(dsk)
if rbd is None:
_Fail("Can't find device %s", dsk)
+
stats.append(rbd.CombinedSyncStatus())
+
return stats
@type disk: L{objects.Disk}
@param disk: the disk to find
- @rtype: None or tuple
- @return: None if the disk cannot be found, otherwise a
- tuple (device_path, major, minor, sync_percent,
- estimated_time, is_degraded)
+ @rtype: None or objects.BlockDevStatus
+  @return: None if the disk cannot be found, otherwise the current
+      status information
"""
try:
rbd = _RecursiveFindBD(disk)
except errors.BlockDeviceError, err:
_Fail("Failed to find device: %s", err, exc=True)
+
if rbd is None:
return None
- return (rbd.dev_path, rbd.major, rbd.minor) + rbd.GetSyncStatus()
+
+ return rbd.GetSyncStatus()
+
+
+def BlockdevGetsize(disks):
+ """Computes the size of the given disks.
+
+ If a disk is not found, returns None instead.
+
+ @type disks: list of L{objects.Disk}
+ @param disks: the list of disk to compute the size for
+ @rtype: list
+ @return: list with elements None if the disk cannot be found,
+ otherwise the size
+
+ """
+ result = []
+ for cf in disks:
+ try:
+ rbd = _RecursiveFindBD(cf)
+ except errors.BlockDeviceError, err:
+ result.append(None)
+ continue
+ if rbd is None:
+ result.append(None)
+ else:
+ result.append(rbd.GetActualSize())
+ return result
+
+
+def BlockdevExport(disk, dest_node, dest_path, cluster_name):
+  """Export a block device to a remote node.
+
+  The device contents are streamed with dd over an ssh pipe to a dd
+  process on the destination node; no intermediate file is created.
+
+  @type disk: L{objects.Disk}
+  @param disk: the description of the disk to export
+  @type dest_node: str
+  @param dest_node: the destination node to export to
+  @type dest_path: str
+  @param dest_path: the destination path on the target node
+  @type cluster_name: str
+  @param cluster_name: the cluster name, needed for SSH hostalias
+  @rtype: None
+
+  """
+  real_disk = _RecursiveFindBD(disk)
+  if real_disk is None:
+    _Fail("Block device '%s' is not set up", disk)
+
+  real_disk.Open()
+
+  # the block size on the read dd is 1MiB to match our units
+  expcmd = utils.BuildShellCmd("set -e; set -o pipefail; "
+                               "dd if=%s bs=1048576 count=%s",
+                               real_disk.dev_path, str(disk.size))
+
+  # we set here a smaller block size as, due to ssh buffering, more
+  # than 64-128k will mostly be ignored; we use nocreat to fail if the
+  # device is not already there or we pass a wrong path; we use
+  # notrunc to not attempt truncation on an LV device; we use
+  # oflag=dsync to not buffer too much memory; this means that at
+  # best, we flush every 64k, which will not be very fast
+  destcmd = utils.BuildShellCmd("dd of=%s conv=nocreat,notrunc bs=65536"
+                                " oflag=dsync", dest_path)
+
+  remotecmd = _GetSshRunner(cluster_name).BuildCmd(dest_node,
+                                                   constants.GANETI_RUNAS,
+                                                   destcmd)
+
+  # all commands have been checked, so we're safe to combine them
+  command = '|'.join([expcmd, utils.ShellQuoteArgs(remotecmd)])
+
+  # bash is needed for "set -o pipefail" in expcmd above
+  result = utils.RunCmd(["bash", "-c", command])
+
+  if result.failed:
+    _Fail("Disk copy command '%s' returned error: %s"
+          " output: %s", command, result.fail_reason, result.output)
def UploadFile(file_name, data, mode, uid, gid, atime, mtime):
# the target command is built out of three individual commands,
# which are joined by pipes; we check each individual command for
# valid parameters
- expcmd = utils.BuildShellCmd("cd %s; %s 2>%s", inst_os.path,
- export_script, logfile)
+ expcmd = utils.BuildShellCmd("set -e; set -o pipefail; cd %s; %s 2>%s",
+ inst_os.path, export_script, logfile)
comprcmd = "gzip"
# all commands have been checked, so we're safe to combine them
command = '|'.join([expcmd, comprcmd, utils.ShellQuoteArgs(remotecmd)])
- result = utils.RunCmd(command, env=export_env)
+ result = utils.RunCmd(["bash", "-c", command], env=export_env)
if result.failed:
_Fail("OS snapshot export command '%s' returned error: %s"
master, myself = ssconf.GetMasterAndMyself()
if master == myself:
_Fail("ssconf status shows I'm the master node, will not demote")
- pid_file = utils.DaemonPidFileName(constants.MASTERD_PID)
+ pid_file = utils.DaemonPidFileName(constants.MASTERD)
if utils.IsProcessAlive(utils.ReadPidFile(pid_file)):
_Fail("The master daemon is running, will not demote")
try: