import tempfile
import zlib
import base64
+import signal
from ganeti import errors
from ganeti import utils
constants.RAPI_CERT_FILE,
constants.RAPI_USERS_FILE,
constants.CONFD_HMAC_KEY,
+ constants.CLUSTER_DOMAIN_SECRET_FILE,
])
for hv_name in constants.HYPER_TYPES:
"""
result = {}
+ my_name = utils.HostInfo().name
+ port = utils.GetDaemonPort(constants.NODED)
if constants.NV_HYPERVISOR in what:
result[constants.NV_HYPERVISOR] = tmp = {}
if constants.NV_NODENETTEST in what:
result[constants.NV_NODENETTEST] = tmp = {}
- my_name = utils.HostInfo().name
my_pip = my_sip = None
for name, pip, sip in what[constants.NV_NODENETTEST]:
if name == my_name:
tmp[my_name] = ("Can't find my own primary/secondary IP"
" in the node list")
else:
- port = utils.GetDaemonPort(constants.NODED)
for name, pip, sip in what[constants.NV_NODENETTEST]:
fail = []
if not utils.TcpPing(pip, port, source=my_pip):
tmp[name] = ("failure using the %s interface(s)" %
" and ".join(fail))
+ if constants.NV_MASTERIP in what:
+ # FIXME: add checks on incoming data structures (here and in the
+ # rest of the function)
+ master_name, master_ip = what[constants.NV_MASTERIP]
+ if master_name == my_name:
+ source = constants.LOCALHOST_IP_ADDRESS
+ else:
+ source = None
+ result[constants.NV_MASTERIP] = utils.TcpPing(master_ip, port,
+ source=source)
+
if constants.NV_LVLIST in what:
try:
val = GetVolumeList(what[constants.NV_LVLIST])
return False, ("File '%s' under path '%s' is not executable" %
(filename, os_dir))
- variants = None
+ variants = []
if constants.OS_VARIANTS_FILE in os_files:
variants_file = os_files[constants.OS_VARIANTS_FILE]
try:
return payload
-def OSEnvironment(instance, inst_os, debug=0):
- """Calculate the environment for an os script.
+def OSCoreEnv(inst_os, debug=0):
+ """Calculate the basic environment for an os script.
- @type instance: L{objects.Instance}
- @param instance: target instance for the os script run
@type inst_os: L{objects.OS}
@param inst_os: operating system for which the environment is being built
@type debug: integer
api_version = \
max(constants.OS_API_VERSIONS.intersection(inst_os.api_versions))
result['OS_API_VERSION'] = '%d' % api_version
- result['INSTANCE_NAME'] = instance.name
- result['INSTANCE_OS'] = instance.os
- result['HYPERVISOR'] = instance.hypervisor
- result['DISK_COUNT'] = '%d' % len(instance.disks)
- result['NIC_COUNT'] = '%d' % len(instance.nics)
+ result['OS_NAME'] = inst_os.name
result['DEBUG_LEVEL'] = '%d' % debug
+
+ # OS variants
if api_version >= constants.OS_API_V15:
try:
- variant = instance.os.split('+', 1)[1]
+ variant = inst_os.name.split('+', 1)[1]
except IndexError:
variant = inst_os.supported_variants[0]
result['OS_VARIANT'] = variant
+
+ return result
+
+
+def OSEnvironment(instance, inst_os, debug=0):
+ """Calculate the environment for an os script.
+
+ @type instance: L{objects.Instance}
+ @param instance: target instance for the os script run
+ @type inst_os: L{objects.OS}
+ @param inst_os: operating system for which the environment is being built
+ @type debug: integer
+ @param debug: debug level (0 or 1, for OS Api 10)
+ @rtype: dict
+ @return: dict of environment variables
+ @raise errors.BlockDeviceError: if the block device
+ cannot be found
+
+ """
+ result = OSCoreEnv(inst_os, debug)
+
+ result['INSTANCE_NAME'] = instance.name
+ result['INSTANCE_OS'] = instance.os
+ result['HYPERVISOR'] = instance.hypervisor
+ result['DISK_COUNT'] = '%d' % len(instance.disks)
+ result['NIC_COUNT'] = '%d' % len(instance.nics)
+
+ # Disks
for idx, disk in enumerate(instance.disks):
real_disk = _OpenRealBD(disk)
result['DISK_%d_PATH' % idx] = real_disk.dev_path
elif disk.dev_type == constants.LD_FILE:
result['DISK_%d_BACKEND_TYPE' % idx] = \
'file:%s' % disk.physical_id[0]
+
+ # NICs
for idx, nic in enumerate(instance.nics):
result['NIC_%d_MAC' % idx] = nic.mac
if nic.ip:
result['NIC_%d_FRONTEND_TYPE' % idx] = \
instance.hvparams[constants.HV_NIC_TYPE]
+ # HV/BE params
for source, kind in [(instance.beparams, "BE"), (instance.hvparams, "HV")]:
for key, value in source.items():
result["INSTANCE_%s_%s" % (kind, key)] = str(value)
disk.unique_id, disk.dev_type)
-def ExportSnapshot(disk, dest_node, instance, cluster_name, idx, debug):
- """Export a block device snapshot to a remote node.
-
- @type disk: L{objects.Disk}
- @param disk: the description of the disk to export
- @type dest_node: str
- @param dest_node: the destination node to export to
- @type instance: L{objects.Instance}
- @param instance: the instance object to whom the disk belongs
- @type cluster_name: str
- @param cluster_name: the cluster name, needed for SSH hostalias
- @type idx: int
- @param idx: the index of the disk in the instance's disk list,
- used to export to the OS scripts environment
- @type debug: integer
- @param debug: debug level, passed to the OS scripts
- @rtype: None
-
- """
- inst_os = OSFromDisk(instance.os)
- export_env = OSEnvironment(instance, inst_os, debug)
-
- export_script = inst_os.export_script
-
- logfile = _InstanceLogName("export", inst_os.name, instance.name)
- if not os.path.exists(constants.LOG_OS_DIR):
- os.mkdir(constants.LOG_OS_DIR, 0750)
-
- real_disk = _OpenRealBD(disk)
-
- export_env['EXPORT_DEVICE'] = real_disk.dev_path
- export_env['EXPORT_INDEX'] = str(idx)
-
- destdir = utils.PathJoin(constants.EXPORT_DIR, instance.name + ".new")
- destfile = disk.physical_id[1]
-
- # the target command is built out of three individual commands,
- # which are joined by pipes; we check each individual command for
- # valid parameters
- expcmd = utils.BuildShellCmd("set -e; set -o pipefail; cd %s; %s 2>%s",
- inst_os.path, export_script, logfile)
-
- comprcmd = "gzip"
-
- destcmd = utils.BuildShellCmd("mkdir -p %s && cat > %s",
- destdir, utils.PathJoin(destdir, destfile))
- remotecmd = _GetSshRunner(cluster_name).BuildCmd(dest_node,
- constants.GANETI_RUNAS,
- destcmd)
-
- # all commands have been checked, so we're safe to combine them
- command = '|'.join([expcmd, comprcmd, utils.ShellQuoteArgs(remotecmd)])
-
- result = utils.RunCmd(["bash", "-c", command], env=export_env)
-
- if result.failed:
- _Fail("OS snapshot export command '%s' returned error: %s"
- " output: %s", command, result.fail_reason, result.output)
-
-
def FinalizeExport(instance, snap_disks):
"""Write out the export configuration information.
return config.Dumps()
-def ImportOSIntoInstance(instance, src_node, src_images, cluster_name, debug):
- """Import an os image into an instance.
-
- @type instance: L{objects.Instance}
- @param instance: instance to import the disks into
- @type src_node: string
- @param src_node: source node for the disk images
- @type src_images: list of string
- @param src_images: absolute paths of the disk images
- @type debug: integer
- @param debug: debug level, passed to the OS scripts
- @rtype: list of boolean
- @return: each boolean represent the success of importing the n-th disk
-
- """
- inst_os = OSFromDisk(instance.os)
- import_env = OSEnvironment(instance, inst_os, debug)
- import_script = inst_os.import_script
-
- logfile = _InstanceLogName("import", instance.os, instance.name)
- if not os.path.exists(constants.LOG_OS_DIR):
- os.mkdir(constants.LOG_OS_DIR, 0750)
-
- comprcmd = "gunzip"
- impcmd = utils.BuildShellCmd("(cd %s; %s >%s 2>&1)", inst_os.path,
- import_script, logfile)
-
- final_result = []
- for idx, image in enumerate(src_images):
- if image:
- destcmd = utils.BuildShellCmd('cat %s', image)
- remotecmd = _GetSshRunner(cluster_name).BuildCmd(src_node,
- constants.GANETI_RUNAS,
- destcmd)
- command = '|'.join([utils.ShellQuoteArgs(remotecmd), comprcmd, impcmd])
- import_env['IMPORT_DEVICE'] = import_env['DISK_%d_PATH' % idx]
- import_env['IMPORT_INDEX'] = str(idx)
- result = utils.RunCmd(command, env=import_env)
- if result.failed:
- logging.error("Disk import command '%s' returned error: %s"
- " output: %s", command, result.fail_reason,
- result.output)
- final_result.append("error importing disk %d: %s, %s" %
- (idx, result.fail_reason, result.output[-100]))
-
- if final_result:
- _Fail("; ".join(final_result), log=False)
-
-
def ListExports():
"""Return a list of exports currently available on this machine.
"""
if os.path.isdir(constants.EXPORT_DIR):
- return utils.ListVisibleFiles(constants.EXPORT_DIR)
+ return sorted(utils.ListVisibleFiles(constants.EXPORT_DIR))
else:
_Fail("No exports directory")
utils.RenameFile(old, new, mkdir=True)
-def JobQueueSetDrainFlag(drain_flag):
- """Set the drain flag for the queue.
-
- This will set or unset the queue drain flag.
-
- @type drain_flag: boolean
- @param drain_flag: if True, will set the drain flag, otherwise reset it.
- @rtype: truple
- @return: always True, None
- @warning: the function always returns True
-
- """
- if drain_flag:
- utils.WriteFile(constants.JOB_QUEUE_DRAIN_FILE, data="", close=True)
- else:
- utils.RemoveFile(constants.JOB_QUEUE_DRAIN_FILE)
-
-
def BlockdevClose(instance_name, disks):
"""Closes the given block devices.
env = None
prefix = None
suffix = None
+ exp_size = None
if ieio == constants.IEIO_FILE:
(filename, ) = ieargs
elif mode == constants.IEM_EXPORT:
suffix = "< %s" % quoted_filename
+ # Retrieve file size
+ try:
+ st = os.stat(filename)
+ except EnvironmentError, err:
+ logging.error("Can't stat(2) %s: %s", filename, err)
+ else:
+ exp_size = utils.BytesToMebibyte(st.st_size)
+
elif ieio == constants.IEIO_RAW_DISK:
(disk, ) = ieargs
real_disk.dev_path,
str(1024 * 1024), # 1 MB
str(disk.size))
+ exp_size = disk.size
elif ieio == constants.IEIO_SCRIPT:
(disk, disk_index, ) = ieargs
elif mode == constants.IEM_EXPORT:
prefix = "%s |" % script_cmd
+ # Let script predict size
+ exp_size = constants.IE_CUSTOM_SIZE
+
else:
_Fail("Invalid %s I/O mode %r", mode, ieio)
- return (env, prefix, suffix)
+ return (env, prefix, suffix, exp_size)
def _CreateImportExportStatusDir(prefix):
(prefix, utils.TimestampForFilename())))
-def StartImportExportDaemon(mode, key_name, ca, host, port, instance,
- ieio, ieioargs):
+def StartImportExportDaemon(mode, opts, host, port, instance, ieio, ieioargs):
"""Starts an import or export daemon.
@param mode: Import/output mode
- @type key_name: string
- @param key_name: RSA key name (None to use cluster certificate)
- @type ca: string:
- @param ca: Remote CA in PEM format (None to use cluster certificate)
+ @type opts: L{objects.ImportExportOptions}
+ @param opts: Daemon options
@type host: string
@param host: Remote host for export (None for import)
@type port: int
else:
_Fail("Invalid mode %r", mode)
- if (key_name is None) ^ (ca is None):
+ if (opts.key_name is None) ^ (opts.ca_pem is None):
_Fail("Cluster certificate can only be used for both key and CA")
- (cmd_env, cmd_prefix, cmd_suffix) = \
+ (cmd_env, cmd_prefix, cmd_suffix, exp_size) = \
_GetImportExportIoCommand(instance, mode, ieio, ieioargs)
- if key_name is None:
+ if opts.key_name is None:
# Use server.pem
key_path = constants.NODED_CERT_FILE
cert_path = constants.NODED_CERT_FILE
- assert ca is None
+ assert opts.ca_pem is None
else:
(_, key_path, cert_path) = _GetX509Filenames(constants.CRYPTO_KEYS_DIR,
- key_name)
- assert ca is not None
+ opts.key_name)
+ assert opts.ca_pem is not None
+
+ for i in [key_path, cert_path]:
+ if not os.path.exists(i):
+ _Fail("File '%s' does not exist" % i)
status_dir = _CreateImportExportStatusDir(prefix)
try:
status_file = utils.PathJoin(status_dir, _IES_STATUS_FILE)
pid_file = utils.PathJoin(status_dir, _IES_PID_FILE)
+ ca_file = utils.PathJoin(status_dir, _IES_CA_FILE)
- if ca is None:
+ if opts.ca_pem is None:
# Use server.pem
- # TODO: If socat runs as a non-root user, this might need to be copied to
- # a separate file
- ca_path = constants.NODED_CERT_FILE
+ ca = utils.ReadFile(constants.NODED_CERT_FILE)
else:
- ca_path = utils.PathJoin(status_dir, _IES_CA_FILE)
- utils.WriteFile(ca_path, data=ca, mode=0400)
+ ca = opts.ca_pem
+
+ # Write CA file
+ utils.WriteFile(ca_file, data=ca, mode=0400)
cmd = [
constants.IMPORT_EXPORT_DAEMON,
status_file, mode,
"--key=%s" % key_path,
"--cert=%s" % cert_path,
- "--ca=%s" % ca_path,
+ "--ca=%s" % ca_file,
]
if host:
if port:
cmd.append("--port=%s" % port)
+ if opts.compress:
+ cmd.append("--compress=%s" % opts.compress)
+
+ if opts.magic:
+ cmd.append("--magic=%s" % opts.magic)
+
+ if exp_size is not None:
+ cmd.append("--expected-size=%s" % exp_size)
+
if cmd_prefix:
cmd.append("--cmd-prefix=%s" % cmd_prefix)
return result
+def AbortImportExport(name):
+  """Sends SIGTERM to a running import/export daemon.
+
+  Looks up the daemon's PID file under its per-run status directory and,
+  if the daemon is still alive, asks it to terminate.
+
+  @type name: string
+  @param name: Import/export daemon name (status directory name under
+      C{constants.IMPORT_EXPORT_DIR})
+  @rtype: None
+
+  """
+  logging.info("Abort import/export %s", name)
+
+  status_dir = utils.PathJoin(constants.IMPORT_EXPORT_DIR, name)
+  # ReadLockedPidFile returns None/falsy if the PID file is gone or the
+  # process no longer holds the lock (i.e. the daemon already exited)
+  pid = utils.ReadLockedPidFile(utils.PathJoin(status_dir, _IES_PID_FILE))
+
+  if pid:
+    logging.info("Import/export %s is running with PID %s, sending SIGTERM",
+                 name, pid)
+    # Ignore the race where the daemon exits between the PID check and the
+    # kill(2) call (process-not-found errors are swallowed)
+    utils.IgnoreProcessNotFound(os.kill, pid, signal.SIGTERM)
+
+
def CleanupImportExport(name):
"""Cleanup after an import or export.
pid = 0
if pid > 0:
return "Reboot scheduled in 5 seconds"
+ # ensure the child is running on ram
+ try:
+ utils.Mlockall()
+ except Exception: # pylint: disable-msg=W0703
+ pass
time.sleep(5)
hyper.PowercycleNode()