constants.DATA_DIR,
constants.JOB_QUEUE_ARCHIVE_DIR,
constants.QUEUE_DIR,
+ constants.CRYPTO_KEYS_DIR,
])
+# Cap on the validity of generated SSL/TLS certificates: 7 days, in seconds
+_MAX_SSL_CERT_VALIDITY = 7 * 24 * 60 * 60
+# File names used inside each per-certificate directory under CRYPTO_KEYS_DIR
+_X509_KEY_FILE = "key"
+_X509_CERT_FILE = "cert"
class RPCFail(Exception):
constants.VNC_PASSWORD_FILE,
constants.RAPI_CERT_FILE,
constants.RAPI_USERS_FILE,
- constants.HMAC_CLUSTER_KEY,
+ constants.CONFD_HMAC_KEY,
])
for hv_name in constants.HYPER_TYPES:
"""
_CleanDirectory(constants.DATA_DIR)
+ _CleanDirectory(constants.CRYPTO_KEYS_DIR)
JobQueuePurge()
if modify_ssh_setup:
logging.exception("Error while processing ssh files")
try:
- utils.RemoveFile(constants.HMAC_CLUSTER_KEY)
+ utils.RemoveFile(constants.CONFD_HMAC_KEY)
utils.RemoveFile(constants.RAPI_CERT_FILE)
- utils.RemoveFile(constants.SSL_CERT_FILE)
+ utils.RemoveFile(constants.NODED_CERT_FILE)
except: # pylint: disable-msg=W0702
logging.exception("Error while removing cluster secrets")
if constants.NV_HYPERVISOR in what:
result[constants.NV_HYPERVISOR] = tmp = {}
for hv_name in what[constants.NV_HYPERVISOR]:
- tmp[hv_name] = hypervisor.GetHypervisor(hv_name).Verify()
+ try:
+ val = hypervisor.GetHypervisor(hv_name).Verify()
+ except errors.HypervisorError, err:
+ val = "Error while checking hypervisor: %s" % str(err)
+ tmp[hv_name] = val
if constants.NV_FILELIST in what:
result[constants.NV_FILELIST] = utils.FingerprintFiles(
" and ".join(fail))
if constants.NV_LVLIST in what:
- result[constants.NV_LVLIST] = GetVolumeList(what[constants.NV_LVLIST])
+ try:
+ val = GetVolumeList(what[constants.NV_LVLIST])
+ except RPCFail, err:
+ val = str(err)
+ result[constants.NV_LVLIST] = val
if constants.NV_INSTANCELIST in what:
- result[constants.NV_INSTANCELIST] = GetInstanceList(
- what[constants.NV_INSTANCELIST])
+ # GetInstanceList can fail
+ try:
+ val = GetInstanceList(what[constants.NV_INSTANCELIST])
+ except RPCFail, err:
+ val = str(err)
+ result[constants.NV_INSTANCELIST] = val
if constants.NV_VGLIST in what:
result[constants.NV_VGLIST] = utils.ListVolumeGroups()
result.output)
def parse_dev(dev):
- if '(' in dev:
- return dev.split('(')[0]
- else:
- return dev
+ return dev.split('(')[0]
+
+ def handle_dev(dev):
+ return [parse_dev(x) for x in dev.split(",")]
def map_line(line):
- return {
- 'name': line[0].strip(),
- 'size': line[1].strip(),
- 'dev': parse_dev(line[2].strip()),
- 'vg': line[3].strip(),
- }
+ line = [v.strip() for v in line]
+ return [{'name': line[0], 'size': line[1],
+ 'dev': dev, 'vg': line[3]} for dev in handle_dev(line[2])]
- return [map_line(line.split('|')) for line in result.stdout.splitlines()
- if line.count('|') >= 3]
+ all_devs = []
+ for line in result.stdout.splitlines():
+ if line.count('|') >= 3:
+ all_devs.extend(map_line(line.split('|')))
+ else:
+ logging.warning("Strange line in the output from lvs: '%s'", line)
+ return all_devs
def BridgesExist(bridges_list):
@param instance: the name of the instance being imported/added/etc.
"""
- base = "%s-%s-%s-%d.log" % (kind, os_name, instance, int(time.time()))
+ base = ("%s-%s-%s-%s.log" %
+ (kind, os_name, instance, utils.TimestampForFilename()))
return utils.PathJoin(constants.LOG_OS_DIR, base)
else:
devs.append(bd.dev_path)
else:
+ if not utils.IsNormAbsPath(rpath):
+ _Fail("Strange path returned from StaticDevPath: '%s'", rpath)
devs.append(rpath)
parent_bdev.RemoveChildren(devs)
return bdev.FindDevice(disk.dev_type, disk.physical_id, children, disk.size)
+def _OpenRealBD(disk):
+  """Opens the underlying block device of a disk.
+
+  @type disk: L{objects.Disk}
+  @param disk: the disk object we want to open
+  @rtype: L{bdev.BlockDev}
+  @return: the found and opened block device
+  @raise RPCFail: if the block device is not set up (via L{_Fail})
+
+  """
+  real_disk = _RecursiveFindBD(disk)
+  if real_disk is None:
+    _Fail("Block device '%s' is not set up", disk)
+
+  real_disk.Open()
+
+  return real_disk
+
+
def BlockdevFind(disk):
"""Check if a device is activated.
@rtype: None
"""
- real_disk = _RecursiveFindBD(disk)
- if real_disk is None:
- _Fail("Block device '%s' is not set up", disk)
-
- real_disk.Open()
+ real_disk = _OpenRealBD(disk)
# the block size on the read dd is 1MiB to match our units
expcmd = utils.BuildShellCmd("set -e; set -o pipefail; "
variant = inst_os.supported_variants[0]
result['OS_VARIANT'] = variant
for idx, disk in enumerate(instance.disks):
- real_disk = _RecursiveFindBD(disk)
- if real_disk is None:
- raise errors.BlockDeviceError("Block device '%s' is not set up" %
- str(disk))
- real_disk.Open()
+ real_disk = _OpenRealBD(disk)
result['DISK_%d_PATH' % idx] = real_disk.dev_path
result['DISK_%d_ACCESS' % idx] = disk.mode
if constants.HV_DISK_TYPE in instance.hvparams:
return result
+
def BlockdevGrow(disk, amount):
"""Grow a stack of block devices.
logfile = _InstanceLogName("export", inst_os.name, instance.name)
if not os.path.exists(constants.LOG_OS_DIR):
os.mkdir(constants.LOG_OS_DIR, 0750)
- real_disk = _RecursiveFindBD(disk)
- if real_disk is None:
- _Fail("Block device '%s' is not set up", disk)
- real_disk.Open()
+ real_disk = _OpenRealBD(disk)
export_env['EXPORT_DEVICE'] = real_disk.dev_path
export_env['EXPORT_INDEX'] = str(idx)
comprcmd = "gzip"
- destcmd = utils.BuildShellCmd("mkdir -p %s && cat > %s/%s",
- destdir, destdir, destfile)
+ destcmd = utils.BuildShellCmd("mkdir -p %s && cat > %s",
+ destdir, utils.PathJoin(destdir, destfile))
remotecmd = _GetSshRunner(cluster_name).BuildCmd(dest_node,
constants.GANETI_RUNAS,
destcmd)
config.set(constants.INISECT_INS, 'vcpus', '%d' %
instance.beparams[constants.BE_VCPUS])
config.set(constants.INISECT_INS, 'disk_template', instance.disk_template)
+ config.set(constants.INISECT_INS, 'hypervisor', instance.hypervisor)
nic_total = 0
for nic_count, nic in enumerate(instance.nics):
config.set(constants.INISECT_INS, 'nic%d_mac' %
nic_count, '%s' % nic.mac)
config.set(constants.INISECT_INS, 'nic%d_ip' % nic_count, '%s' % nic.ip)
- config.set(constants.INISECT_INS, 'nic%d_bridge' % nic_count,
- '%s' % nic.bridge)
+ for param in constants.NICS_PARAMETER_TYPES:
+ config.set(constants.INISECT_INS, 'nic%d_%s' % (nic_count, param),
+ '%s' % nic.nicparams.get(param, None))
# TODO: redundant: on load can read nics until it doesn't exist
config.set(constants.INISECT_INS, 'nic_count' , '%d' % nic_total)
config.set(constants.INISECT_INS, 'disk_count' , '%d' % disk_total)
+ # New-style hypervisor/backend parameters
+
+ config.add_section(constants.INISECT_HYP)
+ for name, value in instance.hvparams.items():
+ if name not in constants.HVC_GLOBALS:
+ config.set(constants.INISECT_HYP, name, str(value))
+
+ config.add_section(constants.INISECT_BEP)
+ for name, value in instance.beparams.items():
+ config.set(constants.INISECT_BEP, name, str(value))
+
utils.WriteFile(utils.PathJoin(destdir, constants.EXPORT_CONF_FILE),
data=config.Dumps())
- shutil.rmtree(finaldestdir, True)
+ shutil.rmtree(finaldestdir, ignore_errors=True)
shutil.move(destdir, finaldestdir)
@return: the normalized path if valid, None otherwise
"""
+ if not constants.ENABLE_FILE_STORAGE:
+ _Fail("File storage disabled at configure time")
cfg = _GetConfig()
file_storage_dir = os.path.normpath(file_storage_dir)
base_file_storage_dir = cfg.GetFileStorageDir()
- if (not os.path.commonprefix([file_storage_dir, base_file_storage_dir]) ==
+ if (os.path.commonprefix([file_storage_dir, base_file_storage_dir]) !=
base_file_storage_dir):
_Fail("File storage directory '%s' is not under base file"
" storage directory '%s'", file_storage_dir, base_file_storage_dir)
utils.RemoveFile(constants.CLUSTER_CONF_FILE)
+def _GetX509Filenames(cryptodir, name):
+  """Returns the full paths for the private key and certificate.
+
+  @type cryptodir: string
+  @param cryptodir: Base directory holding one sub-directory per certificate
+  @type name: string
+  @param name: Certificate name (the sub-directory's basename)
+  @rtype: tuple; (string, string, string)
+  @return: Certificate directory, private key path and certificate path
+
+  """
+  return (utils.PathJoin(cryptodir, name),
+          utils.PathJoin(cryptodir, name, _X509_KEY_FILE),
+          utils.PathJoin(cryptodir, name, _X509_CERT_FILE))
+
+
+def CreateX509Certificate(validity, cryptodir=constants.CRYPTO_KEYS_DIR):
+  """Creates a new X509 certificate for SSL/TLS.
+
+  @type validity: int
+  @param validity: Validity in seconds; silently capped at
+    L{_MAX_SSL_CERT_VALIDITY}
+  @type cryptodir: string
+  @param cryptodir: Directory in which a new sub-directory for the
+    certificate is created
+  @rtype: tuple; (string, string)
+  @return: Certificate name and public part
+
+  """
+  (key_pem, cert_pem) = \
+    utils.GenerateSelfSignedX509Cert(utils.HostInfo.SysName(),
+                                     min(validity, _MAX_SSL_CERT_VALIDITY))
+
+  # A unique, timestamped directory per certificate; its basename doubles as
+  # the certificate's name
+  cert_dir = tempfile.mkdtemp(dir=cryptodir,
+                              prefix="x509-%s-" % utils.TimestampForFilename())
+  try:
+    name = os.path.basename(cert_dir)
+    assert len(name) > 5
+
+    (_, key_file, cert_file) = _GetX509Filenames(cryptodir, name)
+
+    utils.WriteFile(key_file, mode=0400, data=key_pem)
+    utils.WriteFile(cert_file, mode=0400, data=cert_pem)
+
+    # Never return private key as it shouldn't leave the node
+    return (name, cert_pem)
+  except Exception:
+    # Don't leave a half-written certificate directory behind
+    shutil.rmtree(cert_dir, ignore_errors=True)
+    raise
+
+
+def RemoveX509Certificate(name, cryptodir=constants.CRYPTO_KEYS_DIR):
+  """Removes a X509 certificate.
+
+  @type name: string
+  @param name: Certificate name
+  @type cryptodir: string
+  @param cryptodir: Directory containing the certificate's sub-directory
+  @raise RPCFail: if the certificate directory cannot be removed after its
+    key and certificate files have been deleted (via L{_Fail})
+
+  """
+  (cert_dir, key_file, cert_file) = _GetX509Filenames(cryptodir, name)
+
+  utils.RemoveFile(key_file)
+  utils.RemoveFile(cert_file)
+
+  # The directory should now be empty; rmdir fails otherwise (e.g. if
+  # unexpected files were placed there)
+  try:
+    os.rmdir(cert_dir)
+  except EnvironmentError, err:
+    _Fail("Cannot remove certificate directory '%s': %s",
+          cert_dir, err)
+
+
def _FindDisks(nodes_ip, disks):
"""Sets the physical ID on disks and returns the block devices.
subdir = "%s-%s.d" % (hpath, suffix)
- dir_name = "%s/%s" % (self._BASE_DIR, subdir)
+ dir_name = utils.PathJoin(self._BASE_DIR, subdir)
results = []
if dev_path.startswith(cls._DEV_PREFIX):
dev_path = dev_path[len(cls._DEV_PREFIX):]
dev_path = dev_path.replace("/", "_")
- fpath = "%s/bdev_%s" % (cls._ROOT_DIR, dev_path)
+ fpath = utils.PathJoin(cls._ROOT_DIR, "bdev_%s" % dev_path)
return fpath
@classmethod