from ganeti import ssconf
-def _GetSshRunner():
- return ssh.SshRunner()
def _GetConfig():
  """Simple wrapper to return a SimpleConfigReader.

  @rtype: L{ssconf.SimpleConfigReader}
  @return: a SimpleConfigReader instance

  """
  return ssconf.SimpleConfigReader()
-def _CleanDirectory(path):
def _GetSshRunner(cluster_name):
  """Simple wrapper to return an SshRunner.

  @type cluster_name: str
  @param cluster_name: the cluster name, passed through to the
      SshRunner constructor

  """
  return ssh.SshRunner(cluster_name)
+
+
+def _CleanDirectory(path, exclude=[]):
+ """Removes all regular files in a directory.
+
+ @param exclude: List of files to be excluded.
+ @type exclude: list
+
+ """
if not os.path.isdir(path):
return
+
+ # Normalize excluded paths
+ exclude = [os.path.normpath(i) for i in exclude]
+
for rel_name in utils.ListVisibleFiles(path):
- full_name = os.path.join(path, rel_name)
+ full_name = os.path.normpath(os.path.join(path, rel_name))
+ if full_name in exclude:
+ continue
if os.path.isfile(full_name) and not os.path.islink(full_name):
utils.RemoveFile(full_name)
-def _GetMasterInfo():
- """Return the master ip and netdev.
def JobQueuePurge():
  """Removes job queue files and archived jobs.

  """
  # The queue lock file is kept in place; everything else in the
  # queue directory and the whole archive directory are removed.
  keep_files = [constants.JOB_QUEUE_LOCK_FILE]
  _CleanDirectory(constants.QUEUE_DIR, exclude=keep_files)
  _CleanDirectory(constants.JOB_QUEUE_ARCHIVE_DIR)
+
+
def GetMasterInfo():
  """Returns master information.

  This is an utility function to compute master information, either
  for consumption here or from the node daemon.

  @rtype: tuple
  @return: (master_netdev, master_ip, master_name); all three
      elements are None if the configuration cannot be read

  """
  try:
    cfg = _GetConfig()
    master_netdev = cfg.GetMasterNetdev()
    master_ip = cfg.GetMasterIP()
    master_node = cfg.GetMasterNode()
  except errors.ConfigurationError:
    logging.exception("Cluster configuration incomplete")
    # Must be a 3-tuple like the success path: callers unpack the
    # result as "netdev, ip, node = GetMasterInfo()" and a 2-tuple
    # would raise ValueError instead of being handled.
    return (None, None, None)
  return (master_netdev, master_ip, master_node)
def StartMaster(start_daemons):
"""
ok = True
- master_netdev, master_ip = _GetMasterInfo()
+ master_netdev, master_ip, _ = GetMasterInfo()
if not master_netdev:
return False
if utils.TcpPing(master_ip, constants.DEFAULT_NODED_PORT):
- if utils.TcpPing(master_ip, constants.DEFAULT_NODED_PORT,
- source=constants.LOCALHOST_IP_ADDRESS):
+ if utils.OwnIpAddress(master_ip):
# we already have the ip:
logging.debug("Already started")
else:
stop the master daemons (ganet-masterd and ganeti-rapi).
"""
- master_netdev, master_ip = _GetMasterInfo()
+ master_netdev, master_ip, _ = GetMasterInfo()
if not master_netdev:
return False
"""
_CleanDirectory(constants.DATA_DIR)
-
JobQueuePurge()
try:
raise errors.QuitGanetiException(False, 'Shutdown scheduled')
-def GetNodeInfo(vgname):
+def GetNodeInfo(vgname, hypervisor_type):
"""Gives back a hash with different informations about the node.
- Returns:
- { 'vg_size' : xxx, 'vg_free' : xxx, 'memory_domain0': xxx,
- 'memory_free' : xxx, 'memory_total' : xxx }
- where
- vg_size is the size of the configured volume group in MiB
- vg_free is the free size of the volume group in MiB
- memory_dom0 is the memory allocated for domain0 in MiB
- memory_free is the currently available (free) ram in MiB
- memory_total is the total number of ram in MiB
+ @type vgname: C{string}
+ @param vgname: the name of the volume group to ask for disk space information
+ @type hypervisor_type: C{str}
+ @param hypervisor_type: the name of the hypervisor to ask for
+ memory information
+ @rtype: C{dict}
+ @return: dictionary with the following keys:
+ - vg_size is the size of the configured volume group in MiB
+ - vg_free is the free size of the volume group in MiB
+ - memory_dom0 is the memory allocated for domain0 in MiB
+ - memory_free is the currently available (free) ram in MiB
+ - memory_total is the total number of ram in MiB
"""
outputarray = {}
outputarray['vg_size'] = vginfo['vg_size']
outputarray['vg_free'] = vginfo['vg_free']
- hyper = hypervisor.GetHypervisor()
+ hyper = hypervisor.GetHypervisor(hypervisor_type)
hyp_info = hyper.GetNodeInfo()
if hyp_info is not None:
outputarray.update(hyp_info)
return outputarray
-def VerifyNode(what):
+def VerifyNode(what, cluster_name):
"""Verify the status of the local node.
- Args:
- what - a dictionary of things to check:
- 'filelist' : list of files for which to compute checksums
- 'nodelist' : list of nodes we should check communication with
- 'hypervisor': run the hypervisor-specific verify
+ Based on the input L{what} parameter, various checks are done on the
+ local node.
+
+ If the I{filelist} key is present, this list of
+ files is checksummed and the file/checksum pairs are returned.
+
+ If the I{nodelist} key is present, we check that we have
+ connectivity via ssh with the target nodes (and check the hostname
+ report).
- Requested files on local node are checksummed and the result returned.
+ If the I{node-net-test} key is present, we check that we have
+ connectivity to the given nodes via both primary IP and, if
+ applicable, secondary IPs.
- The nodelist is traversed, with the following checks being made
- for each node:
- - known_hosts key correct
- - correct resolving of node name (target node returns its own hostname
- by ssh-execution of 'hostname', result compared against name in list.
+ @type what: C{dict}
+ @param what: a dictionary of things to check:
+ - filelist: list of files for which to compute checksums
+ - nodelist: list of nodes we should check ssh communication with
+ - node-net-test: list of nodes we should check node daemon port
+ connectivity with
+ - hypervisor: list with hypervisors to run the verify for
"""
result = {}
if 'hypervisor' in what:
- result['hypervisor'] = hypervisor.GetHypervisor().Verify()
+ result['hypervisor'] = my_dict = {}
+ for hv_name in what['hypervisor']:
+ my_dict[hv_name] = hypervisor.GetHypervisor(hv_name).Verify()
if 'filelist' in what:
result['filelist'] = utils.FingerprintFiles(what['filelist'])
result['nodelist'] = {}
random.shuffle(what['nodelist'])
for node in what['nodelist']:
- success, message = _GetSshRunner().VerifyNodeHostname(node)
+ success, message = _GetSshRunner(cluster_name).VerifyNodeHostname(node)
if not success:
result['nodelist'][node] = message
if 'node-net-test' in what:
" primary/secondary IP"
" in the node list")
else:
- port = ssconf.SimpleStore().GetNodeDaemonPort()
+ port = utils.GetNodeDaemonPort()
for name, pip, sip in what['node-net-test']:
fail = []
if not utils.TcpPing(pip, port, source=my_pip):
return True
def GetInstanceList(hypervisor_list):
  """Provides a list of instances.

  @type hypervisor_list: list
  @param hypervisor_list: the list of hypervisors to query information

  @rtype: list
  @return: a list of all running instances on the current node
    - instance1.example.com
    - instance2.example.com

  """
  results = []
  for hname in hypervisor_list:
    try:
      names = hypervisor.GetHypervisor(hname).ListInstances()
      results.extend(names)
    except errors.HypervisorError:
      # fixed typo: was "hypevisor"; the exception binding was unused
      logging.exception("Error enumerating instances for hypervisor %s", hname)
      # FIXME: should we somehow not propagate this to the master?
      raise
  return results
-def GetInstanceInfo(instance):
+def GetInstanceInfo(instance, hname):
"""Gives back the informations about an instance as a dictionary.
- Args:
- instance: name of the instance (ex. instance1.example.com)
+ @type instance: string
+ @param instance: the instance name
+ @type hname: string
+ @param hname: the hypervisor type of the instance
- Returns:
- { 'memory' : 511, 'state' : '-b---', 'time' : 3188.8, }
- where
- memory: memory size of instance (int)
- state: xen state of instance (string)
- time: cpu time of instance (float)
+ @rtype: dict
+ @return: dictionary with the following keys:
+ - memory: memory size of instance (int)
+ - state: xen state of instance (string)
+ - time: cpu time of instance (float)
"""
output = {}
- iinfo = hypervisor.GetHypervisor().GetInstanceInfo(instance)
+ iinfo = hypervisor.GetHypervisor(hname).GetInstanceInfo(instance)
if iinfo is not None:
output['memory'] = iinfo[2]
output['state'] = iinfo[4]
return output
def GetAllInstancesInfo(hypervisor_list):
  """Gather data about all instances.

  This is the equivalent of `GetInstanceInfo()`, except that it
  computes data for all instances at once, thus being faster if one
  needs data about more than one instance.

  @type hypervisor_list: list
  @param hypervisor_list: list of hypervisors to query for instance data

  @rtype: dict of dicts
  @return: dictionary of instance: data, with data having the following keys:
    - memory: memory size of instance (int)
    - state: xen state of instance (string)
    - time: cpu time of instance (float)
    - vcpus: the number of vcpus

  @raise errors.HypervisorError: if the same instance is reported
      running by more than one hypervisor with different parameters

  """
  output = {}
  for hname in hypervisor_list:
    iinfo = hypervisor.GetHypervisor(hname).GetAllInstancesInfo()
    if iinfo:
      # inst_id is part of the reported tuple but not needed here
      for name, inst_id, memory, vcpus, state, times in iinfo:
        value = {
          'memory': memory,
          'vcpus': vcpus,
          'state': state,
          'time': times,
          }
        if name in output and output[name] != value:
          raise errors.HypervisorError("Instance %s running duplicate"
                                       " with different parameters" % name)
        output[name] = value
  return output
-def AddOSToInstance(instance, os_disk, swap_disk):
+def AddOSToInstance(instance):
"""Add an OS to an instance.
- Args:
- instance: the instance object
- os_disk: the instance-visible name of the os device
- swap_disk: the instance-visible name of the swap device
+ @type instance: L{objects.Instance}
+ @param instance: Instance whose OS is to be installed
"""
inst_os = OSFromDisk(instance.os)
create_script = inst_os.create_script
-
- os_device = instance.FindDisk(os_disk)
- if os_device is None:
- logging.error("Can't find this device-visible name '%s'", os_disk)
- return False
-
- swap_device = instance.FindDisk(swap_disk)
- if swap_device is None:
- logging.error("Can't find this device-visible name '%s'", swap_disk)
- return False
-
- real_os_dev = _RecursiveFindBD(os_device)
- if real_os_dev is None:
- raise errors.BlockDeviceError("Block device '%s' is not set up" %
- str(os_device))
- real_os_dev.Open()
-
- real_swap_dev = _RecursiveFindBD(swap_device)
- if real_swap_dev is None:
- raise errors.BlockDeviceError("Block device '%s' is not set up" %
- str(swap_device))
- real_swap_dev.Open()
+ create_env = OSEnvironment(instance)
logfile = "%s/add-%s-%s-%d.log" % (constants.LOG_OS_DIR, instance.os,
instance.name, int(time.time()))
if not os.path.exists(constants.LOG_OS_DIR):
os.mkdir(constants.LOG_OS_DIR, 0750)
- command = utils.BuildShellCmd("cd %s && %s -i %s -b %s -s %s &>%s",
- inst_os.path, create_script, instance.name,
- real_os_dev.dev_path, real_swap_dev.dev_path,
- logfile)
+ command = utils.BuildShellCmd("cd %s && %s &>%s",
+ inst_os.path, create_script, logfile)
- result = utils.RunCmd(command)
+ result = utils.RunCmd(command, env=create_env)
if result.failed:
logging.error("os create command '%s' returned error: %s, logfile: %s,"
" output: %s", command, result.fail_reason, logfile,
return True
-def RunRenameInstance(instance, old_name, os_disk, swap_disk):
+def RunRenameInstance(instance, old_name):
"""Run the OS rename script for an instance.
- Args:
- instance: the instance object
- old_name: the old name of the instance
- os_disk: the instance-visible name of the os device
- swap_disk: the instance-visible name of the swap device
+ @type instance: objects.Instance
+ @param instance: Instance whose OS is to be installed
+ @type old_name: string
+ @param old_name: previous instance name
"""
inst_os = OSFromDisk(instance.os)
script = inst_os.rename_script
-
- os_device = instance.FindDisk(os_disk)
- if os_device is None:
- logging.error("Can't find this device-visible name '%s'", os_disk)
- return False
-
- swap_device = instance.FindDisk(swap_disk)
- if swap_device is None:
- logging.error("Can't find this device-visible name '%s'", swap_disk)
- return False
-
- real_os_dev = _RecursiveFindBD(os_device)
- if real_os_dev is None:
- raise errors.BlockDeviceError("Block device '%s' is not set up" %
- str(os_device))
- real_os_dev.Open()
-
- real_swap_dev = _RecursiveFindBD(swap_device)
- if real_swap_dev is None:
- raise errors.BlockDeviceError("Block device '%s' is not set up" %
- str(swap_device))
- real_swap_dev.Open()
+ rename_env = OSEnvironment(instance)
+ rename_env['OLD_INSTANCE_NAME'] = old_name
logfile = "%s/rename-%s-%s-%s-%d.log" % (constants.LOG_OS_DIR, instance.os,
old_name,
if not os.path.exists(constants.LOG_OS_DIR):
os.mkdir(constants.LOG_OS_DIR, 0750)
- command = utils.BuildShellCmd("cd %s && %s -o %s -n %s -b %s -s %s &>%s",
- inst_os.path, script, old_name, instance.name,
- real_os_dev.dev_path, real_swap_dev.dev_path,
- logfile)
+ command = utils.BuildShellCmd("cd %s && %s &>%s",
+ inst_os.path, script, logfile)
- result = utils.RunCmd(command)
+ result = utils.RunCmd(command, env=rename_env)
if result.failed:
logging.error("os create command '%s' returned error: %s output: %s",
def StartInstance(instance, extra_args):
"""Start an instance.
- Args:
- instance - name of instance to start.
+ @type instance: instance object
+ @param instance: the instance object
+ @rtype: boolean
+ @return: whether the startup was successful or not
"""
- running_instances = GetInstanceList()
+ running_instances = GetInstanceList([instance.hypervisor])
if instance.name in running_instances:
return True
block_devices = _GatherBlockDevs(instance)
- hyper = hypervisor.GetHypervisor()
+ hyper = hypervisor.GetHypervisor(instance.hypervisor)
try:
hyper.StartInstance(instance, block_devices, extra_args)
def ShutdownInstance(instance):
"""Shut an instance down.
- Args:
- instance - name of instance to shutdown.
+ @type instance: instance object
+ @param instance: the instance object
+ @rtype: boolean
+ @return: whether the startup was successful or not
"""
- running_instances = GetInstanceList()
+ hv_name = instance.hypervisor
+ running_instances = GetInstanceList([hv_name])
if instance.name not in running_instances:
return True
- hyper = hypervisor.GetHypervisor()
+ hyper = hypervisor.GetHypervisor(hv_name)
try:
hyper.StopInstance(instance)
except errors.HypervisorError, err:
time.sleep(1)
for dummy in range(11):
- if instance.name not in GetInstanceList():
+ if instance.name not in GetInstanceList([hv_name]):
break
time.sleep(10)
else:
return False
time.sleep(1)
- if instance.name in GetInstanceList():
+ if instance.name in GetInstanceList([hv_name]):
logging.error("could not shutdown instance '%s' even by destroy",
instance.name)
return False
reboot_type - how to reboot [soft,hard,full]
"""
- running_instances = GetInstanceList()
+ running_instances = GetInstanceList([instance.hypervisor])
if instance.name not in running_instances:
logging.error("Cannot reboot instance that is not running")
return False
- hyper = hypervisor.GetHypervisor()
+ hyper = hypervisor.GetHypervisor(instance.hypervisor)
if reboot_type == constants.INSTANCE_REBOOT_SOFT:
try:
hyper.RebootInstance(instance)
else:
raise errors.ParameterError("reboot_type invalid")
-
return True
def MigrateInstance(instance, target, live):
"""Migrates an instance to another node.
+ @type instance: C{objects.Instance}
+ @param instance: the instance definition
+ @type target: string
+ @param target: the target node name
+ @type live: boolean
+ @param live: whether the migration should be done live or not (the
+ interpretation of this parameter is left to the hypervisor)
+ @rtype: tuple
+ @return: a tuple of (success, msg) where:
+ - succes is a boolean denoting the success/failure of the operation
+ - msg is a string with details in case of failure
+
"""
- hyper = hypervisor.GetHypervisor()
+ hyper = hypervisor.GetHypervisor(instance.hypervisor_name)
try:
- hyper.MigrateInstance(instance, target, live)
+ hyper.MigrateInstance(instance.name, target, live)
except errors.HypervisorError, err:
msg = "Failed to migrate instance: %s" % str(err)
logging.error(msg)
return rbd
return (rbd.dev_path, rbd.major, rbd.minor) + rbd.GetSyncStatus()
-def _IsJobQueueFile(file_name):
- queue_dir = os.path.normpath(constants.QUEUE_DIR)
- return os.path.commonprefix([queue_dir, file_name]) == queue_dir
def UploadFile(file_name, data, mode, uid, gid, atime, mtime):
"""Write a file to the filesystem.
constants.SSH_KNOWN_HOSTS_FILE,
constants.VNC_PASSWORD_FILE,
]
- allowed_files.extend(ssconf.SimpleStore().GetFileList())
- if not (file_name in allowed_files or _IsJobQueueFile(file_name)):
+ if file_name not in allowed_files:
logging.error("Filename passed to UploadFile not in allowed"
" upload targets: '%s'", file_name)
return False
try:
f = open(api_file)
try:
- api_version = f.read(256)
+ api_versions = f.readlines()
finally:
f.close()
except EnvironmentError, err:
raise errors.InvalidOS(name, os_dir, "error while reading the"
" API version (%s)" % _ErrnoOrStr(err))
- api_version = api_version.strip()
+ api_versions = [version.strip() for version in api_versions]
try:
- api_version = int(api_version)
+ api_versions = [int(version) for version in api_versions]
except (TypeError, ValueError), err:
raise errors.InvalidOS(name, os_dir,
"API version is not integer (%s)" % str(err))
- return api_version
+ return api_versions
def DiagnoseOS(top_dirs=None):
`errors.InvalidOS` exception, detailing why this is not a valid
OS.
- Args:
- os_dir: Directory containing the OS scripts. Defaults to a search
- in all the OS_SEARCH_PATH directories.
+ @type base_dir: string
+ @keyword base_dir: Base directory containing OS installations.
+ Defaults to a search in all the OS_SEARCH_PATH dirs.
"""
else:
os_dir = os.path.sep.join([base_dir, name])
- api_version = _OSOndiskVersion(name, os_dir)
+ api_versions = _OSOndiskVersion(name, os_dir)
- if api_version != constants.OS_API_VERSION:
+ if constants.OS_API_VERSION not in api_versions:
raise errors.InvalidOS(name, os_dir, "API version mismatch"
" (found %s want %s)"
- % (api_version, constants.OS_API_VERSION))
+ % (api_versions, constants.OS_API_VERSION))
# OS Scripts dictionary, we will populate it with the actual script names
- os_scripts = {'create': '', 'export': '', 'import': '', 'rename': ''}
+ os_scripts = dict.fromkeys(constants.OS_SCRIPTS)
for script in os_scripts:
os_scripts[script] = os.path.sep.join([os_dir, script])
return objects.OS(name=name, path=os_dir, status=constants.OS_VALID_STATUS,
- create_script=os_scripts['create'],
- export_script=os_scripts['export'],
- import_script=os_scripts['import'],
- rename_script=os_scripts['rename'],
- api_version=api_version)
+ create_script=os_scripts[constants.OS_SCRIPT_CREATE],
+ export_script=os_scripts[constants.OS_SCRIPT_EXPORT],
+ import_script=os_scripts[constants.OS_SCRIPT_IMPORT],
+ rename_script=os_scripts[constants.OS_SCRIPT_RENAME],
+ api_versions=api_versions)
+
def OSEnvironment(instance, debug=0):
  """Calculate the environment for an os script.

  @type instance: instance object
  @param instance: target instance for the os script run
  @type debug: integer
  @param debug: debug level (0 or 1, for os api 10)
  @rtype: dict
  @return: dict of environment variables
  @raise errors.BlockDeviceError: if an instance disk is not set up

  """
  env = {
    'OS_API_VERSION': '%d' % constants.OS_API_VERSION,
    'INSTANCE_NAME': instance.name,
    'HYPERVISOR': instance.hypervisor,
    'DISK_COUNT': '%d' % len(instance.disks),
    'NIC_COUNT': '%d' % len(instance.nics),
    'DEBUG_LEVEL': '%d' % debug,
    }

  # One DISK_<n>_* group per disk, in instance order
  for idx, disk in enumerate(instance.disks):
    real_disk = _RecursiveFindBD(disk)
    if real_disk is None:
      raise errors.BlockDeviceError("Block device '%s' is not set up" %
                                    str(disk))
    real_disk.Open()
    env['DISK_%d_PATH' % idx] = real_disk.dev_path
    # FIXME: When disks will have read-only mode, populate this
    env['DISK_%d_ACCESS' % idx] = 'W'
    if constants.HV_DISK_TYPE in instance.hvparams:
      env['DISK_%d_FRONTEND_TYPE' % idx] = \
        instance.hvparams[constants.HV_DISK_TYPE]
    if disk.dev_type in constants.LDS_BLOCK:
      env['DISK_%d_BACKEND_TYPE' % idx] = 'block'
    elif disk.dev_type == constants.LD_FILE:
      env['DISK_%d_BACKEND_TYPE' % idx] = \
        'file:%s' % disk.physical_id[0]

  # One NIC_<n>_* group per NIC, in instance order
  for idx, nic in enumerate(instance.nics):
    env['NIC_%d_MAC' % idx] = nic.mac
    if nic.ip:
      env['NIC_%d_IP' % idx] = nic.ip
    env['NIC_%d_BRIDGE' % idx] = nic.bridge
    if constants.HV_NIC_TYPE in instance.hvparams:
      env['NIC_%d_FRONTEND_TYPE' % idx] = \
        instance.hvparams[constants.HV_NIC_TYPE]

  return env
def GrowBlockDevice(disk, amount):
"""Grow a stack of block devices.
This function is called recursively, and the snapshot is actually created
just for the leaf lvm backend device.
- Args:
- disk: the disk to be snapshotted
-
- Returns:
- a config entry for the actual lvm device snapshotted.
+ @type disk: L{objects.Disk}
+ @param disk: the disk to be snapshotted
+ @rtype: string
+ @return: snapshot disk path
"""
if disk.children:
(disk.unique_id, disk.dev_type))
-def ExportSnapshot(disk, dest_node, instance):
+def ExportSnapshot(disk, dest_node, instance, cluster_name):
"""Export a block device snapshot to a remote node.
Args:
True if successful, False otherwise.
"""
+ # TODO(ultrotter): Import/Export still to be converted to OS API 10
+ logging.error("Import/Export still to be converted to OS API 10")
+ return False
+
inst_os = OSFromDisk(instance.os)
export_script = inst_os.export_script
destcmd = utils.BuildShellCmd("mkdir -p %s && cat > %s/%s",
destdir, destdir, destfile)
- remotecmd = _GetSshRunner().BuildCmd(dest_node, constants.GANETI_RUNAS,
- destcmd)
+ remotecmd = _GetSshRunner(cluster_name).BuildCmd(dest_node,
+ constants.GANETI_RUNAS,
+ destcmd)
# all commands have been checked, so we're safe to combine them
command = '|'.join([expcmd, comprcmd, utils.ShellQuoteArgs(remotecmd)])
config.add_section(constants.INISECT_INS)
config.set(constants.INISECT_INS, 'name', instance.name)
- config.set(constants.INISECT_INS, 'memory', '%d' % instance.memory)
- config.set(constants.INISECT_INS, 'vcpus', '%d' % instance.vcpus)
+ config.set(constants.INISECT_INS, 'memory', '%d' %
+ instance.beparams[constants.BE_MEMORY])
+ config.set(constants.INISECT_INS, 'vcpus', '%d' %
+ instance.beparams[constants.BE_VCPUS])
config.set(constants.INISECT_INS, 'disk_template', instance.disk_template)
nic_count = 0
disk_count = 0
for disk_count, disk in enumerate(snap_disks):
- config.set(constants.INISECT_INS, 'disk%d_ivname' % disk_count,
- ('%s' % disk.iv_name))
- config.set(constants.INISECT_INS, 'disk%d_dump' % disk_count,
- ('%s' % disk.physical_id[1]))
- config.set(constants.INISECT_INS, 'disk%d_size' % disk_count,
- ('%d' % disk.size))
+ if disk:
+ config.set(constants.INISECT_INS, 'disk%d_ivname' % disk_count,
+ ('%s' % disk.iv_name))
+ config.set(constants.INISECT_INS, 'disk%d_dump' % disk_count,
+ ('%s' % disk.physical_id[1]))
+ config.set(constants.INISECT_INS, 'disk%d_size' % disk_count,
+ ('%d' % disk.size))
config.set(constants.INISECT_INS, 'disk_count' , '%d' % disk_count)
cff = os.path.join(destdir, constants.EXPORT_CONF_FILE)
return config
-def ImportOSIntoInstance(instance, os_disk, swap_disk, src_node, src_image):
+def ImportOSIntoInstance(instance, os_disk, swap_disk, src_node, src_image,
+ cluster_name):
"""Import an os image into an instance.
Args:
False in case of error, True otherwise.
"""
+ # TODO(ultrotter): Import/Export still to be converted to OS API 10
+ logging.error("Import/Export still to be converted to OS API 10")
+ return False
+
inst_os = OSFromDisk(instance.os)
import_script = inst_os.import_script
os.mkdir(constants.LOG_OS_DIR, 0750)
destcmd = utils.BuildShellCmd('cat %s', src_image)
- remotecmd = _GetSshRunner().BuildCmd(src_node, constants.GANETI_RUNAS,
- destcmd)
+ remotecmd = _GetSshRunner(cluster_name).BuildCmd(src_node,
+ constants.GANETI_RUNAS,
+ destcmd)
comprcmd = "gunzip"
impcmd = utils.BuildShellCmd("(cd %s; %s -i %s -b %s -s %s &>%s)",
logfile)
command = '|'.join([utils.ShellQuoteArgs(remotecmd), comprcmd, impcmd])
+ env = {'HYPERVISOR': instance.hypervisor}
- result = utils.RunCmd(command)
+ result = utils.RunCmd(command, env=env)
if result.failed:
logging.error("os import command '%s' returned error: %s"
normalized file_storage_dir (string) if valid, None otherwise
"""
+ cfg = _GetConfig()
file_storage_dir = os.path.normpath(file_storage_dir)
- base_file_storage_dir = ssconf.SimpleStore().GetFileStorageDir()
+ base_file_storage_dir = cfg.GetFileStorageDir()
if (not os.path.commonprefix([file_storage_dir, base_file_storage_dir]) ==
base_file_storage_dir):
logging.error("file storage directory '%s' is not under base file"
return result
-def JobQueuePurge():
- """Removes job queue files and archived jobs
def _IsJobQueueFile(file_name):
  """Checks whether the given filename is in the queue directory.

  @type file_name: str
  @param file_name: the file name to check
  @rtype: bool
  @return: whether the file lives under the queue directory

  """
  queue_dir = os.path.normpath(constants.QUEUE_DIR)
  file_name = os.path.normpath(file_name)
  # Note: os.path.commonprefix() compares character-wise, so a sibling
  # path such as "<queue_dir>-backup/job" would wrongly pass; compare
  # on a path-separator boundary instead.
  result = (file_name == queue_dir or
            file_name.startswith(queue_dir + os.path.sep))

  if not result:
    logging.error("'%s' is not a file in the queue directory",
                  file_name)

  return result
+
+
def JobQueueUpdate(file_name, content):
  """Updates a file in the queue directory.

  @type file_name: str
  @param file_name: the target file; must live under the queue directory
  @type content: str
  @param content: the new contents of the file
  @rtype: boolean
  @return: True on success, False if the file is outside the queue

  """
  if _IsJobQueueFile(file_name):
    # Write and replace the file atomically
    utils.WriteFile(file_name, data=content)
    return True
  return False
+
+
def JobQueueRename(old, new):
  """Renames a job queue file.

  @type old: str
  @param old: the current name of the file
  @type new: str
  @param new: the new name of the file
  @rtype: boolean
  @return: True on success, False if either name is outside the queue

  """
  if _IsJobQueueFile(old) and _IsJobQueueFile(new):
    os.rename(old, new)
    return True
  return False
+
+
def JobQueueSetDrainFlag(drain_flag):
  """Set the drain flag for the queue.

  This will set or unset the queue drain flag.

  @type drain_flag: bool
  @param drain_flag: if True, will set the drain flag, otherwise reset it.

  """
  # The drain state is represented by the mere existence of the file
  if not drain_flag:
    utils.RemoveFile(constants.JOB_QUEUE_DRAIN_FILE)
  else:
    utils.WriteFile(constants.JOB_QUEUE_DRAIN_FILE, data="", close=True)

  return True
def CloseBlockDevices(disks):
return (True, "All devices secondary")
+def ValidateHVParams(hvname, hvparams):
+ """Validates the given hypervisor parameters.
+
+ @type hvname: string
+ @param hvname: the hypervisor name
+ @type hvparams: dict
+ @param hvparams: the hypervisor parameters to be validated
+ @rtype: tuple (bool, str)
+ @return: tuple of (success, message)
+
+ """
+ try:
+ hv_type = hypervisor.GetHypervisor(hvname)
+ hv_type.ValidateParameters(hvparams)
+ return (True, "Validation passed")
+ except errors.HypervisorError, err:
+ return (False, str(err))
+
+
class HooksRunner(object):
"""Hook runner.