if constants.NV_DRBDLIST in what:
try:
used_minors = bdev.DRBD8.GetUsedDevs().keys()
- except errors.BlockDeviceError:
+ except errors.BlockDeviceError, err:
logging.warning("Can't get used minors list", exc_info=True)
- used_minors = []
+ used_minors = str(err)
result[constants.NV_DRBDLIST] = used_minors
return result
results.extend(names)
except errors.HypervisorError, err:
logging.exception("Error enumerating instances for hypevisor %s", hname)
- # FIXME: should we somehow not propagate this to the master?
raise
return results
'state': state,
'time': times,
}
- if name in output and output[name] != value:
- raise errors.HypervisorError("Instance %s running duplicate"
- " with different parameters" % name)
+ if name in output:
+ # we only check static parameters, like memory and vcpus,
+ # and not state and time which can change between the
+ # invocations of the different hypervisors
+ for key in 'memory', 'vcpus':
+ if value[key] != output[name][key]:
+ raise errors.HypervisorError("Instance %s is running twice"
+ " with different parameters" % name)
output[name] = value
return output
-def InstanceOsAdd(instance):
+def InstanceOsAdd(instance, reinstall):
"""Add an OS to an instance.
@type instance: L{objects.Instance}
@param instance: Instance whose OS is to be installed
+ @type reinstall: boolean
+ @param reinstall: whether this is an instance reinstall
@rtype: boolean
@return: the success of the operation
(os_name, os_dir, os_err))
create_env = OSEnvironment(instance)
+ if reinstall:
+ create_env['INSTANCE_REINSTALL'] = "1"
logfile = "%s/add-%s-%s-%d.log" % (constants.LOG_OS_DIR, instance.os,
instance.name, int(time.time()))
return block_devices
-def StartInstance(instance, extra_args):
+def StartInstance(instance):
"""Start an instance.
@type instance: L{objects.Instance}
try:
block_devices = _GatherAndLinkBlockDevs(instance)
hyper = hypervisor.GetHypervisor(instance.hypervisor)
- hyper.StartInstance(instance, block_devices, extra_args)
+ hyper.StartInstance(instance, block_devices)
except errors.BlockDeviceError, err:
logging.exception("Failed to start instance")
return (False, "Block device error: %s" % str(err))
return (True, "Instance started successfully")
-def ShutdownInstance(instance):
+def InstanceShutdown(instance):
"""Shut an instance down.
  @note: this function uses polling with a hardcoded timeout.
running_instances = GetInstanceList([hv_name])
if instance.name not in running_instances:
- return True
+ return (True, "Instance already stopped")
hyper = hypervisor.GetHypervisor(hv_name)
try:
hyper.StopInstance(instance)
except errors.HypervisorError, err:
- logging.error("Failed to stop instance: %s" % err)
- return False
+ msg = "Failed to stop instance %s: %s" % (instance.name, err)
+ logging.error(msg)
+ return (False, msg)
# test every 10secs for 2min
try:
hyper.StopInstance(instance, force=True)
except errors.HypervisorError, err:
- logging.exception("Failed to stop instance: %s" % err)
- return False
+ msg = "Failed to force stop instance %s: %s" % (instance.name, err)
+ logging.error(msg)
+ return (False, msg)
time.sleep(1)
if instance.name in GetInstanceList([hv_name]):
- logging.error("Could not shutdown instance '%s' even by destroy",
- instance.name)
- return False
+ msg = ("Could not shutdown instance %s even by destroy" %
+ instance.name)
+ logging.error(msg)
+ return (False, msg)
_RemoveBlockDevLinks(instance.name, instance.disks)
- return True
+ return (True, "Instance has been shutdown successfully")
-def RebootInstance(instance, reboot_type, extra_args):
+def InstanceReboot(instance, reboot_type):
"""Reboot an instance.
@type instance: L{objects.Instance}
running_instances = GetInstanceList([instance.hypervisor])
if instance.name not in running_instances:
- logging.error("Cannot reboot instance that is not running")
- return False
+ msg = "Cannot reboot instance %s that is not running" % instance.name
+ logging.error(msg)
+ return (False, msg)
hyper = hypervisor.GetHypervisor(instance.hypervisor)
if reboot_type == constants.INSTANCE_REBOOT_SOFT:
try:
hyper.RebootInstance(instance)
except errors.HypervisorError, err:
- logging.exception("Failed to soft reboot instance")
- return False
+ msg = "Failed to soft reboot instance %s: %s" % (instance.name, err)
+ logging.error(msg)
+ return (False, msg)
elif reboot_type == constants.INSTANCE_REBOOT_HARD:
try:
- ShutdownInstance(instance)
- StartInstance(instance, extra_args)
+ stop_result = InstanceShutdown(instance)
+ if not stop_result[0]:
+ return stop_result
+ return StartInstance(instance)
except errors.HypervisorError, err:
- logging.exception("Failed to hard reboot instance")
- return False
+ msg = "Failed to hard reboot instance %s: %s" % (instance.name, err)
+ logging.error(msg)
+ return (False, msg)
else:
- raise errors.ParameterError("reboot_type invalid")
+ return (False, "Invalid reboot_type received: %s" % (reboot_type,))
- return True
+ return (True, "Reboot successful")
def MigrationInfo(instance):
try:
crdev.Open()
except errors.BlockDeviceError, err:
- errmsg = "Can't make child '%s' read-write: %s" (child, err)
+ errmsg = "Can't make child '%s' read-write: %s" % (child, err)
logging.error(errmsg)
return False, errmsg
clist.append(crdev)
"""
parent_bdev = _RecursiveFindBD(parent_cdev)
if parent_bdev is None:
- logging.error("Can't find parent device")
- return False
+ msg = "Can't find parent device %s" % str(parent_cdev)
+ logging.error("BlockdevAddchildren: %s", msg)
+ return (False, msg)
new_bdevs = [_RecursiveFindBD(disk) for disk in new_cdevs]
if new_bdevs.count(None) > 0:
- logging.error("Can't find new device(s) to add: %s:%s",
- new_bdevs, new_cdevs)
- return False
+ msg = "Can't find new device(s) to add: %s:%s" % (new_bdevs, new_cdevs)
+ logging.error(msg)
+ return (False, msg)
parent_bdev.AddChildren(new_bdevs)
- return True
+ return (True, None)
def BlockdevRemovechildren(parent_cdev, new_cdevs):
"""
if not os.path.isabs(file_name):
- logging.error("Filename passed to UploadFile is not absolute: '%s'",
- file_name)
- return False
+ err = "Filename passed to UploadFile is not absolute: '%s'" % file_name
+ logging.error(err)
+ return (False, err)
- allowed_files = [
+ allowed_files = set([
constants.CLUSTER_CONF_FILE,
constants.ETC_HOSTS,
constants.SSH_KNOWN_HOSTS_FILE,
constants.VNC_PASSWORD_FILE,
- ]
+ constants.RAPI_CERT_FILE,
+ constants.RAPI_USERS_FILE,
+ ])
+
+ for hv_name in constants.HYPER_TYPES:
+ hv_class = hypervisor.GetHypervisor(hv_name)
+ allowed_files.update(hv_class.GetAncillaryFiles())
if file_name not in allowed_files:
- logging.error("Filename passed to UploadFile not in allowed"
- " upload targets: '%s'", file_name)
- return False
+ err = "Filename passed to UploadFile not in allowed upload targets: '%s'" \
+ % file_name
+ logging.error(err)
+ return (False, err)
raw_data = _Decompress(data)
utils.WriteFile(file_name, data=raw_data, mode=mode, uid=uid, gid=gid,
atime=atime, mtime=mtime)
- return True
+ return (True, "success")
def WriteSsconfFiles(values):
str(disk))
real_disk.Open()
result['DISK_%d_PATH' % idx] = real_disk.dev_path
- # FIXME: When disks will have read-only mode, populate this
result['DISK_%d_ACCESS' % idx] = disk.mode
if constants.HV_DISK_TYPE in instance.hvparams:
result['DISK_%d_FRONTEND_TYPE' % idx] = \
@return: True if all renames succeeded, False otherwise
"""
+ msgs = []
result = True
for disk, unique_id in devlist:
dev = _RecursiveFindBD(disk)
if dev is None:
+ msgs.append("Can't find device %s in rename" % str(disk))
result = False
continue
try:
# cache? for now, we only lose lvm data when we rename, which
# is less critical than DRBD or MD
except errors.BlockDeviceError, err:
+ msgs.append("Can't rename device '%s' to '%s': %s" %
+ (dev, unique_id, err))
logging.exception("Can't rename device '%s' to '%s'", dev, unique_id)
result = False
- return result
+ return (result, "; ".join(msgs))
def _TransformFileStorageDir(file_storage_dir):
return (not failure, (alldone, min_resync))
+def PowercycleNode(hypervisor_type):
+ """Hard-powercycle the node.
+
+ Because we need to return first, and schedule the powercycle in the
+ background, we won't be able to report failures nicely.
+
+ """
+ hyper = hypervisor.GetHypervisor(hypervisor_type)
+ try:
+ pid = os.fork()
+ except OSError, err:
+ # if we can't fork, we'll pretend that we're in the child process
+ pid = 0
+ if pid > 0:
+ return (True, "Reboot scheduled in 5 seconds")
+ time.sleep(5)
+ hyper.PowercycleNode()
+
+
class HooksRunner(object):
"""Hook runner.