+def BlockdevRename(devlist):
+ """Rename a list of block devices.
+
+  @type devlist: list of tuples
+  @param devlist: list of tuples of the form (disk, new_unique_id);
+    disk is an L{objects.Disk} object describing the current disk,
+    and new_unique_id is the unique_id (e.g. the new logical_id)
+    we rename it to
+ @rtype: boolean
+ @return: True if all renames succeeded, False otherwise
+
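+  Example (hypothetical disk object and new unique_id)::
+
+      # rename one LVM-backed disk to a new (vg, lv) pair
+      BlockdevRename([(disk, ("xenvg", "new-lv-name"))])
+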
+ """
+ result = True
+ for disk, unique_id in devlist:
+ dev = _RecursiveFindBD(disk)
+ if dev is None:
+ result = False
+ continue
+ try:
+ old_rpath = dev.dev_path
+ dev.Rename(unique_id)
+ new_rpath = dev.dev_path
+ if old_rpath != new_rpath:
+ DevCacheManager.RemoveCache(old_rpath)
+ # FIXME: we should add the new cache information here, like:
+ # DevCacheManager.UpdateCache(new_rpath, owner, ...)
+ # but we don't have the owner here - maybe parse from existing
+ # cache? for now, we only lose lvm data when we rename, which
+ # is less critical than DRBD or MD
+ except errors.BlockDeviceError:
+ logging.exception("Can't rename device '%s' to '%s'", dev, unique_id)
+ result = False
+ return result
+
+
+def _TransformFileStorageDir(file_storage_dir):
+ """Checks whether given file_storage_dir is valid.
+
+ Checks wheter the given file_storage_dir is within the cluster-wide
+ default file_storage_dir stored in SimpleStore. Only paths under that
+ directory are allowed.
+
+ @type file_storage_dir: str
+ @param file_storage_dir: the path to check
+
+  @rtype: str or None
+  @return: the normalized path if valid, None otherwise
+
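+  Example, assuming the cluster-wide base directory is
+  C{/srv/ganeti/file-storage}::
+
+      _TransformFileStorageDir("/srv/ganeti/file-storage/inst1")
+      # => "/srv/ganeti/file-storage/inst1"
+      _TransformFileStorageDir("/tmp/foo")
+      # => None (an error is logged)
+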
+ """
+ cfg = _GetConfig()
+ file_storage_dir = os.path.normpath(file_storage_dir)
+  base_file_storage_dir = os.path.normpath(cfg.GetFileStorageDir())
+  # os.path.commonprefix works character-wise, so it would also accept
+  # e.g. base_file_storage_dir + "-evil"; check the directory boundary
+  # explicitly instead
+  if (file_storage_dir != base_file_storage_dir and
+      not file_storage_dir.startswith(base_file_storage_dir + os.sep)):
+ logging.error("file storage directory '%s' is not under base file"
+ " storage directory '%s'",
+ file_storage_dir, base_file_storage_dir)
+ return None
+ return file_storage_dir
+
+
+def CreateFileStorageDir(file_storage_dir):
+ """Create file storage directory.
+
+ @type file_storage_dir: str
+ @param file_storage_dir: directory to create
+
+ @rtype: tuple
+  @return: tuple with first element a boolean indicating whether dir
+ creation was successful or not
+
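+  Example, unpacking the one-element result tuple (the path is
+  hypothetical)::
+
+      (success, ) = CreateFileStorageDir("/srv/ganeti/file-storage/inst1")
+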
+ """
+ file_storage_dir = _TransformFileStorageDir(file_storage_dir)
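+  # note the trailing commas below: each one builds the 1-element
+  # result tuple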
+ result = True,
+ if not file_storage_dir:
+ result = False,
+ else:
+ if os.path.exists(file_storage_dir):
+ if not os.path.isdir(file_storage_dir):
+ logging.error("'%s' is not a directory", file_storage_dir)
+ result = False,
+ else:
+ try:
+ os.makedirs(file_storage_dir, 0750)
+ except OSError, err:
+ logging.error("Cannot create file storage directory '%s': %s",
+ file_storage_dir, err)
+ result = False,
+ return result
+
+
+def RemoveFileStorageDir(file_storage_dir):
+ """Remove file storage directory.
+
+  Remove it only if it's empty. If not, log an error and return.
+
+ @type file_storage_dir: str
+ @param file_storage_dir: the directory we should cleanup
+ @rtype: tuple (success,)
+ @return: tuple of one element, C{success}, denoting
+ whether the operation was successful
+
+ """
+ file_storage_dir = _TransformFileStorageDir(file_storage_dir)
+ result = True,
+ if not file_storage_dir:
+ result = False,
+ else:
+ if os.path.exists(file_storage_dir):
+      if not os.path.isdir(file_storage_dir):
+        logging.error("'%s' is not a directory", file_storage_dir)
+        result = False,
+      else:
+        # deletes dir only if empty, otherwise we want to return False
+        try:
+          os.rmdir(file_storage_dir)
+        except OSError:
+          logging.exception("Cannot remove file storage directory '%s'",
+                            file_storage_dir)
+          result = False,
+ return result
+
+
+def RenameFileStorageDir(old_file_storage_dir, new_file_storage_dir):
+ """Rename the file storage directory.
+
+ @type old_file_storage_dir: str
+ @param old_file_storage_dir: the current path
+ @type new_file_storage_dir: str
+ @param new_file_storage_dir: the name we should rename to
+ @rtype: tuple (success,)
+ @return: tuple of one element, C{success}, denoting
+ whether the operation was successful
+
+ """
+ old_file_storage_dir = _TransformFileStorageDir(old_file_storage_dir)
+ new_file_storage_dir = _TransformFileStorageDir(new_file_storage_dir)
+ result = True,
+ if not old_file_storage_dir or not new_file_storage_dir:
+ result = False,
+ else:
+ if not os.path.exists(new_file_storage_dir):
+ if os.path.isdir(old_file_storage_dir):
+ try:
+ os.rename(old_file_storage_dir, new_file_storage_dir)
+ except OSError:
+ logging.exception("Cannot rename '%s' to '%s'",
+ old_file_storage_dir, new_file_storage_dir)
+ result = False,
+ else:
+ logging.error("'%s' is not a directory", old_file_storage_dir)
+ result = False,
+ else:
+ if os.path.exists(old_file_storage_dir):
+ logging.error("Cannot rename '%s' to '%s'. Both locations exist.",
+ old_file_storage_dir, new_file_storage_dir)
+ result = False,
+ return result
+
+
+def _IsJobQueueFile(file_name):
+ """Checks whether the given filename is in the queue directory.
+
+ @type file_name: str
+ @param file_name: the file name we should check
+ @rtype: boolean
+ @return: whether the file is under the queue directory
+
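+  Example, assuming C{constants.QUEUE_DIR} is C{/var/lib/ganeti/queue}::
+
+      _IsJobQueueFile("/var/lib/ganeti/queue/job-1")  # => True
+      _IsJobQueueFile("/tmp/job-1")  # => False (and an error is logged)
+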
+ """
+  queue_dir = os.path.normpath(constants.QUEUE_DIR)
+  file_name = os.path.normpath(file_name)
+  # a character-wise prefix test (os.path.commonprefix) would also accept
+  # e.g. QUEUE_DIR + "-evil", so check the directory boundary explicitly
+  result = (file_name == queue_dir or
+            file_name.startswith(queue_dir + os.sep))
+
+  if not result:
+    logging.error("'%s' is not a file in the queue directory", file_name)
+
+ return result
+
+
+def JobQueueUpdate(file_name, content):
+ """Updates a file in the queue directory.
+
+ This is just a wrapper over L{utils.WriteFile}, with proper
+ checking.
+
+ @type file_name: str
+ @param file_name: the job file name
+ @type content: str
+ @param content: the new job contents
+ @rtype: boolean
+ @return: the success of the operation
+
+ """
+ if not _IsJobQueueFile(file_name):
+ return False
+
+ # Write and replace the file atomically
+ utils.WriteFile(file_name, data=_Decompress(content))
+
+ return True
+
+
+def JobQueueRename(old, new):
+ """Renames a job queue file.
+
+  This is just a wrapper over L{utils.RenameFile}, with proper checking.
+
+ @type old: str
+ @param old: the old (actual) file name
+ @type new: str
+ @param new: the desired file name
+ @rtype: boolean
+ @return: the success of the operation
+
+ """
+ if not (_IsJobQueueFile(old) and _IsJobQueueFile(new)):
+ return False
+
+ utils.RenameFile(old, new, mkdir=True)
+
+ return True
+
+
+def JobQueueSetDrainFlag(drain_flag):
+ """Set the drain flag for the queue.
+
+ This will set or unset the queue drain flag.
+
+ @type drain_flag: boolean
+ @param drain_flag: if True, will set the drain flag, otherwise reset it.
+ @rtype: boolean
+ @return: always True
+ @warning: the function always returns True
+
+ """
+ if drain_flag:
+ utils.WriteFile(constants.JOB_QUEUE_DRAIN_FILE, data="", close=True)
+ else:
+ utils.RemoveFile(constants.JOB_QUEUE_DRAIN_FILE)
+
+ return True
+
+
+def BlockdevClose(instance_name, disks):
+ """Closes the given block devices.
+
+ This means they will be switched to secondary mode (in case of
+ DRBD).
+
+  @type instance_name: string
+  @param instance_name: if the argument is not empty, the symlinks
+    of this instance will be removed
+ @type disks: list of L{objects.Disk}
+ @param disks: the list of disks to be closed
+ @rtype: tuple (success, message)
+  @return: a tuple of success and message, where success
+      indicates the success of the operation and message
+      contains the error details in case we failed
+
+ """
+ bdevs = []
+ for cf in disks:
+ rd = _RecursiveFindBD(cf)
+ if rd is None:
+ return (False, "Can't find device %s" % cf)
+ bdevs.append(rd)
+
+ msg = []
+ for rd in bdevs:
+ try:
+ rd.Close()
+ except errors.BlockDeviceError, err:
+ msg.append(str(err))
+ if msg:
+ return (False, "Can't make devices secondary: %s" % ",".join(msg))
+ else:
+ if instance_name:
+ _RemoveBlockDevLinks(instance_name, disks)
+ return (True, "All devices secondary")
+
+
+def ValidateHVParams(hvname, hvparams):
+ """Validates the given hypervisor parameters.
+
+ @type hvname: string
+ @param hvname: the hypervisor name
+ @type hvparams: dict
+ @param hvparams: the hypervisor parameters to be validated
+ @rtype: tuple (success, message)
+  @return: a tuple of success and message, where success
+      indicates the success of the operation and message
+      contains the error details in case we failed
+
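+  Example (hypothetical hypervisor name and parameter)::
+
+      (ok, details) = ValidateHVParams("xen-pvm",
+                                       {"kernel_path": "/boot/vmlinuz"})
+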
+ """
+ try:
+ hv_type = hypervisor.GetHypervisor(hvname)
+ hv_type.ValidateParameters(hvparams)
+ return (True, "Validation passed")
+ except errors.HypervisorError, err:
+ return (False, str(err))
+
+
+def DemoteFromMC():
+ """Demotes the current node from master candidate role.
+
+ """
+ # try to ensure we're not the master by mistake
+ master, myself = ssconf.GetMasterAndMyself()
+ if master == myself:
+ return (False, "ssconf status shows I'm the master node, will not demote")
+ pid_file = utils.DaemonPidFileName(constants.MASTERD_PID)
+ if utils.IsProcessAlive(utils.ReadPidFile(pid_file)):
+ return (False, "The master daemon is running, will not demote")
+ try:
+ if os.path.isfile(constants.CLUSTER_CONF_FILE):
+ utils.CreateBackup(constants.CLUSTER_CONF_FILE)
+ except EnvironmentError, err:
+ if err.errno != errno.ENOENT:
+ return (False, "Error while backing up cluster file: %s" % str(err))
+ utils.RemoveFile(constants.CLUSTER_CONF_FILE)
+ return (True, "Done")
+
+
+def _FindDisks(nodes_ip, disks):
+ """Sets the physical ID on disks and returns the block devices.
+
+ """
+ # set the correct physical ID
+ my_name = utils.HostInfo().name
+ for cf in disks:
+ cf.SetPhysicalID(my_name, nodes_ip)
+
+ bdevs = []
+
+ for cf in disks:
+ rd = _RecursiveFindBD(cf)
+ if rd is None:
+ return (False, "Can't find device %s" % cf)
+ bdevs.append(rd)
+ return (True, bdevs)
+
+
+def DrbdDisconnectNet(nodes_ip, disks):
+ """Disconnects the network on a list of drbd devices.
+
+ """
+ status, bdevs = _FindDisks(nodes_ip, disks)
+ if not status:
+ return status, bdevs
+
+ # disconnect disks
+ for rd in bdevs:
+ try:
+ rd.DisconnectNet()
+ except errors.BlockDeviceError, err:
+ logging.exception("Failed to go into standalone mode")
+ return (False, "Can't change network configuration: %s" % str(err))
+ return (True, "All disks are now disconnected")
+
+
+def DrbdAttachNet(nodes_ip, disks, instance_name, multimaster):
+ """Attaches the network on a list of drbd devices.
+
+ """
+ status, bdevs = _FindDisks(nodes_ip, disks)
+ if not status:
+ return status, bdevs
+
+ if multimaster:
+ for idx, rd in enumerate(bdevs):
+ try:
+ _SymlinkBlockDev(instance_name, rd.dev_path, idx)
+ except EnvironmentError, err:
+ return (False, "Can't create symlink: %s" % str(err))
+ # reconnect disks, switch to new master configuration and if
+ # needed primary mode
+ for rd in bdevs:
+ try:
+ rd.AttachNet(multimaster)
+ except errors.BlockDeviceError, err:
+ return (False, "Can't change network configuration: %s" % str(err))
+ # wait until the disks are connected; we need to retry the re-attach
+ # if the device becomes standalone, as this might happen if the one
+ # node disconnects and reconnects in a different mode before the
+ # other node reconnects; in this case, one or both of the nodes will
+ # decide it has wrong configuration and switch to standalone
+ RECONNECT_TIMEOUT = 2 * 60
+  sleep_time = 0.100 # start with 100 milliseconds
+ timeout_limit = time.time() + RECONNECT_TIMEOUT
+ while time.time() < timeout_limit:
+ all_connected = True
+ for rd in bdevs:
+ stats = rd.GetProcStatus()
+ if not (stats.is_connected or stats.is_in_resync):
+ all_connected = False
+ if stats.is_standalone:
+ # peer had different config info and this node became
+ # standalone, even though this should not happen with the
+ # new staged way of changing disk configs
+ try:
+ rd.AttachNet(multimaster)
+ except errors.BlockDeviceError, err:
+ return (False, "Can't change network configuration: %s" % str(err))
+ if all_connected:
+ break
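+    # exponential backoff, capped at five seconds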
+ time.sleep(sleep_time)
+ sleep_time = min(5, sleep_time * 1.5)
+ if not all_connected:
+ return (False, "Timeout in disk reconnecting")
+  if multimaster:
+    # change to primary mode
+    for rd in bdevs:
+      try:
+        rd.Open()
+      except errors.BlockDeviceError, err:
+        return (False, "Can't change to primary mode: %s" % str(err))
+    msg = "multi-master and primary"
+  else:
+    msg = "single-master"
+ return (True, "Disks are now configured as %s" % msg)
+
+
+def DrbdWaitSync(nodes_ip, disks):
+ """Wait until DRBDs have synchronized.
+
+ """
+ status, bdevs = _FindDisks(nodes_ip, disks)
+ if not status:
+ return status, bdevs
+
+ min_resync = 100
+ alldone = True
+ failure = False
+ for rd in bdevs:
+ stats = rd.GetProcStatus()
+ if not (stats.is_connected or stats.is_in_resync):
+ failure = True
+ break
+ alldone = alldone and (not stats.is_in_resync)
+ if stats.sync_percent is not None:
+ min_resync = min(min_resync, stats.sync_percent)
+ return (not failure, (alldone, min_resync))
+
+