"""
if not os.path.isabs(file_name):
- logging.error("Filename passed to UploadFile is not absolute: '%s'",
- file_name)
- return False
+ err = "Filename passed to UploadFile is not absolute: '%s'" % file_name
+ logging.error(err)
+ return (False, err)
- allowed_files = [
+ allowed_files = set([
constants.CLUSTER_CONF_FILE,
constants.ETC_HOSTS,
constants.SSH_KNOWN_HOSTS_FILE,
constants.VNC_PASSWORD_FILE,
- ]
+ constants.RAPI_CERT_FILE,
+ constants.RAPI_USERS_FILE,
+ ])
+
+ for hv_name in constants.HYPER_TYPES:
+ hv_class = hypervisor.GetHypervisor(hv_name)
+ allowed_files.update(hv_class.GetAncillaryFiles())
if file_name not in allowed_files:
- logging.error("Filename passed to UploadFile not in allowed"
- " upload targets: '%s'", file_name)
- return False
+ err = "Filename passed to UploadFile not in allowed upload targets: '%s'" \
+ % file_name
+ logging.error(err)
+ return (False, err)
raw_data = _Decompress(data)
utils.WriteFile(file_name, data=raw_data, mode=mode, uid=uid, gid=gid,
atime=atime, mtime=mtime)
- return True
+ return (True, "success")
def WriteSsconfFiles(values):
return (not failure, (alldone, min_resync))
+def PowercycleNode(hypervisor_type):
+  """Hard-powercycle the node.
+
+  Because we need to return first, and schedule the powercycle in the
+  background, we won't be able to report failures nicely.
+
+  The parent process returns immediately with a (True, message) tuple;
+  the forked child sleeps five seconds (so the RPC reply can be sent)
+  and then asks the hypervisor to powercycle the machine.
+
+  """
+  hyper = hypervisor.GetHypervisor(hypervisor_type)
+  try:
+    pid = os.fork()
+  except OSError, err:
+    # if we can't fork, we'll pretend that we're in the child process
+    pid = 0
+  if pid > 0:
+    # parent: report success now; the powercycle itself happens later in
+    # the child and cannot be reported back
+    return (True, "Reboot scheduled in 5 seconds")
+  # child (or fork-failed) path: delay so the parent's reply gets out,
+  # then hard-reset via the hypervisor.  Presumably PowercycleNode()
+  # resets the machine and never returns; NOTE(review): if it *does*
+  # return (e.g. on failure), execution falls through into the caller
+  # without an explicit exit -- confirm this is acceptable.
+  time.sleep(5)
+  hyper.PowercycleNode()
+
+
class HooksRunner(object):
"""Hook runner.