Convert the file storage RPCs to the new-style result
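
Backend RPCs used to signal errors by returning None, False or partial data; with the new-style result every call returns a (status, payload) tuple and aborts via _Fail() on errors. The _Fail helper and the RPCFail exception it raises are defined elsewhere in backend.py and handled by ganeti-noded, so they never appear in the hunks below. A rough sketch of the convention follows; ExampleRpc and _HandleCall are made-up names, and this _Fail/RPCFail pair is a simplified approximation for illustration, not the actual ganeti implementation:

  import logging


  class RPCFail(Exception):
    """Raised by backend functions to signal an RPC failure."""


  def _Fail(msg, *args, **kwargs):
    """Log and abort the current RPC with a formatted error message.

    If exc=True is passed, the active exception's traceback is logged too.

    """
    if args:
      msg = msg % args
    if kwargs.get("exc", False):
      logging.exception(msg)
    else:
      logging.error(msg)
    raise RPCFail(msg)


  def ExampleRpc(path):
    """Toy backend call following the (status, payload) convention."""
    if not path.startswith("/"):
      _Fail("Path '%s' is not absolute", path)
    return True, {"path": path}


  def _HandleCall(fn, *args):
    """Mimics how the node daemon turns RPCFail into a (False, msg) result."""
    try:
      return fn(*args)
    except RPCFail, err:
      return False, str(err)

A caller then does e.g. status, payload = _HandleCall(ExampleRpc, "/srv/ganeti") and can rely on payload being either the real result or an error string, depending on status.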
diff --git a/lib/backend.py b/lib/backend.py
index d9dce47..79e44bf 100644
--- a/lib/backend.py
+++ b/lib/backend.py
@@ -161,8 +161,8 @@ def GetMasterInfo():
   for consumption here or from the node daemon.
 
   @rtype: tuple
-  @return: (master_netdev, master_ip, master_name) if we have a good
-      configuration, otherwise (None, None, None)
+  @return: True, (master_netdev, master_ip, master_name) in case of success
+  @raise RPCFail: in case of errors
 
   """
   try:
@@ -171,9 +171,8 @@ def GetMasterInfo():
     master_ip = cfg.GetMasterIP()
     master_node = cfg.GetMasterNode()
   except errors.ConfigurationError, err:
-    logging.exception("Cluster configuration incomplete")
-    return (None, None, None)
-  return (master_netdev, master_ip, master_node)
+    _Fail("Cluster configuration incomplete", exc=True)
+  return True, (master_netdev, master_ip, master_node)
 
 
 def StartMaster(start_daemons):
@@ -189,25 +188,26 @@ def StartMaster(start_daemons):
   @rtype: None
 
   """
-  ok = True
-  master_netdev, master_ip, _ = GetMasterInfo()
-  if not master_netdev:
-    return False
+  # GetMasterInfo will raise an exception if not able to return data
+  master_netdev, master_ip, _ = GetMasterInfo()[1]
 
+  payload = []
   if utils.TcpPing(master_ip, constants.DEFAULT_NODED_PORT):
     if utils.OwnIpAddress(master_ip):
       # we already have the ip:
-      logging.debug("Already started")
+      logging.debug("Master IP already configured, doing nothing")
     else:
-      logging.error("Someone else has the master ip, not activating")
-      ok = False
+      msg = "Someone else has the master ip, not activating"
+      logging.error(msg)
+      payload.append(msg)
   else:
     result = utils.RunCmd(["ip", "address", "add", "%s/32" % master_ip,
                            "dev", master_netdev, "label",
                            "%s:0" % master_netdev])
     if result.failed:
-      logging.error("Can't activate master IP: %s", result.output)
-      ok = False
+      msg = "Can't activate master IP: %s" % result.output
+      logging.error(msg)
+      payload.append(msg)
 
     result = utils.RunCmd(["arping", "-q", "-U", "-c 3", "-I", master_netdev,
                            "-s", master_ip, master_ip])
@@ -218,9 +218,11 @@ def StartMaster(start_daemons):
     for daemon in 'ganeti-masterd', 'ganeti-rapi':
       result = utils.RunCmd([daemon])
       if result.failed:
-        logging.error("Can't start daemon %s: %s", daemon, result.output)
-        ok = False
-  return ok
+        msg = "Can't start daemon %s: %s" % (daemon, result.output)
+        logging.error(msg)
+        payload.append(msg)
+
+  return not payload, "; ".join(payload)
 
 
 def StopMaster(stop_daemons):
@@ -236,9 +238,11 @@ def StopMaster(stop_daemons):
   @rtype: None
 
   """
-  master_netdev, master_ip, _ = GetMasterInfo()
-  if not master_netdev:
-    return False
+  # TODO: log the failures and report them back to the caller; we
+  # still need to decide in which cases this should fail the RPC
+
+  # GetMasterInfo will raise an exception if not able to return data
+  master_netdev, master_ip, _ = GetMasterInfo()[1]
 
   result = utils.RunCmd(["ip", "address", "del", "%s/32" % master_ip,
                          "dev", master_netdev])
@@ -251,7 +255,7 @@ def StopMaster(stop_daemons):
     for daemon in constants.RAPI_PID, constants.MASTERD_PID:
       utils.KillProcess(utils.ReadPidFile(utils.DaemonPidFileName(daemon)))
 
-  return True
+  return True, None
 
 
 def AddNode(dsa, dsapub, rsa, rsapub, sshkey, sshpub):
@@ -317,21 +321,20 @@ def LeaveCluster():
 
   try:
     priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)
-  except errors.OpExecError:
-    logging.exception("Error while processing ssh files")
-    return
 
-  f = open(pub_key, 'r')
-  try:
-    utils.RemoveAuthorizedKey(auth_keys, f.read(8192))
-  finally:
-    f.close()
+    f = open(pub_key, 'r')
+    try:
+      utils.RemoveAuthorizedKey(auth_keys, f.read(8192))
+    finally:
+      f.close()
 
-  utils.RemoveFile(priv_key)
-  utils.RemoveFile(pub_key)
+    utils.RemoveFile(priv_key)
+    utils.RemoveFile(pub_key)
+  except errors.OpExecError:
+    logging.exception("Error while processing ssh files")
 
-  # Return a reassuring string to the caller, and quit
-  raise errors.QuitGanetiException(False, 'Shutdown scheduled')
+  # Raise a custom exception (handled in ganeti-noded)
+  raise errors.QuitGanetiException(True, 'Shutdown scheduled')
 
 
 def GetNodeInfo(vgname, hypervisor_type):
@@ -367,7 +370,7 @@ def GetNodeInfo(vgname, hypervisor_type):
   finally:
     f.close()
 
-  return outputarray
+  return True, outputarray
 
 
 def VerifyNode(what, cluster_name):
@@ -451,7 +454,7 @@ def VerifyNode(what, cluster_name):
       what[constants.NV_INSTANCELIST])
 
   if constants.NV_VGLIST in what:
-    result[constants.NV_VGLIST] = ListVolumeGroups()
+    result[constants.NV_VGLIST] = utils.ListVolumeGroups()
 
   if constants.NV_VERSION in what:
     result[constants.NV_VERSION] = (constants.PROTOCOL_VERSION,
@@ -469,7 +472,7 @@ def VerifyNode(what, cluster_name):
       used_minors = str(err)
     result[constants.NV_DRBDLIST] = used_minors
 
-  return result
+  return True, result
 
 
 def GetVolumeList(vg_name):
@@ -494,9 +497,7 @@ def GetVolumeList(vg_name):
                          "--separator=%s" % sep,
                          "-olv_name,lv_size,lv_attr", vg_name])
   if result.failed:
-    logging.error("Failed to list logical volumes, lvs output: %s",
-                  result.output)
-    return result.output
+    _Fail("Failed to list logical volumes, lvs output: %s", result.output)
 
   valid_line_re = re.compile("^ *([^|]+)\|([0-9.]+)\|([^|]{6})\|?$")
   for line in result.stdout.splitlines():
@@ -521,7 +522,7 @@ def ListVolumeGroups():
       size of the volume
 
   """
-  return utils.ListVolumeGroups()
+  return True, utils.ListVolumeGroups()
 
 
 def NodeVolumes():
@@ -547,9 +548,8 @@ def NodeVolumes():
                          "--separator=|",
                          "--options=lv_name,lv_size,devices,vg_name"])
   if result.failed:
-    logging.error("Failed to list logical volumes, lvs output: %s",
-                  result.output)
-    return []
+    _Fail("Failed to list logical volumes, lvs output: %s",
+          result.output)
 
   def parse_dev(dev):
     if '(' in dev:
@@ -565,8 +565,9 @@ def NodeVolumes():
       'vg': line[3].strip(),
     }
 
-  return [map_line(line.split('|')) for line in result.stdout.splitlines()
-          if line.count('|') >= 3]
+  return True, [map_line(line.split('|'))
+                for line in result.stdout.splitlines()
+                if line.count('|') >= 3]
 
 
 def BridgesExist(bridges_list):
@@ -576,11 +577,15 @@ def BridgesExist(bridges_list):
   @return: C{True} if all of them exist, C{False} otherwise
 
   """
+  missing = []
   for bridge in bridges_list:
     if not utils.BridgeExists(bridge):
-      return False
+      missing.append(bridge)
 
-  return True
+  if missing:
+    return False, "Missing bridges %s" % (", ".join(missing),)
+
+  return True, None
 
 
 def GetInstanceList(hypervisor_list):
@@ -601,8 +606,8 @@ def GetInstanceList(hypervisor_list):
       names = hypervisor.GetHypervisor(hname).ListInstances()
       results.extend(names)
     except errors.HypervisorError, err:
-      logging.exception("Error enumerating instances for hypevisor %s", hname)
-      raise
+      _Fail("Error enumerating instances (hypervisor %s): %s",
+            hname, err, exc=True)
 
   return results
 
@@ -630,7 +635,7 @@ def GetInstanceInfo(instance, hname):
     output['state'] = iinfo[4]
     output['time'] = iinfo[5]
 
-  return output
+  return True, output
 
 
 def GetInstanceMigratable(instance):
@@ -693,11 +698,11 @@ def GetAllInstancesInfo(hypervisor_list):
           # invocations of the different hypervisors
           for key in 'memory', 'vcpus':
             if value[key] != output[name][key]:
-              raise errors.HypervisorError("Instance %s is running twice"
-                                           " with different parameters" % name)
+              _Fail("Instance %s is running twice"
+                    " with different parameters", name)
         output[name] = value
 
-  return output
+  return True, output
 
 
 def InstanceOsAdd(instance, reinstall):
@@ -1480,6 +1485,7 @@ def WriteSsconfFiles(values):
 
   """
   ssconf.SimpleStore().WriteFiles(values)
+  return True, None
 
 
 def _ErrnoOrStr(err):
@@ -1946,13 +1952,12 @@ def ImportOSIntoInstance(instance, src_node, src_images, cluster_name):
         logging.error("Disk import command '%s' returned error: %s"
                       " output: %s", command, result.fail_reason,
                       result.output)
-        final_result.append(False)
-      else:
-        final_result.append(True)
-    else:
-      final_result.append(True)
+        final_result.append("error importing disk %d: %s, %s" %
+                            (idx, result.fail_reason, result.output[-100:]))
 
-  return final_result
+  if final_result:
+    return False, "; ".join(final_result)
+  return True, None
 
 
 def ListExports():
@@ -1979,11 +1984,12 @@ def RemoveExport(export):
   """
   target = os.path.join(constants.EXPORT_DIR, export)
 
-  shutil.rmtree(target)
-  # TODO: catch some of the relevant exceptions and provide a pretty
-  # error message if rmtree fails.
+  try:
+    shutil.rmtree(target)
+  except EnvironmentError, err:
+    _Fail("Error while removing the export: %s", err, exc=True)
 
-  return True
+  return True, None
 
 
 def BlockdevRename(devlist):
@@ -2044,10 +2050,8 @@ def _TransformFileStorageDir(file_storage_dir):
   base_file_storage_dir = cfg.GetFileStorageDir()
   if (not os.path.commonprefix([file_storage_dir, base_file_storage_dir]) ==
       base_file_storage_dir):
-    logging.error("file storage directory '%s' is not under base file"
-                  " storage directory '%s'",
-                  file_storage_dir, base_file_storage_dir)
-    return None
+    _Fail("File storage directory '%s' is not under base file"
+          " storage directory '%s'", file_storage_dir, base_file_storage_dir)
   return file_storage_dir
 
 
@@ -2063,22 +2067,17 @@ def CreateFileStorageDir(file_storage_dir):
 
   """
   file_storage_dir = _TransformFileStorageDir(file_storage_dir)
-  result = True,
-  if not file_storage_dir:
-    result = False,
+  if os.path.exists(file_storage_dir):
+    if not os.path.isdir(file_storage_dir):
+      _Fail("Specified storage dir '%s' is not a directory",
+            file_storage_dir)
   else:
-    if os.path.exists(file_storage_dir):
-      if not os.path.isdir(file_storage_dir):
-        logging.error("'%s' is not a directory", file_storage_dir)
-        result = False,
-    else:
-      try:
-        os.makedirs(file_storage_dir, 0750)
-      except OSError, err:
-        logging.error("Cannot create file storage directory '%s': %s",
-                      file_storage_dir, err)
-        result = False,
-  return result
+    try:
+      os.makedirs(file_storage_dir, 0750)
+    except OSError, err:
+      _Fail("Cannot create file storage directory '%s': %s",
+            file_storage_dir, err, exc=True)
+  return True, None
 
 
 def RemoveFileStorageDir(file_storage_dir):
@@ -2094,22 +2093,18 @@ def RemoveFileStorageDir(file_storage_dir):
 
   """
   file_storage_dir = _TransformFileStorageDir(file_storage_dir)
-  result = True,
-  if not file_storage_dir:
-    result = False,
-  else:
-    if os.path.exists(file_storage_dir):
-      if not os.path.isdir(file_storage_dir):
-        logging.error("'%s' is not a directory", file_storage_dir)
-        result = False,
-      # deletes dir only if empty, otherwise we want to return False
-      try:
-        os.rmdir(file_storage_dir)
-      except OSError, err:
-        logging.exception("Cannot remove file storage directory '%s'",
-                          file_storage_dir)
-        result = False,
-  return result
+  if os.path.exists(file_storage_dir):
+    if not os.path.isdir(file_storage_dir):
+      _Fail("Specified storage directory '%s' is not a directory",
+            file_storage_dir)
+    # deletes dir only if empty, otherwise the rmdir fails and we report it
+    try:
+      os.rmdir(file_storage_dir)
+    except OSError, err:
+      _Fail("Cannot remove file storage directory '%s': %s",
+            file_storage_dir, err)
+
+  return True, None
 
 
 def RenameFileStorageDir(old_file_storage_dir, new_file_storage_dir):
@@ -2126,27 +2121,21 @@ def RenameFileStorageDir(old_file_storage_dir, new_file_storage_dir):
   """
   old_file_storage_dir = _TransformFileStorageDir(old_file_storage_dir)
   new_file_storage_dir = _TransformFileStorageDir(new_file_storage_dir)
-  result = True,
-  if not old_file_storage_dir or not new_file_storage_dir:
-    result = False,
-  else:
-    if not os.path.exists(new_file_storage_dir):
-      if os.path.isdir(old_file_storage_dir):
-        try:
-          os.rename(old_file_storage_dir, new_file_storage_dir)
-        except OSError, err:
-          logging.exception("Cannot rename '%s' to '%s'",
-                            old_file_storage_dir, new_file_storage_dir)
-          result =  False,
-      else:
-        logging.error("'%s' is not a directory", old_file_storage_dir)
-        result = False,
+  if not os.path.exists(new_file_storage_dir):
+    if os.path.isdir(old_file_storage_dir):
+      try:
+        os.rename(old_file_storage_dir, new_file_storage_dir)
+      except OSError, err:
+        _Fail("Cannot rename '%s' to '%s': %s",
+              old_file_storage_dir, new_file_storage_dir, err)
     else:
-      if os.path.exists(old_file_storage_dir):
-        logging.error("Cannot rename '%s' to '%s'. Both locations exist.",
-                      old_file_storage_dir, new_file_storage_dir)
-        result = False,
-  return result
+      _Fail("Specified storage dir '%s' is not a directory",
+            old_file_storage_dir)
+  else:
+    if os.path.exists(old_file_storage_dir):
+      _Fail("Cannot rename '%s' to '%s': both locations exist",
+            old_file_storage_dir, new_file_storage_dir)
+  return True, None
 
 
 def _IsJobQueueFile(file_name):
@@ -2551,7 +2540,8 @@ class HooksRunner(object):
     elif phase == constants.HOOKS_PHASE_POST:
       suffix = "post"
     else:
-      raise errors.ProgrammerError("Unknown hooks phase: '%s'" % phase)
+      _Fail("Unknown hooks phase '%s'", phase)
+
     rr = []
 
     subdir = "%s-%s.d" % (hpath, suffix)
@@ -2560,7 +2550,7 @@ class HooksRunner(object):
       dir_contents = utils.ListVisibleFiles(dir_name)
     except OSError, err:
       # FIXME: must log output in case of failures
-      return rr
+      return True, rr
 
     # we use the standard python sort order,
     # so 00name is the recommended naming scheme
@@ -2579,7 +2569,7 @@ class HooksRunner(object):
           rrval = constants.HKR_SUCCESS
       rr.append(("%s/%s" % (subdir, relname), rrval, output))
 
-    return rr
+    return True, rr
 
 
 class IAllocatorRunner(object):
@@ -2598,17 +2588,15 @@ class IAllocatorRunner(object):
     @param idata: the allocator input data
 
     @rtype: tuple
-    @return: four element tuple of:
-       - run status (one of the IARUN_ constants)
-       - stdout
-       - stderr
-       - fail reason (as from L{utils.RunResult})
+    @return: two element tuple of:
+       - status
+       - either error message or stdout of allocator (for success)
 
     """
     alloc_script = utils.FindFile(name, constants.IALLOCATOR_SEARCH_PATH,
                                   os.path.isfile)
     if alloc_script is None:
-      return (constants.IARUN_NOTFOUND, None, None, None)
+      _Fail("iallocator module '%s' not found in the search path", name)
 
     fd, fin_name = tempfile.mkstemp(prefix="ganeti-iallocator.")
     try:
@@ -2616,12 +2604,12 @@ class IAllocatorRunner(object):
       os.close(fd)
       result = utils.RunCmd([alloc_script, fin_name])
       if result.failed:
-        return (constants.IARUN_FAILURE, result.stdout, result.stderr,
-                result.fail_reason)
+        _Fail("iallocator module '%s' failed: %s, output '%s'",
+              name, result.fail_reason, result.output)
     finally:
       os.unlink(fin_name)
 
-    return (constants.IARUN_SUCCESS, result.stdout, result.stderr, None)
+    return True, result.stdout
 
 
 class DevCacheManager(object):