cmdlib: Cleanup public/private functions
author Thomas Thrainer <thomasth@google.com>
Tue, 14 May 2013 12:30:08 +0000 (14:30 +0200)
committer Thomas Thrainer <thomasth@google.com>
Fri, 17 May 2013 09:32:16 +0000 (11:32 +0200)
All functions and classes that are used outside of their defining module
(uses in tests excepted) no longer have a leading underscore.

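The convention can be spot-checked mechanically. The following is a minimal
illustrative sketch, not part of this change; it assumes it is run from the
root of a Ganeti checkout (so that lib/cmdlib exists) and flags any remaining
cross-module imports of underscore-prefixed names inside the cmdlib package:

    # Sketch only: reports underscore-prefixed names imported from other
    # ganeti.cmdlib modules, i.e. violations of the convention above.
    import ast
    import os
    import sys

    CMDLIB_DIR = "lib/cmdlib"  # assumed checkout-relative path

    def private_cross_module_imports(path):
        """Yield (lineno, name) for private names imported from ganeti.cmdlib.*."""
        with open(path) as fd:
            tree = ast.parse(fd.read(), filename=path)
        for node in ast.walk(tree):
            if (isinstance(node, ast.ImportFrom) and node.module and
                node.module.startswith("ganeti.cmdlib.")):
                for alias in node.names:
                    if alias.name.startswith("_"):
                        yield node.lineno, alias.name

    def main():
        found = 0
        for fname in sorted(os.listdir(CMDLIB_DIR)):
            if fname.endswith(".py"):
                path = os.path.join(CMDLIB_DIR, fname)
                for lineno, name in private_cross_module_imports(path):
                    print("%s:%d: imports private name %s" % (path, lineno, name))
                    found += 1
        return 1 if found else 0

    if __name__ == "__main__":
        sys.exit(main())
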
Signed-off-by: Thomas Thrainer <thomasth@google.com>
Reviewed-by: Bernardo Dal Seno <bdalseno@google.com>

19 files changed:
lib/cmdlib/backup.py
lib/cmdlib/base.py
lib/cmdlib/cluster.py
lib/cmdlib/common.py
lib/cmdlib/group.py
lib/cmdlib/instance.py
lib/cmdlib/instance_migration.py
lib/cmdlib/instance_operation.py
lib/cmdlib/instance_query.py
lib/cmdlib/instance_storage.py
lib/cmdlib/instance_utils.py
lib/cmdlib/misc.py
lib/cmdlib/network.py
lib/cmdlib/node.py
lib/cmdlib/operating_system.py
lib/cmdlib/query.py
lib/cmdlib/tags.py
lib/cmdlib/test.py
test/py/ganeti.cmdlib_unittest.py

diff --git a/lib/cmdlib/backup.py b/lib/cmdlib/backup.py
index e5954f5..f41ee52 100644
@@ -33,16 +33,16 @@ from ganeti import qlang
 from ganeti import query
 from ganeti import utils
 
-from ganeti.cmdlib.base import _QueryBase, NoHooksLU, LogicalUnit
-from ganeti.cmdlib.common import _GetWantedNodes, _ShareAll, \
-  _CheckNodeOnline, _ExpandNodeName
-from ganeti.cmdlib.instance_storage import _StartInstanceDisks, \
-  _ShutdownInstanceDisks
-from ganeti.cmdlib.instance_utils import _GetClusterDomainSecret, \
-  _BuildInstanceHookEnvByObject, _CheckNodeNotDrained, _RemoveInstance
+from ganeti.cmdlib.base import QueryBase, NoHooksLU, LogicalUnit
+from ganeti.cmdlib.common import GetWantedNodes, ShareAll, CheckNodeOnline, \
+  ExpandNodeName
+from ganeti.cmdlib.instance_storage import StartInstanceDisks, \
+  ShutdownInstanceDisks
+from ganeti.cmdlib.instance_utils import GetClusterDomainSecret, \
+  BuildInstanceHookEnvByObject, CheckNodeNotDrained, RemoveInstance
 
 
-class _ExportQuery(_QueryBase):
+class ExportQuery(QueryBase):
   FIELDS = query.EXPORT_FIELDS
 
   #: The node name is not a unique key for this query
@@ -53,14 +53,14 @@ class _ExportQuery(_QueryBase):
 
     # The following variables interact with _QueryBase._GetNames
     if self.names:
-      self.wanted = _GetWantedNodes(lu, self.names)
+      self.wanted = GetWantedNodes(lu, self.names)
     else:
       self.wanted = locking.ALL_SET
 
     self.do_locking = self.use_locking
 
     if self.do_locking:
-      lu.share_locks = _ShareAll()
+      lu.share_locks = ShareAll()
       lu.needed_locks = {
         locking.LEVEL_NODE: self.wanted,
         }
@@ -102,8 +102,8 @@ class LUBackupQuery(NoHooksLU):
   REQ_BGL = False
 
   def CheckArguments(self):
-    self.expq = _ExportQuery(qlang.MakeSimpleFilter("node", self.op.nodes),
-                             ["node", "export"], self.op.use_locking)
+    self.expq = ExportQuery(qlang.MakeSimpleFilter("node", self.op.nodes),
+                            ["node", "export"], self.op.use_locking)
 
   def ExpandNames(self):
     self.expq.ExpandNames(self)
@@ -141,9 +141,9 @@ class LUBackupPrepare(NoHooksLU):
     self.instance = self.cfg.GetInstanceInfo(instance_name)
     assert self.instance is not None, \
           "Cannot retrieve locked instance %s" % self.op.instance_name
-    _CheckNodeOnline(self, self.instance.primary_node)
+    CheckNodeOnline(self, self.instance.primary_node)
 
-    self._cds = _GetClusterDomainSecret()
+    self._cds = GetClusterDomainSecret()
 
   def Exec(self, feedback_fn):
     """Prepares an instance for an export.
@@ -237,7 +237,7 @@ class LUBackupExport(LogicalUnit):
       "REMOVE_INSTANCE": str(bool(self.op.remove_instance)),
       }
 
-    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
+    env.update(BuildInstanceHookEnvByObject(self, self.instance))
 
     return env
 
@@ -263,7 +263,7 @@ class LUBackupExport(LogicalUnit):
     self.instance = self.cfg.GetInstanceInfo(instance_name)
     assert self.instance is not None, \
           "Cannot retrieve locked instance %s" % self.op.instance_name
-    _CheckNodeOnline(self, self.instance.primary_node)
+    CheckNodeOnline(self, self.instance.primary_node)
 
     if (self.op.remove_instance and
         self.instance.admin_state == constants.ADMINST_UP and
@@ -272,12 +272,12 @@ class LUBackupExport(LogicalUnit):
                                  " down before", errors.ECODE_STATE)
 
     if self.op.mode == constants.EXPORT_MODE_LOCAL:
-      self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
+      self.op.target_node = ExpandNodeName(self.cfg, self.op.target_node)
       self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
       assert self.dst_node is not None
 
-      _CheckNodeOnline(self, self.dst_node.name)
-      _CheckNodeNotDrained(self, self.dst_node.name)
+      CheckNodeOnline(self, self.dst_node.name)
+      CheckNodeNotDrained(self, self.dst_node.name)
 
       self._cds = None
       self.dest_disk_info = None
@@ -293,7 +293,7 @@ class LUBackupExport(LogicalUnit):
                                     len(self.instance.disks)),
                                    errors.ECODE_INVAL)
 
-      cds = _GetClusterDomainSecret()
+      cds = GetClusterDomainSecret()
 
       # Check X509 key name
       try:
@@ -403,7 +403,7 @@ class LUBackupExport(LogicalUnit):
     if activate_disks:
       # Activate the instance disks if we're exporting a stopped instance
       feedback_fn("Activating disks for %s" % instance.name)
-      _StartInstanceDisks(self, instance, None)
+      StartInstanceDisks(self, instance, None)
 
     try:
       helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
@@ -422,7 +422,7 @@ class LUBackupExport(LogicalUnit):
           msg = result.fail_msg
           if msg:
             feedback_fn("Failed to start instance: %s" % msg)
-            _ShutdownInstanceDisks(self, instance)
+            ShutdownInstanceDisks(self, instance)
             raise errors.OpExecError("Could not start instance: %s" % msg)
 
         if self.op.mode == constants.EXPORT_MODE_LOCAL:
@@ -451,7 +451,7 @@ class LUBackupExport(LogicalUnit):
     finally:
       if activate_disks:
         feedback_fn("Deactivating disks for %s" % instance.name)
-        _ShutdownInstanceDisks(self, instance)
+        ShutdownInstanceDisks(self, instance)
 
     if not (compat.all(dresults) and fin_resu):
       failures = []
@@ -470,8 +470,8 @@ class LUBackupExport(LogicalUnit):
     # Remove instance if requested
     if self.op.remove_instance:
       feedback_fn("Removing instance %s" % instance.name)
-      _RemoveInstance(self, feedback_fn, instance,
-                      self.op.ignore_remove_failures)
+      RemoveInstance(self, feedback_fn, instance,
+                     self.op.ignore_remove_failures)
 
     if self.op.mode == constants.EXPORT_MODE_LOCAL:
       self._CleanupExports(feedback_fn)
diff --git a/lib/cmdlib/base.py b/lib/cmdlib/base.py
index 142981f..18ab241 100644
@@ -28,7 +28,7 @@ from ganeti import constants
 from ganeti import locking
 from ganeti import query
 from ganeti import utils
-from ganeti.cmdlib.common import _ExpandInstanceName
+from ganeti.cmdlib.common import ExpandInstanceName
 
 
 class ResultWithJobs:
@@ -319,8 +319,8 @@ class LogicalUnit(object):
     else:
       assert locking.LEVEL_INSTANCE not in self.needed_locks, \
         "_ExpandAndLockInstance called with instance-level locks set"
-    self.op.instance_name = _ExpandInstanceName(self.cfg,
-                                                self.op.instance_name)
+    self.op.instance_name = ExpandInstanceName(self.cfg,
+                                               self.op.instance_name)
     self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
 
   def _LockInstancesNodes(self, primary_only=False,
@@ -444,7 +444,7 @@ class Tasklet:
     raise NotImplementedError
 
 
-class _QueryBase:
+class QueryBase:
   """Base for query utility classes.
 
   """
diff --git a/lib/cmdlib/cluster.py b/lib/cmdlib/cluster.py
index 4514dfe..aee895b 100644
@@ -49,15 +49,14 @@ from ganeti import uidpool
 from ganeti import utils
 from ganeti import vcluster
 
-from ganeti.cmdlib.base import NoHooksLU, _QueryBase, LogicalUnit, \
+from ganeti.cmdlib.base import NoHooksLU, QueryBase, LogicalUnit, \
   ResultWithJobs
-from ganeti.cmdlib.common import _ShareAll, _RunPostHook, \
-  _ComputeAncillaryFiles, _RedistributeAncillaryFiles, _UploadHelper, \
-  _GetWantedInstances, _MergeAndVerifyHvState, _MergeAndVerifyDiskState, \
-  _GetUpdatedIPolicy, _ComputeNewInstanceViolations, _GetUpdatedParams, \
-  _CheckOSParams, _CheckHVParams, _AdjustCandidatePool, _CheckNodePVs, \
-  _ComputeIPolicyInstanceViolation, _AnnotateDiskParams, \
-  _SupportsOob
+from ganeti.cmdlib.common import ShareAll, RunPostHook, \
+  ComputeAncillaryFiles, RedistributeAncillaryFiles, UploadHelper, \
+  GetWantedInstances, MergeAndVerifyHvState, MergeAndVerifyDiskState, \
+  GetUpdatedIPolicy, ComputeNewInstanceViolations, GetUpdatedParams, \
+  CheckOSParams, CheckHVParams, AdjustCandidatePool, CheckNodePVs, \
+  ComputeIPolicyInstanceViolation, AnnotateDiskParams, SupportsOob
 
 import ganeti.masterd.instance
 
@@ -99,7 +98,7 @@ class LUClusterConfigQuery(NoHooksLU):
   REQ_BGL = False
 
   def CheckArguments(self):
-    self.cq = _ClusterQuery(None, self.op.output_fields, False)
+    self.cq = ClusterQuery(None, self.op.output_fields, False)
 
   def ExpandNames(self):
     self.cq.ExpandNames(self)
@@ -164,7 +163,7 @@ class LUClusterDestroy(LogicalUnit):
     master_params = self.cfg.GetMasterNetworkParameters()
 
     # Run post hooks on master node before it's removed
-    _RunPostHook(self, master_params.name)
+    RunPostHook(self, master_params.name)
 
     ems = self.cfg.GetUseExternalMipScript()
     result = self.rpc.call_node_deactivate_master_ip(master_params.name,
@@ -204,7 +203,7 @@ class LUClusterPostInit(LogicalUnit):
     return True
 
 
-class _ClusterQuery(_QueryBase):
+class ClusterQuery(QueryBase):
   FIELDS = query.CLUSTER_FIELDS
 
   #: Do not sort (there is only one item)
@@ -344,14 +343,14 @@ class LUClusterRedistConf(NoHooksLU):
       locking.LEVEL_NODE: locking.ALL_SET,
       locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
     }
-    self.share_locks = _ShareAll()
+    self.share_locks = ShareAll()
 
   def Exec(self, feedback_fn):
     """Redistribute the configuration.
 
     """
     self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
-    _RedistributeAncillaryFiles(self)
+    RedistributeAncillaryFiles(self)
 
 
 class LUClusterRename(LogicalUnit):
@@ -426,7 +425,7 @@ class LUClusterRename(LogicalUnit):
         node_list.remove(master_params.name)
       except ValueError:
         pass
-      _UploadHelper(self, node_list, pathutils.SSH_KNOWN_HOSTS_FILE)
+      UploadHelper(self, node_list, pathutils.SSH_KNOWN_HOSTS_FILE)
     finally:
       master_params.ip = new_ip
       result = self.rpc.call_node_activate_master_ip(master_params.name,
@@ -447,7 +446,7 @@ class LUClusterRepairDiskSizes(NoHooksLU):
 
   def ExpandNames(self):
     if self.op.instances:
-      self.wanted_names = _GetWantedInstances(self, self.op.instances)
+      self.wanted_names = GetWantedInstances(self, self.op.instances)
       # Not getting the node allocation lock as only a specific set of
       # instances (and their nodes) is going to be acquired
       self.needed_locks = {
@@ -633,7 +632,7 @@ class LUClusterSetParams(LogicalUnit):
       locking.LEVEL_NODEGROUP: locking.ALL_SET,
       locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
     }
-    self.share_locks = _ShareAll()
+    self.share_locks = ShareAll()
 
   def BuildHooksEnv(self):
     """Build hooks env.
@@ -727,22 +726,22 @@ class LUClusterSetParams(LogicalUnit):
             constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM]
 
     if self.op.hv_state:
-      new_hv_state = _MergeAndVerifyHvState(self.op.hv_state,
-                                            self.cluster.hv_state_static)
+      new_hv_state = MergeAndVerifyHvState(self.op.hv_state,
+                                           self.cluster.hv_state_static)
       self.new_hv_state = dict((hv, cluster.SimpleFillHvState(values))
                                for hv, values in new_hv_state.items())
 
     if self.op.disk_state:
-      new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state,
-                                                self.cluster.disk_state_static)
+      new_disk_state = MergeAndVerifyDiskState(self.op.disk_state,
+                                               self.cluster.disk_state_static)
       self.new_disk_state = \
         dict((storage, dict((name, cluster.SimpleFillDiskState(values))
                             for name, values in svalues.items()))
              for storage, svalues in new_disk_state.items())
 
     if self.op.ipolicy:
-      self.new_ipolicy = _GetUpdatedIPolicy(cluster.ipolicy, self.op.ipolicy,
-                                            group_policy=False)
+      self.new_ipolicy = GetUpdatedIPolicy(cluster.ipolicy, self.op.ipolicy,
+                                           group_policy=False)
 
       all_instances = self.cfg.GetAllInstancesInfo().values()
       violations = set()
@@ -752,8 +751,8 @@ class LUClusterSetParams(LogicalUnit):
                                              for node in inst.all_nodes)])
         new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
         ipol = masterd.instance.CalculateGroupIPolicy(cluster, group)
-        new = _ComputeNewInstanceViolations(ipol,
-                                            new_ipolicy, instances, self.cfg)
+        new = ComputeNewInstanceViolations(ipol,
+                                           new_ipolicy, instances, self.cfg)
         if new:
           violations.update(new)
 
@@ -831,16 +830,16 @@ class LUClusterSetParams(LogicalUnit):
         if os_name not in self.new_osp:
           self.new_osp[os_name] = {}
 
-        self.new_osp[os_name] = _GetUpdatedParams(self.new_osp[os_name], osp,
-                                                  use_none=True)
+        self.new_osp[os_name] = GetUpdatedParams(self.new_osp[os_name], osp,
+                                                 use_none=True)
 
         if not self.new_osp[os_name]:
           # we removed all parameters
           del self.new_osp[os_name]
         else:
           # check the parameter validity (remote check)
-          _CheckOSParams(self, False, [self.cfg.GetMasterNode()],
-                         os_name, self.new_osp[os_name])
+          CheckOSParams(self, False, [self.cfg.GetMasterNode()],
+                        os_name, self.new_osp[os_name])
 
     # changes to the hypervisor list
     if self.op.enabled_hypervisors is not None:
@@ -868,7 +867,7 @@ class LUClusterSetParams(LogicalUnit):
           hv_class = hypervisor.GetHypervisorClass(hv_name)
           utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
           hv_class.CheckParameterSyntax(hv_params)
-          _CheckHVParams(self, node_list, hv_name, hv_params)
+          CheckHVParams(self, node_list, hv_name, hv_params)
 
     self._CheckDiskTemplateConsistency()
 
@@ -883,7 +882,7 @@ class LUClusterSetParams(LogicalUnit):
           new_osp = objects.FillDict(cluster_defaults, hv_params)
           hv_class = hypervisor.GetHypervisorClass(hv_name)
           hv_class.CheckParameterSyntax(new_osp)
-          _CheckHVParams(self, node_list, hv_name, new_osp)
+          CheckHVParams(self, node_list, hv_name, new_osp)
 
     if self.op.default_iallocator:
       alloc_script = utils.FindFile(self.op.default_iallocator,
@@ -963,7 +962,7 @@ class LUClusterSetParams(LogicalUnit):
     if self.op.candidate_pool_size is not None:
       self.cluster.candidate_pool_size = self.op.candidate_pool_size
       # we need to update the pool size here, otherwise the save will fail
-      _AdjustCandidatePool(self, [])
+      AdjustCandidatePool(self, [])
 
     if self.op.maintain_node_health is not None:
       if self.op.maintain_node_health and not constants.ENABLE_CONFD:
@@ -1242,7 +1241,7 @@ class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
 
   def ExpandNames(self):
     self.needed_locks = dict.fromkeys(locking.LEVELS, locking.ALL_SET)
-    self.share_locks = _ShareAll()
+    self.share_locks = ShareAll()
 
   def CheckPrereq(self):
     """Check prerequisites.
@@ -1399,7 +1398,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
       }
 
-    self.share_locks = _ShareAll()
+    self.share_locks = ShareAll()
 
   def DeclareLocks(self, level):
     if level == locking.LEVEL_NODE:
@@ -1607,7 +1606,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       _ErrorIf(vgstatus, constants.CV_ENODELVM, node, vgstatus)
 
     # Check PVs
-    (errmsgs, pvminmax) = _CheckNodePVs(nresult, self._exclusive_storage)
+    (errmsgs, pvminmax) = CheckNodePVs(nresult, self._exclusive_storage)
     for em in errmsgs:
       self._Error(constants.CV_ENODELVM, node, em)
     if pvminmax is not None:
@@ -1748,7 +1747,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     cluster = self.cfg.GetClusterInfo()
     ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                             self.group_info)
-    err = _ComputeIPolicyInstanceViolation(ipolicy, inst_config, self.cfg)
+    err = ComputeIPolicyInstanceViolation(ipolicy, inst_config, self.cfg)
     _ErrorIf(err, constants.CV_EINSTANCEPOLICY, instance, utils.CommaJoin(err),
              code=self.ETYPE_WARNING)
 
@@ -2354,7 +2353,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
       # _AnnotateDiskParams makes already copies of the disks
       devonly = []
       for (inst, dev) in disks:
-        (anno_disk,) = _AnnotateDiskParams(instanceinfo[inst], [dev], self.cfg)
+        (anno_disk,) = AnnotateDiskParams(instanceinfo[inst], [dev], self.cfg)
         self.cfg.SetDiskID(anno_disk, nname)
         devonly.append(anno_disk)
 
@@ -2505,7 +2504,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     # FIXME: verify OS list
 
     # File verification
-    filemap = _ComputeAncillaryFiles(cluster, False)
+    filemap = ComputeAncillaryFiles(cluster, False)
 
     # do local checksums
     master_node = self.master_node = self.cfg.GetMasterNode()
@@ -2580,7 +2579,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     # Gather OOB paths
     oob_paths = []
     for node in self.all_node_info.values():
-      path = _SupportsOob(self.cfg, node)
+      path = SupportsOob(self.cfg, node)
       if path and path not in oob_paths:
         oob_paths.append(path)
 
@@ -2862,7 +2861,7 @@ class LUClusterVerifyDisks(NoHooksLU):
   REQ_BGL = False
 
   def ExpandNames(self):
-    self.share_locks = _ShareAll()
+    self.share_locks = ShareAll()
     self.needed_locks = {
       locking.LEVEL_NODEGROUP: locking.ALL_SET,
       }
diff --git a/lib/cmdlib/common.py b/lib/cmdlib/common.py
index 0aadf19..4759736 100644
@@ -65,24 +65,24 @@ def _ExpandItemName(fn, name, kind):
   return full_name
 
 
-def _ExpandInstanceName(cfg, name):
+def ExpandInstanceName(cfg, name):
   """Wrapper over L{_ExpandItemName} for instance."""
   return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
 
 
-def _ExpandNodeName(cfg, name):
+def ExpandNodeName(cfg, name):
   """Wrapper over L{_ExpandItemName} for nodes."""
   return _ExpandItemName(cfg.ExpandNodeName, name, "Node")
 
 
-def _ShareAll():
+def ShareAll():
   """Returns a dict declaring all lock levels shared.
 
   """
   return dict.fromkeys(locking.LEVELS, 1)
 
 
-def _CheckNodeGroupInstances(cfg, group_uuid, owned_instances):
+def CheckNodeGroupInstances(cfg, group_uuid, owned_instances):
   """Checks if the instances in a node group are still correct.
 
   @type cfg: L{config.ConfigWriter}
@@ -106,7 +106,7 @@ def _CheckNodeGroupInstances(cfg, group_uuid, owned_instances):
   return wanted_instances
 
 
-def _GetWantedNodes(lu, nodes):
+def GetWantedNodes(lu, nodes):
   """Returns list of checked and expanded node names.
 
   @type lu: L{LogicalUnit}
@@ -119,12 +119,12 @@ def _GetWantedNodes(lu, nodes):
 
   """
   if nodes:
-    return [_ExpandNodeName(lu.cfg, name) for name in nodes]
+    return [ExpandNodeName(lu.cfg, name) for name in nodes]
 
   return utils.NiceSort(lu.cfg.GetNodeList())
 
 
-def _GetWantedInstances(lu, instances):
+def GetWantedInstances(lu, instances):
   """Returns list of checked and expanded instance names.
 
   @type lu: L{LogicalUnit}
@@ -138,13 +138,13 @@ def _GetWantedInstances(lu, instances):
 
   """
   if instances:
-    wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
+    wanted = [ExpandInstanceName(lu.cfg, name) for name in instances]
   else:
     wanted = utils.NiceSort(lu.cfg.GetInstanceList())
   return wanted
 
 
-def _RunPostHook(lu, node_name):
+def RunPostHook(lu, node_name):
   """Runs the post-hook for an opcode on a single node.
 
   """
@@ -156,7 +156,7 @@ def _RunPostHook(lu, node_name):
                   node_name, err)
 
 
-def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
+def RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
   """Distribute additional files which are part of the cluster configuration.
 
   ConfigWriter takes care of distributing the config and ssconf files, but
@@ -189,7 +189,7 @@ def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
 
   # Gather file lists
   (files_all, _, files_mc, files_vm) = \
-    _ComputeAncillaryFiles(cluster, True)
+    ComputeAncillaryFiles(cluster, True)
 
   # Never re-distribute configuration file from here
   assert not (pathutils.CLUSTER_CONF_FILE in files_all or
@@ -204,10 +204,10 @@ def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
   # Upload the files
   for (node_list, files) in filemap:
     for fname in files:
-      _UploadHelper(lu, node_list, fname)
+      UploadHelper(lu, node_list, fname)
 
 
-def _ComputeAncillaryFiles(cluster, redist):
+def ComputeAncillaryFiles(cluster, redist):
   """Compute files external to Ganeti which need to be consistent.
 
   @type redist: boolean
@@ -286,7 +286,7 @@ def _ComputeAncillaryFiles(cluster, redist):
   return (files_all, files_opt, files_mc, files_vm)
 
 
-def _UploadHelper(lu, nodes, fname):
+def UploadHelper(lu, nodes, fname):
   """Helper for uploading a file and showing warnings.
 
   """
@@ -300,7 +300,7 @@ def _UploadHelper(lu, nodes, fname):
         lu.LogWarning(msg)
 
 
-def _MergeAndVerifyHvState(op_input, obj_input):
+def MergeAndVerifyHvState(op_input, obj_input):
   """Combines the hv state from an opcode with the one of the object
 
   @param op_input: The input dict from the opcode
@@ -322,7 +322,7 @@ def _MergeAndVerifyHvState(op_input, obj_input):
   return None
 
 
-def _MergeAndVerifyDiskState(op_input, obj_input):
+def MergeAndVerifyDiskState(op_input, obj_input):
   """Combines the disk state from an opcode with the one of the object
 
   @param op_input: The input dict from the opcode
@@ -345,7 +345,7 @@ def _MergeAndVerifyDiskState(op_input, obj_input):
   return None
 
 
-def _CheckOSParams(lu, required, nodenames, osname, osparams):
+def CheckOSParams(lu, required, nodenames, osname, osparams):
   """OS parameters validation.
 
   @type lu: L{LogicalUnit}
@@ -375,7 +375,7 @@ def _CheckOSParams(lu, required, nodenames, osname, osparams):
                  osname, node)
 
 
-def _CheckHVParams(lu, nodenames, hvname, hvparams):
+def CheckHVParams(lu, nodenames, hvname, hvparams):
   """Hypervisor parameter validation.
 
   This function abstracts the hypervisor parameter validation to be
@@ -405,7 +405,7 @@ def _CheckHVParams(lu, nodenames, hvname, hvparams):
     info.Raise("Hypervisor parameter validation failed on node %s" % node)
 
 
-def _AdjustCandidatePool(lu, exceptions):
+def AdjustCandidatePool(lu, exceptions):
   """Adjust the candidate pool after node operations.
 
   """
@@ -421,7 +421,7 @@ def _AdjustCandidatePool(lu, exceptions):
                (mc_now, mc_max))
 
 
-def _CheckNodePVs(nresult, exclusive_storage):
+def CheckNodePVs(nresult, exclusive_storage):
   """Check node PVs.
 
   """
@@ -475,10 +475,10 @@ def _ComputeMinMaxSpec(name, qualifier, ispecs, value):
   return None
 
 
-def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
-                                 nic_count, disk_sizes, spindle_use,
-                                 disk_template,
-                                 _compute_fn=_ComputeMinMaxSpec):
+def ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
+                                nic_count, disk_sizes, spindle_use,
+                                disk_template,
+                                _compute_fn=_ComputeMinMaxSpec):
   """Verifies ipolicy against provided specs.
 
   @type ipolicy: dict
@@ -530,8 +530,8 @@ def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
   return ret + min_errs
 
 
-def _ComputeIPolicyInstanceViolation(ipolicy, instance, cfg,
-                                     _compute_fn=_ComputeIPolicySpecViolation):
+def ComputeIPolicyInstanceViolation(ipolicy, instance, cfg,
+                                    _compute_fn=ComputeIPolicySpecViolation):
   """Compute if instance meets the specs of ipolicy.
 
   @type ipolicy: dict
@@ -541,7 +541,7 @@ def _ComputeIPolicyInstanceViolation(ipolicy, instance, cfg,
   @type cfg: L{config.ConfigWriter}
   @param cfg: Cluster configuration
   @param _compute_fn: The function to verify ipolicy (unittest only)
-  @see: L{_ComputeIPolicySpecViolation}
+  @see: L{ComputeIPolicySpecViolation}
 
   """
   be_full = cfg.GetClusterInfo().FillBE(instance)
@@ -569,10 +569,10 @@ def _ComputeViolatingInstances(ipolicy, instances, cfg):
 
   """
   return frozenset([inst.name for inst in instances
-                    if _ComputeIPolicyInstanceViolation(ipolicy, inst, cfg)])
+                    if ComputeIPolicyInstanceViolation(ipolicy, inst, cfg)])
 
 
-def _ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances, cfg):
+def ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances, cfg):
   """Computes a set of any instances that would violate the new ipolicy.
 
   @param old_ipolicy: The current (still in-place) ipolicy
@@ -588,7 +588,7 @@ def _ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances, cfg):
           _ComputeViolatingInstances(old_ipolicy, instances, cfg))
 
 
-def _GetUpdatedParams(old_params, update_dict,
+def GetUpdatedParams(old_params, update_dict,
                       use_default=True, use_none=False):
   """Return the new version of a parameter dictionary.
 
@@ -621,7 +621,7 @@ def _GetUpdatedParams(old_params, update_dict,
   return params_copy
 
 
-def _GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
+def GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
   """Return the new version of an instance policy.
 
   @param group_policy: whether this policy applies to a group and thus
@@ -660,8 +660,8 @@ def _GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
         if group_policy:
           msg = "%s cannot appear in group instance specs" % key
           raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
-        ipolicy[key] = _GetUpdatedParams(old_ipolicy.get(key, {}), value,
-                                         use_none=False, use_default=False)
+        ipolicy[key] = GetUpdatedParams(old_ipolicy.get(key, {}), value,
+                                        use_none=False, use_default=False)
         utils.ForceDictType(ipolicy[key], constants.ISPECS_PARAMETER_TYPES)
       else:
         # FIXME: we assume all others are lists; this should be redone
@@ -675,7 +675,7 @@ def _GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
   return ipolicy
 
 
-def _AnnotateDiskParams(instance, devs, cfg):
+def AnnotateDiskParams(instance, devs, cfg):
   """Little helper wrapper to the rpc annotation method.
 
   @param instance: The instance object
@@ -690,7 +690,7 @@ def _AnnotateDiskParams(instance, devs, cfg):
                                 cfg.GetInstanceDiskParams(instance))
 
 
-def _SupportsOob(cfg, node):
+def SupportsOob(cfg, node):
   """Tells if node supports OOB.
 
   @type cfg: L{config.ConfigWriter}
@@ -713,7 +713,7 @@ def _UpdateAndVerifySubDict(base, updates, type_check):
 
   """
   def fn(old, value):
-    new = _GetUpdatedParams(old, value)
+    new = GetUpdatedParams(old, value)
     utils.ForceDictType(new, type_check)
     return new
 
@@ -738,7 +738,7 @@ def _FilterVmNodes(lu, nodenames):
   return [name for name in nodenames if name not in vm_nodes]
 
 
-def _GetDefaultIAllocator(cfg, ialloc):
+def GetDefaultIAllocator(cfg, ialloc):
   """Decides on which iallocator to use.
 
   @type cfg: L{config.ConfigWriter}
@@ -761,8 +761,8 @@ def _GetDefaultIAllocator(cfg, ialloc):
   return ialloc
 
 
-def _CheckInstancesNodeGroups(cfg, instances, owned_groups, owned_nodes,
-                              cur_group_uuid):
+def CheckInstancesNodeGroups(cfg, instances, owned_groups, owned_nodes,
+                             cur_group_uuid):
   """Checks if node groups for locked instances are still correct.
 
   @type cfg: L{config.ConfigWriter}
@@ -781,14 +781,14 @@ def _CheckInstancesNodeGroups(cfg, instances, owned_groups, owned_nodes,
     assert owned_nodes.issuperset(inst.all_nodes), \
       "Instance %s's nodes changed while we kept the lock" % name
 
-    inst_groups = _CheckInstanceNodeGroups(cfg, name, owned_groups)
+    inst_groups = CheckInstanceNodeGroups(cfg, name, owned_groups)
 
     assert cur_group_uuid is None or cur_group_uuid in inst_groups, \
       "Instance %s has no node in group %s" % (name, cur_group_uuid)
 
 
-def _CheckInstanceNodeGroups(cfg, instance_name, owned_groups,
-                             primary_only=False):
+def CheckInstanceNodeGroups(cfg, instance_name, owned_groups,
+                            primary_only=False):
   """Checks if the owned node groups are still correct for an instance.
 
   @type cfg: L{config.ConfigWriter}
@@ -816,7 +816,7 @@ def _CheckInstanceNodeGroups(cfg, instance_name, owned_groups,
   return inst_groups
 
 
-def _LoadNodeEvacResult(lu, alloc_result, early_release, use_nodes):
+def LoadNodeEvacResult(lu, alloc_result, early_release, use_nodes):
   """Unpacks the result of change-group and node-evacuate iallocator requests.
 
   Iallocator modes L{constants.IALLOCATOR_MODE_NODE_EVAC} and
@@ -873,7 +873,7 @@ def _SetOpEarlyRelease(early_release, op):
   return op
 
 
-def _MapInstanceDisksToNodes(instances):
+def MapInstanceDisksToNodes(instances):
   """Creates a map from (node, volume) to instance name.
 
   @type instances: list of L{objects.Instance}
@@ -886,7 +886,7 @@ def _MapInstanceDisksToNodes(instances):
               for vol in vols)
 
 
-def _CheckParamsNotGlobal(params, glob_pars, kind, bad_levels, good_levels):
+def CheckParamsNotGlobal(params, glob_pars, kind, bad_levels, good_levels):
   """Make sure that none of the given paramters is global.
 
   If a global parameter is found, an L{errors.OpPrereqError} exception is
@@ -915,7 +915,7 @@ def _CheckParamsNotGlobal(params, glob_pars, kind, bad_levels, good_levels):
     raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
 
 
-def _IsExclusiveStorageEnabledNode(cfg, node):
+def IsExclusiveStorageEnabledNode(cfg, node):
   """Whether exclusive_storage is in effect for the given node.
 
   @type cfg: L{config.ConfigWriter}
@@ -929,7 +929,7 @@ def _IsExclusiveStorageEnabledNode(cfg, node):
   return cfg.GetNdParams(node)[constants.ND_EXCLUSIVE_STORAGE]
 
 
-def _CheckInstanceState(lu, instance, req_states, msg=None):
+def CheckInstanceState(lu, instance, req_states, msg=None):
   """Ensure that an instance is in one of the required states.
 
   @param lu: the LU on behalf of which we make the check
@@ -960,7 +960,7 @@ def _CheckInstanceState(lu, instance, req_states, msg=None):
                      " is down")
 
 
-def _CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
+def CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
   """Check the sanity of iallocator and node arguments and use the
   cluster-wide iallocator if appropriate.
 
@@ -996,7 +996,7 @@ def _CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
                                  " iallocator", errors.ECODE_INVAL)
 
 
-def _FindFaultyInstanceDisks(cfg, rpc_runner, instance, node_name, prereq):
+def FindFaultyInstanceDisks(cfg, rpc_runner, instance, node_name, prereq):
   faulty = []
 
   for dev in instance.disks:
@@ -1015,7 +1015,7 @@ def _FindFaultyInstanceDisks(cfg, rpc_runner, instance, node_name, prereq):
   return faulty
 
 
-def _CheckNodeOnline(lu, node, msg=None):
+def CheckNodeOnline(lu, node, msg=None):
   """Ensure that a given node is online.
 
   @param lu: the LU on behalf of which we make the check
diff --git a/lib/cmdlib/group.py b/lib/cmdlib/group.py
index dd51c48..d505d50 100644
@@ -31,13 +31,13 @@ from ganeti import qlang
 from ganeti import query
 from ganeti import utils
 from ganeti.masterd import iallocator
-from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, _QueryBase, \
+from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, QueryBase, \
   ResultWithJobs
-from ganeti.cmdlib.common import _MergeAndVerifyHvState, \
-  _MergeAndVerifyDiskState, _GetWantedNodes, _GetUpdatedParams, \
-  _CheckNodeGroupInstances, _GetUpdatedIPolicy, \
-  _ComputeNewInstanceViolations, _GetDefaultIAllocator, _ShareAll, \
-  _CheckInstancesNodeGroups, _LoadNodeEvacResult, _MapInstanceDisksToNodes
+from ganeti.cmdlib.common import MergeAndVerifyHvState, \
+  MergeAndVerifyDiskState, GetWantedNodes, GetUpdatedParams, \
+  CheckNodeGroupInstances, GetUpdatedIPolicy, \
+  ComputeNewInstanceViolations, GetDefaultIAllocator, ShareAll, \
+  CheckInstancesNodeGroups, LoadNodeEvacResult, MapInstanceDisksToNodes
 
 import ganeti.masterd.instance
 
@@ -79,12 +79,12 @@ class LUGroupAdd(LogicalUnit):
       utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
 
     if self.op.hv_state:
-      self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state, None)
+      self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state, None)
     else:
       self.new_hv_state = None
 
     if self.op.disk_state:
-      self.new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state, None)
+      self.new_disk_state = MergeAndVerifyDiskState(self.op.disk_state, None)
     else:
       self.new_disk_state = None
 
@@ -152,7 +152,7 @@ class LUGroupAssignNodes(NoHooksLU):
   def ExpandNames(self):
     # These raise errors.OpPrereqError on their own:
     self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
-    self.op.nodes = _GetWantedNodes(self, self.op.nodes)
+    self.op.nodes = GetWantedNodes(self, self.op.nodes)
 
     # We want to lock all the affected nodes and groups. We have readily
     # available the list of nodes, and the *destination* group. To gather the
@@ -276,7 +276,7 @@ class LUGroupAssignNodes(NoHooksLU):
             list(previously_split_instances & all_split_instances))
 
 
-class _GroupQuery(_QueryBase):
+class GroupQuery(QueryBase):
   FIELDS = query.GROUP_FIELDS
 
   def ExpandNames(self, lu):
@@ -363,7 +363,7 @@ class LUGroupQuery(NoHooksLU):
   REQ_BGL = False
 
   def CheckArguments(self):
-    self.gq = _GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
+    self.gq = GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
                           self.op.output_fields, False)
 
   def ExpandNames(self):
@@ -423,7 +423,7 @@ class LUGroupSetParams(LogicalUnit):
     """Updates and verifies disk parameters.
 
     """
-    new_params = _GetUpdatedParams(old, new)
+    new_params = GetUpdatedParams(old, new)
     utils.ForceDictType(new_params, constants.DISK_DT_TYPES)
     return new_params
 
@@ -434,7 +434,7 @@ class LUGroupSetParams(LogicalUnit):
     owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
 
     # Check if locked instances are still correct
-    _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
+    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
 
     self.group = self.cfg.GetNodeGroup(self.group_uuid)
     cluster = self.cfg.GetClusterInfo()
@@ -444,7 +444,7 @@ class LUGroupSetParams(LogicalUnit):
                                (self.op.group_name, self.group_uuid))
 
     if self.op.ndparams:
-      new_ndparams = _GetUpdatedParams(self.group.ndparams, self.op.ndparams)
+      new_ndparams = GetUpdatedParams(self.group.ndparams, self.op.ndparams)
       utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
       self.new_ndparams = new_ndparams
 
@@ -467,27 +467,27 @@ class LUGroupSetParams(LogicalUnit):
                                    errors.ECODE_INVAL)
 
     if self.op.hv_state:
-      self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state,
-                                                 self.group.hv_state_static)
+      self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state,
+                                                self.group.hv_state_static)
 
     if self.op.disk_state:
       self.new_disk_state = \
-        _MergeAndVerifyDiskState(self.op.disk_state,
-                                 self.group.disk_state_static)
+        MergeAndVerifyDiskState(self.op.disk_state,
+                                self.group.disk_state_static)
 
     if self.op.ipolicy:
-      self.new_ipolicy = _GetUpdatedIPolicy(self.group.ipolicy,
-                                            self.op.ipolicy,
-                                            group_policy=True)
+      self.new_ipolicy = GetUpdatedIPolicy(self.group.ipolicy,
+                                           self.op.ipolicy,
+                                           group_policy=True)
 
       new_ipolicy = cluster.SimpleFillIPolicy(self.new_ipolicy)
       inst_filter = lambda inst: inst.name in owned_instances
       instances = self.cfg.GetInstancesInfoByFilter(inst_filter).values()
       gmi = ganeti.masterd.instance
       violations = \
-          _ComputeNewInstanceViolations(gmi.CalculateGroupIPolicy(cluster,
-                                                                  self.group),
-                                        new_ipolicy, instances, self.cfg)
+          ComputeNewInstanceViolations(gmi.CalculateGroupIPolicy(cluster,
+                                                                 self.group),
+                                       new_ipolicy, instances, self.cfg)
 
       if violations:
         self.LogWarning("After the ipolicy change the following instances"
@@ -697,9 +697,9 @@ class LUGroupEvacuate(LogicalUnit):
                                   utils.CommaJoin(self.req_target_uuids)),
                                  errors.ECODE_INVAL)
 
-    self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)
+    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
 
-    self.share_locks = _ShareAll()
+    self.share_locks = ShareAll()
     self.needed_locks = {
       locking.LEVEL_INSTANCE: [],
       locking.LEVEL_NODEGROUP: [],
@@ -757,14 +757,14 @@ class LUGroupEvacuate(LogicalUnit):
     assert self.group_uuid in owned_groups
 
     # Check if locked instances are still correct
-    _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
+    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
 
     # Get instance information
     self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
 
     # Check if node groups for locked instances are still correct
-    _CheckInstancesNodeGroups(self.cfg, self.instances,
-                              owned_groups, owned_nodes, self.group_uuid)
+    CheckInstancesNodeGroups(self.cfg, self.instances,
+                             owned_groups, owned_nodes, self.group_uuid)
 
     if self.req_target_uuids:
       # User requested specific target groups
@@ -816,7 +816,7 @@ class LUGroupEvacuate(LogicalUnit):
                                  (self.op.iallocator, ial.info),
                                  errors.ECODE_NORES)
 
-    jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
+    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
 
     self.LogInfo("Iallocator returned %s job(s) for evacuating node group %s",
                  len(jobs), self.op.group_name)
@@ -834,7 +834,7 @@ class LUGroupVerifyDisks(NoHooksLU):
     # Raises errors.OpPrereqError on its own if group can't be found
     self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
 
-    self.share_locks = _ShareAll()
+    self.share_locks = ShareAll()
     self.needed_locks = {
       locking.LEVEL_INSTANCE: [],
       locking.LEVEL_NODEGROUP: [],
@@ -887,14 +887,14 @@ class LUGroupVerifyDisks(NoHooksLU):
     assert self.group_uuid in owned_groups
 
     # Check if locked instances are still correct
-    _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
+    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
 
     # Get instance information
     self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
 
     # Check if node groups for locked instances are still correct
-    _CheckInstancesNodeGroups(self.cfg, self.instances,
-                              owned_groups, owned_nodes, self.group_uuid)
+    CheckInstancesNodeGroups(self.cfg, self.instances,
+                             owned_groups, owned_nodes, self.group_uuid)
 
   def Exec(self, feedback_fn):
     """Verify integrity of cluster disks.
@@ -909,7 +909,7 @@ class LUGroupVerifyDisks(NoHooksLU):
     res_instances = set()
     res_missing = {}
 
-    nv_dict = _MapInstanceDisksToNodes(
+    nv_dict = MapInstanceDisksToNodes(
       [inst for inst in self.instances.values()
        if inst.admin_state == constants.ADMINST_UP])
 
diff --git a/lib/cmdlib/instance.py b/lib/cmdlib/instance.py
index 5f43d93..0b1966d 100644
@@ -44,29 +44,29 @@ from ganeti import utils
 from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs
 
 from ganeti.cmdlib.common import INSTANCE_DOWN, \
-  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, _CheckNodeOnline, \
-  _ShareAll, _GetDefaultIAllocator, _CheckInstanceNodeGroups, \
-  _LoadNodeEvacResult, _CheckIAllocatorOrNode, _CheckParamsNotGlobal, \
-  _IsExclusiveStorageEnabledNode, _CheckHVParams, _CheckOSParams, \
-  _AnnotateDiskParams, _GetUpdatedParams, _ExpandInstanceName, \
-  _ComputeIPolicySpecViolation, _CheckInstanceState, _ExpandNodeName
-from ganeti.cmdlib.instance_storage import _CreateDisks, \
-  _CheckNodesFreeDiskPerVG, _WipeDisks, _WaitForSync, \
-  _IsExclusiveStorageEnabledNodeName, _CreateSingleBlockDev, _ComputeDisks, \
-  _CheckRADOSFreeSpace, _ComputeDiskSizePerVG, _GenerateDiskTemplate, \
-  _CreateBlockDev, _StartInstanceDisks, _ShutdownInstanceDisks, \
-  _AssembleInstanceDisks
-from ganeti.cmdlib.instance_utils import _BuildInstanceHookEnvByObject, \
-  _GetClusterDomainSecret, _BuildInstanceHookEnv, _NICListToTuple, \
-  _NICToTuple, _CheckNodeNotDrained, _RemoveInstance, _CopyLockList, \
-  _ReleaseLocks, _CheckNodeVmCapable, _CheckTargetNodeIPolicy, \
-  _GetInstanceInfoText, _RemoveDisks, _CheckNodeFreeMemory, \
-  _CheckInstanceBridgesExist, _CheckNicsBridgesExist, _CheckNodeHasOS
+  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
+  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
+  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
+  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
+  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
+  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
+from ganeti.cmdlib.instance_storage import CreateDisks, \
+  CheckNodesFreeDiskPerVG, WipeDisks, WaitForSync, \
+  IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
+  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
+  CreateBlockDev, StartInstanceDisks, ShutdownInstanceDisks, \
+  AssembleInstanceDisks
+from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
+  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
+  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
+  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
+  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
+  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS
 
 import ganeti.masterd.instance
 
 
-#: Type description for changes as returned by L{ApplyContainerMods}'s
+#: Type description for changes as returned by L{_ApplyContainerMods}'s
 #: callbacks
 _TApplyContModsCbChanges = \
   ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
@@ -266,7 +266,7 @@ def _CheckForConflictingIp(lu, ip, node):
 
 def _ComputeIPolicyInstanceSpecViolation(
   ipolicy, instance_spec, disk_template,
-  _compute_fn=_ComputeIPolicySpecViolation):
+  _compute_fn=ComputeIPolicySpecViolation):
   """Compute if instance specs meets the specs of ipolicy.
 
   @type ipolicy: dict
@@ -276,7 +276,7 @@ def _ComputeIPolicyInstanceSpecViolation(
   @type disk_template: string
   @param disk_template: the disk template of the instance
   @param _compute_fn: The function to verify ipolicy (unittest only)
-  @see: L{_ComputeIPolicySpecViolation}
+  @see: L{ComputeIPolicySpecViolation}
 
   """
   mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
@@ -411,7 +411,7 @@ class LUInstanceCreate(LogicalUnit):
       opcodes.RequireSharedFileStorage()
 
     ### Node/iallocator related checks
-    _CheckIAllocatorOrNode(self, "iallocator", "pnode")
+    CheckIAllocatorOrNode(self, "iallocator", "pnode")
 
     if self.op.pnode is not None:
       if self.op.disk_template in constants.DTS_INT_MIRROR:
@@ -425,7 +425,7 @@ class LUInstanceCreate(LogicalUnit):
 
     _CheckOpportunisticLocking(self.op)
 
-    self._cds = _GetClusterDomainSecret()
+    self._cds = GetClusterDomainSecret()
 
     if self.op.mode == constants.INSTANCE_IMPORT:
       # On import force_variant must be True, because if we forced it at
@@ -521,10 +521,10 @@ class LUInstanceCreate(LogicalUnit):
         self.opportunistic_locks[locking.LEVEL_NODE] = True
         self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
     else:
-      self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
+      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
       nodelist = [self.op.pnode]
       if self.op.snode is not None:
-        self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
+        self.op.snode = ExpandNodeName(self.cfg, self.op.snode)
         nodelist.append(self.op.snode)
       self.needed_locks[locking.LEVEL_NODE] = nodelist
 
@@ -545,7 +545,7 @@ class LUInstanceCreate(LogicalUnit):
                                      " requires a source node option",
                                      errors.ECODE_INVAL)
       else:
-        self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
+        self.op.src_node = src_node = ExpandNodeName(self.cfg, src_node)
         if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
           self.needed_locks[locking.LEVEL_NODE].append(src_node)
         if not os.path.isabs(src_path):
@@ -553,7 +553,7 @@ class LUInstanceCreate(LogicalUnit):
             utils.PathJoin(pathutils.EXPORT_DIR, src_path)
 
     self.needed_locks[locking.LEVEL_NODE_RES] = \
-      _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
+      CopyLockList(self.needed_locks[locking.LEVEL_NODE])
 
   def _RunAllocator(self):
     """Run the allocator based on input opcode.
@@ -610,7 +610,7 @@ class LUInstanceCreate(LogicalUnit):
       env["SRC_PATH"] = self.op.src_path
       env["SRC_IMAGES"] = self.src_images
 
-    env.update(_BuildInstanceHookEnv(
+    env.update(BuildInstanceHookEnv(
       name=self.op.instance_name,
       primary_node=self.op.pnode,
       secondary_nodes=self.secondaries,
@@ -619,7 +619,7 @@ class LUInstanceCreate(LogicalUnit):
       minmem=self.be_full[constants.BE_MINMEM],
       maxmem=self.be_full[constants.BE_MAXMEM],
       vcpus=self.be_full[constants.BE_VCPUS],
-      nics=_NICListToTuple(self, self.nics),
+      nics=NICListToTuple(self, self.nics),
       disk_template=self.op.disk_template,
       disks=[(d[constants.IDISK_NAME], d[constants.IDISK_SIZE],
               d[constants.IDISK_MODE]) for d in self.disks],
@@ -669,7 +669,7 @@ class LUInstanceCreate(LogicalUnit):
         raise errors.OpPrereqError("No export found for relative path %s" %
                                    src_path, errors.ECODE_INVAL)
 
-    _CheckNodeOnline(self, src_node)
+    CheckNodeOnline(self, src_node)
     result = self.rpc.call_export_info(src_node, src_path)
     result.Raise("No export or invalid export found in dir %s" % src_path)
 
@@ -871,8 +871,8 @@ class LUInstanceCreate(LogicalUnit):
     hv_type.CheckParameterSyntax(filled_hvp)
     self.hv_full = filled_hvp
     # check that we don't specify global parameters on an instance
-    _CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
-                          "instance", "cluster")
+    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
+                         "instance", "cluster")
 
     # fill and remember the beparams dict
     self.be_full = _ComputeFullBeParams(self.op, cluster)
@@ -891,7 +891,7 @@ class LUInstanceCreate(LogicalUnit):
 
     # disk checks/pre-build
     default_vg = self.cfg.GetVGName()
-    self.disks = _ComputeDisks(self.op, default_vg)
+    self.disks = ComputeDisks(self.op, default_vg)
 
     if self.op.mode == constants.INSTANCE_IMPORT:
       disk_images = []
@@ -941,9 +941,9 @@ class LUInstanceCreate(LogicalUnit):
 
     # Release all unneeded node locks
     keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
-    _ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
-    _ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
-    _ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
+    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
+    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
+    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
 
     assert (self.owned_locks(locking.LEVEL_NODE) ==
             self.owned_locks(locking.LEVEL_NODE_RES)), \
@@ -1010,9 +1010,9 @@ class LUInstanceCreate(LogicalUnit):
       if self.op.snode == pnode.name:
         raise errors.OpPrereqError("The secondary node cannot be the"
                                    " primary node", errors.ECODE_INVAL)
-      _CheckNodeOnline(self, self.op.snode)
-      _CheckNodeNotDrained(self, self.op.snode)
-      _CheckNodeVmCapable(self, self.op.snode)
+      CheckNodeOnline(self, self.op.snode)
+      CheckNodeNotDrained(self, self.op.snode)
+      CheckNodeVmCapable(self, self.op.snode)
       self.secondaries.append(self.op.snode)
 
       snode = self.cfg.GetNodeInfo(self.op.snode)
@@ -1026,7 +1026,7 @@ class LUInstanceCreate(LogicalUnit):
       nodes = [pnode]
       if self.op.disk_template in constants.DTS_INT_MIRROR:
         nodes.append(snode)
-      has_es = lambda n: _IsExclusiveStorageEnabledNode(self.cfg, n)
+      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
       if compat.any(map(has_es, nodes)):
         raise errors.OpPrereqError("Disk template %s not supported with"
                                    " exclusive storage" % self.op.disk_template,
@@ -1039,14 +1039,14 @@ class LUInstanceCreate(LogicalUnit):
         # _CheckRADOSFreeSpace() is just a placeholder.
         # Any function that checks prerequisites can be placed here.
         # Check if there is enough space on the RADOS cluster.
-        _CheckRADOSFreeSpace()
+        CheckRADOSFreeSpace()
       elif self.op.disk_template == constants.DT_EXT:
         # FIXME: Function that checks prereqs if needed
         pass
       else:
         # Check lv size requirements, if not adopting
-        req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
-        _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
+        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
+        CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
 
     elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
       all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
@@ -1139,13 +1139,13 @@ class LUInstanceCreate(LogicalUnit):
              (pnode.group, group_info.name, utils.CommaJoin(res)))
       raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
 
-    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
+    CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
 
-    _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
+    CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
     # check OS parameters (remotely)
-    _CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
+    CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
 
-    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
+    CheckNicsBridgesExist(self, self.nics, self.pnode.name)
 
     #TODO: _CheckExtParams (remotely)
     # Check parameters for extstorage
@@ -1153,10 +1153,10 @@ class LUInstanceCreate(LogicalUnit):
     # memory check on primary node
     #TODO(dynmem): use MINMEM for checking
     if self.op.start:
-      _CheckNodeFreeMemory(self, self.pnode.name,
-                           "creating instance %s" % self.op.instance_name,
-                           self.be_full[constants.BE_MAXMEM],
-                           self.op.hypervisor)
+      CheckNodeFreeMemory(self, self.pnode.name,
+                          "creating instance %s" % self.op.instance_name,
+                          self.be_full[constants.BE_MAXMEM],
+                          self.op.hypervisor)
 
     self.dry_run_result = list(nodenames)
 
@@ -1183,16 +1183,16 @@ class LUInstanceCreate(LogicalUnit):
     # has no disks yet (we are generating them right here).
     node = self.cfg.GetNodeInfo(pnode_name)
     nodegroup = self.cfg.GetNodeGroup(node.group)
-    disks = _GenerateDiskTemplate(self,
-                                  self.op.disk_template,
-                                  instance, pnode_name,
-                                  self.secondaries,
-                                  self.disks,
-                                  self.instance_file_storage_dir,
-                                  self.op.file_driver,
-                                  0,
-                                  feedback_fn,
-                                  self.cfg.GetGroupDiskParams(nodegroup))
+    disks = GenerateDiskTemplate(self,
+                                 self.op.disk_template,
+                                 instance, pnode_name,
+                                 self.secondaries,
+                                 self.disks,
+                                 self.instance_file_storage_dir,
+                                 self.op.file_driver,
+                                 0,
+                                 feedback_fn,
+                                 self.cfg.GetGroupDiskParams(nodegroup))
 
     iobj = objects.Instance(name=instance, os=self.op.os_type,
                             primary_node=pnode_name,
@@ -1226,7 +1226,7 @@ class LUInstanceCreate(LogicalUnit):
     else:
       feedback_fn("* creating instance disks...")
       try:
-        _CreateDisks(self, iobj)
+        CreateDisks(self, iobj)
       except errors.OpExecError:
         self.LogWarning("Device creation failed")
         self.cfg.ReleaseDRBDMinors(instance)
@@ -1242,16 +1242,16 @@ class LUInstanceCreate(LogicalUnit):
 
     if self.op.mode == constants.INSTANCE_IMPORT:
       # Release unused nodes
-      _ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
+      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
     else:
       # Release all nodes
-      _ReleaseLocks(self, locking.LEVEL_NODE)
+      ReleaseLocks(self, locking.LEVEL_NODE)
 
     disk_abort = False
     if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
       feedback_fn("* wiping instance disks...")
       try:
-        _WipeDisks(self, iobj)
+        WipeDisks(self, iobj)
       except errors.OpExecError, err:
         logging.exception("Wiping disks failed")
         self.LogWarning("Wiping instance disks failed (%s)", err)
@@ -1261,16 +1261,16 @@ class LUInstanceCreate(LogicalUnit):
       # Something is already wrong with the disks, don't do anything else
       pass
     elif self.op.wait_for_sync:
-      disk_abort = not _WaitForSync(self, iobj)
+      disk_abort = not WaitForSync(self, iobj)
     elif iobj.disk_template in constants.DTS_INT_MIRROR:
       # make sure the disks are not degraded (still sync-ing is ok)
       feedback_fn("* checking mirrors status")
-      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
+      disk_abort = not WaitForSync(self, iobj, oneshot=True)
     else:
       disk_abort = False
 
     if disk_abort:
-      _RemoveDisks(self, iobj)
+      RemoveDisks(self, iobj)
       self.cfg.RemoveInstance(iobj.name)
       # Make sure the instance lock gets removed
       self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
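Taken together, the hunks above leave the disk bring-up logic of LUInstanceCreate.Exec untouched apart from the renames: create the disks, optionally wipe them, wait for (or only spot-check) the sync, and roll everything back if the disks come up degraded. Below is a toy sketch of that ordering with the Ganeti helpers replaced by injected callables; nothing in it is the real implementation.

def bring_up_disks(create, wipe, wait_for_sync, remove,
                   wipe_disks=True, wait=True):
  # Same ordering as above: create, optionally wipe, then sync-check;
  # on a degraded result the disks are removed before the error is raised.
  create()
  if wipe_disks:
    wipe()
  if not wait_for_sync(oneshot=not wait):
    remove()
    raise RuntimeError("degraded disks, aborting instance creation")


events = []
bring_up_disks(lambda: events.append("create"),
               lambda: events.append("wipe"),
               lambda oneshot: events.append("sync") or True,
               lambda: events.append("remove"))
assert events == ["create", "wipe", "sync"]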
@@ -1278,7 +1278,7 @@ class LUInstanceCreate(LogicalUnit):
                                " this instance")
 
     # Release all node resource locks
-    _ReleaseLocks(self, locking.LEVEL_NODE_RES)
+    ReleaseLocks(self, locking.LEVEL_NODE_RES)
 
     if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
       # we need to set the disks ID to the primary node, since the
@@ -1421,7 +1421,7 @@ class LUInstanceRename(LogicalUnit):
     This runs on master, primary and secondary nodes of the instance.
 
     """
-    env = _BuildInstanceHookEnvByObject(self, self.instance)
+    env = BuildInstanceHookEnvByObject(self, self.instance)
     env["INSTANCE_NEW_NAME"] = self.op.new_name
     return env
 
@@ -1438,13 +1438,13 @@ class LUInstanceRename(LogicalUnit):
     This checks that the instance is in the cluster and is not running.
 
     """
-    self.op.instance_name = _ExpandInstanceName(self.cfg,
-                                                self.op.instance_name)
+    self.op.instance_name = ExpandInstanceName(self.cfg,
+                                               self.op.instance_name)
     instance = self.cfg.GetInstanceInfo(self.op.instance_name)
     assert instance is not None
-    _CheckNodeOnline(self, instance.primary_node)
-    _CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
-                        msg="cannot rename")
+    CheckNodeOnline(self, instance.primary_node)
+    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
+                       msg="cannot rename")
     self.instance = instance
 
     new_name = self.op.new_name
@@ -1496,9 +1496,9 @@ class LUInstanceRename(LogicalUnit):
                    (inst.primary_node, old_file_storage_dir,
                     new_file_storage_dir))
 
-    _StartInstanceDisks(self, inst, None)
+    StartInstanceDisks(self, inst, None)
     # update info on disks
-    info = _GetInstanceInfoText(inst)
+    info = GetInstanceInfoText(inst)
     for (idx, disk) in enumerate(inst.disks):
       for node in inst.all_nodes:
         self.cfg.SetDiskID(disk, node)
@@ -1516,7 +1516,7 @@ class LUInstanceRename(LogicalUnit):
                (inst.name, inst.primary_node, msg))
         self.LogWarning(msg)
     finally:
-      _ShutdownInstanceDisks(self, inst)
+      ShutdownInstanceDisks(self, inst)
 
     return inst.name
 
@@ -1541,7 +1541,7 @@ class LUInstanceRemove(LogicalUnit):
     elif level == locking.LEVEL_NODE_RES:
       # Copy node locks
       self.needed_locks[locking.LEVEL_NODE_RES] = \
-        _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
+        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
 
   def BuildHooksEnv(self):
     """Build hooks env.
@@ -1549,7 +1549,7 @@ class LUInstanceRemove(LogicalUnit):
     This runs on master, primary and secondary nodes of the instance.
 
     """
-    env = _BuildInstanceHookEnvByObject(self, self.instance)
+    env = BuildInstanceHookEnvByObject(self, self.instance)
     env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
     return env
 
@@ -1597,7 +1597,7 @@ class LUInstanceRemove(LogicalUnit):
                 self.owned_locks(locking.LEVEL_NODE)), \
       "Not owning correct locks"
 
-    _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
+    RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
 
 
 class LUInstanceMove(LogicalUnit):
@@ -1610,7 +1610,7 @@ class LUInstanceMove(LogicalUnit):
 
   def ExpandNames(self):
     self._ExpandAndLockInstance()
-    target_node = _ExpandNodeName(self.cfg, self.op.target_node)
+    target_node = ExpandNodeName(self.cfg, self.op.target_node)
     self.op.target_node = target_node
     self.needed_locks[locking.LEVEL_NODE] = [target_node]
     self.needed_locks[locking.LEVEL_NODE_RES] = []
@@ -1622,7 +1622,7 @@ class LUInstanceMove(LogicalUnit):
     elif level == locking.LEVEL_NODE_RES:
       # Copy node locks
       self.needed_locks[locking.LEVEL_NODE_RES] = \
-        _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
+        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
 
   def BuildHooksEnv(self):
     """Build hooks env.
@@ -1634,7 +1634,7 @@ class LUInstanceMove(LogicalUnit):
       "TARGET_NODE": self.op.target_node,
       "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
       }
-    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
+    env.update(BuildInstanceHookEnvByObject(self, self.instance))
     return env
 
   def BuildHooksNodes(self):
@@ -1680,27 +1680,27 @@ class LUInstanceMove(LogicalUnit):
         raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                    " cannot copy" % idx, errors.ECODE_STATE)
 
-    _CheckNodeOnline(self, target_node)
-    _CheckNodeNotDrained(self, target_node)
-    _CheckNodeVmCapable(self, target_node)
+    CheckNodeOnline(self, target_node)
+    CheckNodeNotDrained(self, target_node)
+    CheckNodeVmCapable(self, target_node)
     cluster = self.cfg.GetClusterInfo()
     group_info = self.cfg.GetNodeGroup(node.group)
     ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
-    _CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
-                            ignore=self.op.ignore_ipolicy)
+    CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
+                           ignore=self.op.ignore_ipolicy)
 
     if instance.admin_state == constants.ADMINST_UP:
       # check memory requirements on the secondary node
-      _CheckNodeFreeMemory(self, target_node,
-                           "failing over instance %s" %
-                           instance.name, bep[constants.BE_MAXMEM],
-                           instance.hypervisor)
+      CheckNodeFreeMemory(self, target_node,
+                          "failing over instance %s" %
+                          instance.name, bep[constants.BE_MAXMEM],
+                          instance.hypervisor)
     else:
       self.LogInfo("Not checking memory on the secondary node as"
                    " instance will not be started")
 
     # check bridge existance
-    _CheckInstanceBridgesExist(self, instance, node=target_node)
+    CheckInstanceBridgesExist(self, instance, node=target_node)
 
   def Exec(self, feedback_fn):
     """Move an instance.
@@ -1737,7 +1737,7 @@ class LUInstanceMove(LogicalUnit):
 
     # create the target disks
     try:
-      _CreateDisks(self, instance, target_node=target_node)
+      CreateDisks(self, instance, target_node=target_node)
     except errors.OpExecError:
       self.LogWarning("Device creation failed")
       self.cfg.ReleaseDRBDMinors(instance.name)
@@ -1769,7 +1769,7 @@ class LUInstanceMove(LogicalUnit):
     if errs:
       self.LogWarning("Some disks failed to copy, aborting")
       try:
-        _RemoveDisks(self, instance, target_node=target_node)
+        RemoveDisks(self, instance, target_node=target_node)
       finally:
         self.cfg.ReleaseDRBDMinors(instance.name)
         raise errors.OpExecError("Errors during disk copy: %s" %
@@ -1779,17 +1779,17 @@ class LUInstanceMove(LogicalUnit):
     self.cfg.Update(instance, feedback_fn)
 
     self.LogInfo("Removing the disks on the original node")
-    _RemoveDisks(self, instance, target_node=source_node)
+    RemoveDisks(self, instance, target_node=source_node)
 
     # Only start the instance if it's marked as up
     if instance.admin_state == constants.ADMINST_UP:
       self.LogInfo("Starting instance %s on node %s",
                    instance.name, target_node)
 
-      disks_ok, _ = _AssembleInstanceDisks(self, instance,
-                                           ignore_secondaries=True)
+      disks_ok, _ = AssembleInstanceDisks(self, instance,
+                                          ignore_secondaries=True)
       if not disks_ok:
-        _ShutdownInstanceDisks(self, instance)
+        ShutdownInstanceDisks(self, instance)
         raise errors.OpExecError("Can't activate the instance's disks")
 
       result = self.rpc.call_instance_start(target_node,
@@ -1797,7 +1797,7 @@ class LUInstanceMove(LogicalUnit):
                                             self.op.reason)
       msg = result.fail_msg
       if msg:
-        _ShutdownInstanceDisks(self, instance)
+        ShutdownInstanceDisks(self, instance)
         raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                  (instance.name, target_node, msg))
 
@@ -1850,7 +1850,7 @@ class LUInstanceMultiAlloc(NoHooksLU):
     """Calculate the locks.
 
     """
-    self.share_locks = _ShareAll()
+    self.share_locks = ShareAll()
     self.needed_locks = {
       # iallocator will select nodes and even if no iallocator is used,
       # collisions with LUInstanceCreate should be avoided
@@ -1867,10 +1867,10 @@ class LUInstanceMultiAlloc(NoHooksLU):
     else:
       nodeslist = []
       for inst in self.op.instances:
-        inst.pnode = _ExpandNodeName(self.cfg, inst.pnode)
+        inst.pnode = ExpandNodeName(self.cfg, inst.pnode)
         nodeslist.append(inst.pnode)
         if inst.snode is not None:
-          inst.snode = _ExpandNodeName(self.cfg, inst.snode)
+          inst.snode = ExpandNodeName(self.cfg, inst.snode)
           nodeslist.append(inst.snode)
 
       self.needed_locks[locking.LEVEL_NODE] = nodeslist
@@ -1892,7 +1892,7 @@ class LUInstanceMultiAlloc(NoHooksLU):
     else:
       node_whitelist = None
 
-    insts = [_CreateInstanceAllocRequest(op, _ComputeDisks(op, default_vg),
+    insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
                                          _ComputeNics(op, cluster, None,
                                                       self.cfg, ec_id),
                                          _ComputeFullBeParams(op, cluster),
@@ -1964,7 +1964,7 @@ class _InstNicModPrivate:
     self.filled = None
 
 
-def PrepareContainerMods(mods, private_fn):
+def _PrepareContainerMods(mods, private_fn):
   """Prepares a list of container modifications by adding a private data field.
 
   @type mods: list of tuples; (operation, index, parameters)
@@ -2054,8 +2054,8 @@ def GetItemFromContainer(identifier, kind, container):
                              (kind, identifier), errors.ECODE_NOENT)
 
 
-def ApplyContainerMods(kind, container, chgdesc, mods,
-                       create_fn, modify_fn, remove_fn):
+def _ApplyContainerMods(kind, container, chgdesc, mods,
+                        create_fn, modify_fn, remove_fn):
   """Applies descriptions in C{mods} to C{container}.
 
   @type kind: string
@@ -2065,20 +2065,20 @@ def ApplyContainerMods(kind, container, chgdesc, mods,
   @type chgdesc: None or list
   @param chgdesc: List of applied changes
   @type mods: list
-  @param mods: Modifications as returned by L{PrepareContainerMods}
+  @param mods: Modifications as returned by L{_PrepareContainerMods}
   @type create_fn: callable
   @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
     receives absolute item index, parameters and private data object as added
-    by L{PrepareContainerMods}, returns tuple containing new item and changes
+    by L{_PrepareContainerMods}, returns tuple containing new item and changes
     as list
   @type modify_fn: callable
   @param modify_fn: Callback for modifying an existing item
     (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
-    and private data object as added by L{PrepareContainerMods}, returns
+    and private data object as added by L{_PrepareContainerMods}, returns
     changes as list
   @type remove_fn: callable
   @param remove_fn: Callback on removing item; receives absolute item index,
-    item and private data object as added by L{PrepareContainerMods}
+    item and private data object as added by L{_PrepareContainerMods}
 
   """
   for (op, identifier, params, private) in mods:
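The two hunks above only flip the visibility of the container-mods helpers (PrepareContainerMods and ApplyContainerMods become module-private), but their docstrings describe a small API of their own: modifications are first paired with a private-data slot, then replayed against a container through add/modify/remove callbacks. The following standalone sketch only illustrates that calling convention; the toy container, callbacks and constants are invented for the example and are not the Ganeti implementation.

# Toy illustration of the prepare/apply convention described above.
DDM_ADD, DDM_MODIFY, DDM_REMOVE = "add", "modify", "remove"


def prepare_mods(mods, private_fn):
  # In the spirit of _PrepareContainerMods: attach a fresh private-data
  # object (or None) to every (operation, index, parameters) tuple.
  if private_fn is None:
    return [(op, idx, params, None) for (op, idx, params) in mods]
  return [(op, idx, params, private_fn()) for (op, idx, params) in mods]


def apply_mods(container, mods, create_fn, modify_fn, remove_fn):
  # Simplified dispatch loop in the spirit of _ApplyContainerMods.
  for (op, idx, params, private) in mods:
    if op == DDM_ADD:
      container.insert(idx, create_fn(idx, params, private))
    elif op == DDM_MODIFY:
      container[idx] = modify_fn(idx, container[idx], params, private)
    elif op == DDM_REMOVE:
      remove_fn(idx, container[idx], private)
      del container[idx]


nics = ["nic0", "nic1"]
mods = prepare_mods([(DDM_ADD, 2, {"mac": "auto"}),
                     (DDM_REMOVE, 0, None)], dict)
apply_mods(nics, mods,
           lambda idx, params, priv: "nic%d(%s)" % (idx, params["mac"]),
           lambda idx, item, params, priv: item,
           lambda idx, item, priv: None)
assert nics == ["nic1", "nic2(auto)"]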
@@ -2307,8 +2307,8 @@ class LUInstanceSetParams(LogicalUnit):
       raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
 
     if self.op.hvparams:
-      _CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
-                            "hypervisor", "instance", "cluster")
+      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
+                           "hypervisor", "instance", "cluster")
 
     self.op.disks = self._UpgradeDiskNicMods(
       "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
@@ -2332,7 +2332,7 @@ class LUInstanceSetParams(LogicalUnit):
                     self._VerifyNicModification)
 
     if self.op.pnode:
-      self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
+      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
 
   def ExpandNames(self):
     self._ExpandAndLockInstance()
@@ -2355,12 +2355,12 @@ class LUInstanceSetParams(LogicalUnit):
     elif level == locking.LEVEL_NODE:
       self._LockInstancesNodes()
       if self.op.disk_template and self.op.remote_node:
-        self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
+        self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
         self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
     elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
       # Copy node locks
       self.needed_locks[locking.LEVEL_NODE_RES] = \
-        _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
+        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
 
   def BuildHooksEnv(self):
     """Build hooks env.
@@ -2385,11 +2385,11 @@ class LUInstanceSetParams(LogicalUnit):
         n = copy.deepcopy(nic)
         nicparams = self.cluster.SimpleFillNIC(n.nicparams)
         n.nicparams = nicparams
-        nics.append(_NICToTuple(self, n))
+        nics.append(NICToTuple(self, n))
 
       args["nics"] = nics
 
-    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
+    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
     if self.op.disk_template:
       env["NEW_DISK_TEMPLATE"] = self.op.disk_template
     if self.op.runtime_mem:
@@ -2431,7 +2431,7 @@ class LUInstanceSetParams(LogicalUnit):
                                    new_net_obj.name, errors.ECODE_INVAL)
       new_params = dict(netparams)
     else:
-      new_params = _GetUpdatedParams(old_params, update_params_dict)
+      new_params = GetUpdatedParams(old_params, update_params_dict)
 
     utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
 
@@ -2560,29 +2560,29 @@ class LUInstanceSetParams(LogicalUnit):
                                  " %s to %s" % (instance.disk_template,
                                                 self.op.disk_template),
                                  errors.ECODE_INVAL)
-    _CheckInstanceState(self, instance, INSTANCE_DOWN,
-                        msg="cannot change disk template")
+    CheckInstanceState(self, instance, INSTANCE_DOWN,
+                       msg="cannot change disk template")
     if self.op.disk_template in constants.DTS_INT_MIRROR:
       if self.op.remote_node == pnode:
         raise errors.OpPrereqError("Given new secondary node %s is the same"
                                    " as the primary node of the instance" %
                                    self.op.remote_node, errors.ECODE_STATE)
-      _CheckNodeOnline(self, self.op.remote_node)
-      _CheckNodeNotDrained(self, self.op.remote_node)
+      CheckNodeOnline(self, self.op.remote_node)
+      CheckNodeNotDrained(self, self.op.remote_node)
       # FIXME: here we assume that the old instance type is DT_PLAIN
       assert instance.disk_template == constants.DT_PLAIN
       disks = [{constants.IDISK_SIZE: d.size,
                 constants.IDISK_VG: d.logical_id[0]}
                for d in instance.disks]
-      required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
-      _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
+      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
+      CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
 
       snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
       snode_group = self.cfg.GetNodeGroup(snode_info.group)
       ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                               snode_group)
-      _CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
-                              ignore=self.op.ignore_ipolicy)
+      CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
+                             ignore=self.op.ignore_ipolicy)
       if pnode_info.group != snode_info.group:
         self.LogWarning("The primary and secondary nodes are in two"
                         " different node groups; the disk parameters"
@@ -2595,7 +2595,7 @@ class LUInstanceSetParams(LogicalUnit):
       if self.op.disk_template in constants.DTS_INT_MIRROR:
         assert snode_info
         nodes.append(snode_info)
-      has_es = lambda n: _IsExclusiveStorageEnabledNode(self.cfg, n)
+      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
       if compat.any(map(has_es, nodes)):
         errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                   " storage is enabled" % (instance.disk_template,
@@ -2653,8 +2653,8 @@ class LUInstanceSetParams(LogicalUnit):
                       self._VerifyDiskModification)
 
     # Prepare disk/NIC modifications
-    self.diskmod = PrepareContainerMods(self.op.disks, None)
-    self.nicmod = PrepareContainerMods(self.op.nics, _InstNicModPrivate)
+    self.diskmod = _PrepareContainerMods(self.op.disks, None)
+    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
 
     # Check the validity of the `provider' parameter
     if instance.disk_template in constants.DT_EXT:
@@ -2685,8 +2685,8 @@ class LUInstanceSetParams(LogicalUnit):
 
     # OS change
     if self.op.os_name and not self.op.force:
-      _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
-                      self.op.force_variant)
+      CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
+                     self.op.force_variant)
       instance_os = self.op.os_name
     else:
       instance_os = instance.os
@@ -2700,13 +2700,13 @@ class LUInstanceSetParams(LogicalUnit):
     # hvparams processing
     if self.op.hvparams:
       hv_type = instance.hypervisor
-      i_hvdict = _GetUpdatedParams(instance.hvparams, self.op.hvparams)
+      i_hvdict = GetUpdatedParams(instance.hvparams, self.op.hvparams)
       utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
       hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
 
       # local check
       hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
-      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
+      CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
       self.hv_proposed = self.hv_new = hv_new # the new actual values
       self.hv_inst = i_hvdict # the new dict (without defaults)
     else:
@@ -2716,8 +2716,8 @@ class LUInstanceSetParams(LogicalUnit):
 
     # beparams processing
     if self.op.beparams:
-      i_bedict = _GetUpdatedParams(instance.beparams, self.op.beparams,
-                                   use_none=True)
+      i_bedict = GetUpdatedParams(instance.beparams, self.op.beparams,
+                                  use_none=True)
       objects.UpgradeBeParams(i_bedict)
       utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
       be_new = cluster.SimpleFillBE(i_bedict)
@@ -2757,8 +2757,8 @@ class LUInstanceSetParams(LogicalUnit):
 
     # osparams processing
     if self.op.osparams:
-      i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
-      _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
+      i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
+      CheckOSParams(self, True, nodelist, instance_os, i_osdict)
       self.os_inst = i_osdict # the new dict (without defaults)
     else:
       self.os_inst = {}
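The hvparams, beparams and osparams hunks above all funnel user overrides through GetUpdatedParams before the cluster defaults are filled back in. The merge rule is simple: explicit values win, while the special "default" value (and, with use_none, an explicit None) drops a key so the cluster-level default applies again. Here is a hedged, self-contained restatement of that rule; VALUE_DEFAULT is a stand-in for constants.VALUE_DEFAULT and the sketch is not the real helper.

VALUE_DEFAULT = "default"  # stand-in for constants.VALUE_DEFAULT


def get_updated_params(old_params, update_dict, use_none=False):
  # Simplified sketch: explicit values override, VALUE_DEFAULT (and None,
  # when use_none is set) removes a key so cluster defaults shine through.
  params = dict(old_params)
  for key, val in update_dict.items():
    if val == VALUE_DEFAULT or (use_none and val is None):
      params.pop(key, None)
    else:
      params[key] = val
  return params


updated = get_updated_params({"kernel_path": "/boot/vmlinuz", "acpi": True},
                             {"acpi": VALUE_DEFAULT, "serial_console": True})
assert updated == {"kernel_path": "/boot/vmlinuz", "serial_console": True}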
@@ -2847,9 +2847,9 @@ class LUInstanceSetParams(LogicalUnit):
 
       delta = self.op.runtime_mem - current_memory
       if delta > 0:
-        _CheckNodeFreeMemory(self, instance.primary_node,
-                             "ballooning memory for instance %s" %
-                             instance.name, delta, instance.hypervisor)
+        CheckNodeFreeMemory(self, instance.primary_node,
+                            "ballooning memory for instance %s" %
+                            instance.name, delta, instance.hypervisor)
 
     if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
       raise errors.OpPrereqError("Disk operations not supported for"
@@ -2873,8 +2873,8 @@ class LUInstanceSetParams(LogicalUnit):
 
     # Verify NIC changes (operating on copy)
     nics = instance.nics[:]
-    ApplyContainerMods("NIC", nics, None, self.nicmod,
-                       _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
+    _ApplyContainerMods("NIC", nics, None, self.nicmod,
+                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
     if len(nics) > constants.MAX_NICS:
       raise errors.OpPrereqError("Instance has too many network interfaces"
                                  " (%d), cannot add more" % constants.MAX_NICS,
@@ -2885,8 +2885,8 @@ class LUInstanceSetParams(LogicalUnit):
 
     # Verify disk changes (operating on a copy)
     disks = copy.deepcopy(instance.disks)
-    ApplyContainerMods("disk", disks, None, self.diskmod, None, _PrepareDiskMod,
-                       None)
+    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
+                        _PrepareDiskMod, None)
     utils.ValidateDeviceNames("disk", disks)
     if len(disks) > constants.MAX_DISKS:
       raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
@@ -2899,16 +2899,16 @@ class LUInstanceSetParams(LogicalUnit):
     ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
 
     if self.op.offline is not None and self.op.offline:
-      _CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
-                          msg="can't change to offline")
+      CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
+                         msg="can't change to offline")
 
     # Pre-compute NIC changes (necessary to use result in hooks)
     self._nic_chgdesc = []
     if self.nicmod:
       # Operate on copies as this is still in prereq
       nics = [nic.Copy() for nic in instance.nics]
-      ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
-                         self._CreateNewNic, self._ApplyNicMods, None)
+      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
+                          self._CreateNewNic, self._ApplyNicMods, None)
       # Verify that NIC names are unique and valid
       utils.ValidateDeviceNames("NIC", nics)
       self._new_nics = nics
@@ -2967,24 +2967,24 @@ class LUInstanceSetParams(LogicalUnit):
                   constants.IDISK_VG: d.logical_id[0],
                   constants.IDISK_NAME: d.name}
                  for d in instance.disks]
-    new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
-                                      instance.name, pnode, [snode],
-                                      disk_info, None, None, 0, feedback_fn,
-                                      self.diskparams)
+    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
+                                     instance.name, pnode, [snode],
+                                     disk_info, None, None, 0, feedback_fn,
+                                     self.diskparams)
     anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
                                         self.diskparams)
-    p_excl_stor = _IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
-    s_excl_stor = _IsExclusiveStorageEnabledNodeName(self.cfg, snode)
-    info = _GetInstanceInfoText(instance)
+    p_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
+    s_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, snode)
+    info = GetInstanceInfoText(instance)
     feedback_fn("Creating additional volumes...")
     # first, create the missing data and meta devices
     for disk in anno_disks:
       # unfortunately this is... not too nice
-      _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
-                            info, True, p_excl_stor)
+      CreateSingleBlockDev(self, pnode, instance, disk.children[1],
+                           info, True, p_excl_stor)
       for child in disk.children:
-        _CreateSingleBlockDev(self, snode, instance, child, info, True,
-                              s_excl_stor)
+        CreateSingleBlockDev(self, snode, instance, child, info, True,
+                             s_excl_stor)
     # at this stage, all new LVs have been created, we can rename the
     # old ones
     feedback_fn("Renaming original volumes...")
@@ -2999,8 +2999,8 @@ class LUInstanceSetParams(LogicalUnit):
       for disk in anno_disks:
         for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
           f_create = node == pnode
-          _CreateSingleBlockDev(self, node, instance, disk, info, f_create,
-                                excl_stor)
+          CreateSingleBlockDev(self, node, instance, disk, info, f_create,
+                               excl_stor)
     except errors.GenericError, e:
       feedback_fn("Initializing of DRBD devices failed;"
                   " renaming back original volumes...")
@@ -3018,11 +3018,11 @@ class LUInstanceSetParams(LogicalUnit):
     self.cfg.Update(instance, feedback_fn)
 
     # Release node locks while waiting for sync
-    _ReleaseLocks(self, locking.LEVEL_NODE)
+    ReleaseLocks(self, locking.LEVEL_NODE)
 
     # disks are created, waiting for sync
-    disk_abort = not _WaitForSync(self, instance,
-                                  oneshot=not self.op.wait_for_sync)
+    disk_abort = not WaitForSync(self, instance,
+                                 oneshot=not self.op.wait_for_sync)
     if disk_abort:
       raise errors.OpExecError("There are some degraded disks for"
                                " this instance, please cleanup manually")
@@ -3042,7 +3042,7 @@ class LUInstanceSetParams(LogicalUnit):
     snode = instance.secondary_nodes[0]
     feedback_fn("Converting template to plain")
 
-    old_disks = _AnnotateDiskParams(instance, instance.disks, self.cfg)
+    old_disks = AnnotateDiskParams(instance, instance.disks, self.cfg)
     new_disks = [d.children[0] for d in instance.disks]
 
     # copy over size, mode and name
@@ -3064,7 +3064,7 @@ class LUInstanceSetParams(LogicalUnit):
     self.cfg.Update(instance, feedback_fn)
 
     # Release locks in case removing disks takes a while
-    _ReleaseLocks(self, locking.LEVEL_NODE)
+    ReleaseLocks(self, locking.LEVEL_NODE)
 
     feedback_fn("Removing volumes on the secondary node...")
     for disk in old_disks:
@@ -3097,12 +3097,12 @@ class LUInstanceSetParams(LogicalUnit):
       file_driver = file_path = None
 
     disk = \
-      _GenerateDiskTemplate(self, instance.disk_template, instance.name,
-                            instance.primary_node, instance.secondary_nodes,
-                            [params], file_path, file_driver, idx,
-                            self.Log, self.diskparams)[0]
+      GenerateDiskTemplate(self, instance.disk_template, instance.name,
+                           instance.primary_node, instance.secondary_nodes,
+                           [params], file_path, file_driver, idx,
+                           self.Log, self.diskparams)[0]
 
-    info = _GetInstanceInfoText(instance)
+    info = GetInstanceInfoText(instance)
 
     logging.info("Creating volume %s for instance %s",
                  disk.iv_name, instance.name)
@@ -3111,15 +3111,15 @@ class LUInstanceSetParams(LogicalUnit):
     for node in instance.all_nodes:
       f_create = (node == instance.primary_node)
       try:
-        _CreateBlockDev(self, node, instance, disk, f_create, info, f_create)
+        CreateBlockDev(self, node, instance, disk, f_create, info, f_create)
       except errors.OpExecError, err:
         self.LogWarning("Failed to create volume %s (%s) on node '%s': %s",
                         disk.iv_name, disk, node, err)
 
     if self.cluster.prealloc_wipe_disks:
       # Wipe new disk
-      _WipeDisks(self, instance,
-                 disks=[(idx, disk, 0)])
+      WipeDisks(self, instance,
+                disks=[(idx, disk, 0)])
 
     return (disk, [
       ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
@@ -3146,7 +3146,7 @@ class LUInstanceSetParams(LogicalUnit):
     """Removes a disk.
 
     """
-    (anno_disk,) = _AnnotateDiskParams(self.instance, [root], self.cfg)
+    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
     for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
       self.cfg.SetDiskID(disk, node)
       msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
@@ -3238,8 +3238,9 @@ class LUInstanceSetParams(LogicalUnit):
       result.append(("runtime_memory", self.op.runtime_mem))
 
     # Apply disk changes
-    ApplyContainerMods("disk", instance.disks, result, self.diskmod,
-                       self._CreateNewDisk, self._ModifyDisk, self._RemoveDisk)
+    _ApplyContainerMods("disk", instance.disks, result, self.diskmod,
+                        self._CreateNewDisk, self._ModifyDisk,
+                        self._RemoveDisk)
     _UpdateIvNames(0, instance.disks)
 
     if self.op.disk_template:
@@ -3253,7 +3254,7 @@ class LUInstanceSetParams(LogicalUnit):
             ("Not owning the correct locks, owning %r, expected at least %r" %
              (owned, check_nodes))
 
-      r_shut = _ShutdownInstanceDisks(self, instance)
+      r_shut = ShutdownInstanceDisks(self, instance)
       if not r_shut:
         raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                  " proceed with disk template conversion")
@@ -3271,8 +3272,8 @@ class LUInstanceSetParams(LogicalUnit):
 
     # Release node and resource locks if there are any (they might already have
     # been released during disk conversion)
-    _ReleaseLocks(self, locking.LEVEL_NODE)
-    _ReleaseLocks(self, locking.LEVEL_NODE_RES)
+    ReleaseLocks(self, locking.LEVEL_NODE)
+    ReleaseLocks(self, locking.LEVEL_NODE_RES)
 
     # Apply NIC changes
     if self._new_nics is not None:
@@ -3333,7 +3334,7 @@ class LUInstanceChangeGroup(LogicalUnit):
   REQ_BGL = False
 
   def ExpandNames(self):
-    self.share_locks = _ShareAll()
+    self.share_locks = ShareAll()
 
     self.needed_locks = {
       locking.LEVEL_NODEGROUP: [],
@@ -3349,7 +3350,7 @@ class LUInstanceChangeGroup(LogicalUnit):
     else:
       self.req_target_uuids = None
 
-    self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)
+    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
 
   def DeclareLocks(self, level):
     if level == locking.LEVEL_NODEGROUP:
@@ -3402,8 +3403,8 @@ class LUInstanceChangeGroup(LogicalUnit):
       ("Instance %s's nodes changed while we kept the lock" %
        self.op.instance_name)
 
-    inst_groups = _CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
-                                           owned_groups)
+    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
+                                          owned_groups)
 
     if self.req_target_uuids:
       # User requested specific target groups
@@ -3434,7 +3435,7 @@ class LUInstanceChangeGroup(LogicalUnit):
       "TARGET_GROUPS": " ".join(self.target_uuids),
       }
 
-    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
+    env.update(BuildInstanceHookEnvByObject(self, self.instance))
 
     return env
 
@@ -3462,7 +3463,7 @@ class LUInstanceChangeGroup(LogicalUnit):
                                  (self.op.instance_name, self.op.iallocator,
                                   ial.info), errors.ECODE_NORES)
 
-    jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
+    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
 
     self.LogInfo("Iallocator returned %s job(s) for changing group of"
                  " instance '%s'", len(jobs), self.op.instance_name)
index d3360e8..201accb 100644 (file)
@@ -30,13 +30,13 @@ from ganeti import locking
 from ganeti.masterd import iallocator
 from ganeti import utils
 from ganeti.cmdlib.base import LogicalUnit, Tasklet
-from ganeti.cmdlib.common import _ExpandInstanceName, \
-  _CheckIAllocatorOrNode, _ExpandNodeName
-from ganeti.cmdlib.instance_storage import _CheckDiskConsistency, \
-  _ExpandCheckDisks, _ShutdownInstanceDisks, _AssembleInstanceDisks
-from ganeti.cmdlib.instance_utils import _BuildInstanceHookEnvByObject, \
-  _CheckTargetNodeIPolicy, _ReleaseLocks, _CheckNodeNotDrained, \
-  _CopyLockList, _CheckNodeFreeMemory, _CheckInstanceBridgesExist
+from ganeti.cmdlib.common import ExpandInstanceName, \
+  CheckIAllocatorOrNode, ExpandNodeName
+from ganeti.cmdlib.instance_storage import CheckDiskConsistency, \
+  ExpandCheckDisks, ShutdownInstanceDisks, AssembleInstanceDisks
+from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
+  CheckTargetNodeIPolicy, ReleaseLocks, CheckNodeNotDrained, \
+  CopyLockList, CheckNodeFreeMemory, CheckInstanceBridgesExist
 
 import ganeti.masterd.instance
 
@@ -48,7 +48,7 @@ def _ExpandNamesForMigration(lu):
 
   """
   if lu.op.target_node is not None:
-    lu.op.target_node = _ExpandNodeName(lu.cfg, lu.op.target_node)
+    lu.op.target_node = ExpandNodeName(lu.cfg, lu.op.target_node)
 
   lu.needed_locks[locking.LEVEL_NODE] = []
   lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
@@ -94,7 +94,7 @@ def _DeclareLocksForMigration(lu, level):
   elif level == locking.LEVEL_NODE_RES:
     # Copy node locks
     lu.needed_locks[locking.LEVEL_NODE_RES] = \
-      _CopyLockList(lu.needed_locks[locking.LEVEL_NODE])
+      CopyLockList(lu.needed_locks[locking.LEVEL_NODE])
 
 
 class LUInstanceFailover(LogicalUnit):
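The "copy node locks" pattern above (also used by LUInstanceRemove, LUInstanceMove and LUInstanceSetParams earlier in this diff) exists because the node-resource lock level must start from the same node names as the node level, but as an independent list, and the ALL_SET marker has to pass through untouched. A rough sketch of that behaviour follows, with a stand-in sentinel instead of locking.ALL_SET; it is not the real CopyLockList.

ALL_SET = object()  # stand-in for locking.ALL_SET


def copy_lock_list(names):
  # Return an independent copy so later edits to one lock level do not
  # leak into the other; the ALL_SET sentinel is passed through as-is.
  if names is ALL_SET:
    return ALL_SET
  return list(names)


node_locks = ["node1", "node2"]
node_res_locks = copy_lock_list(node_locks)
node_locks.append("node3")
assert node_res_locks == ["node1", "node2"]
assert copy_lock_list(ALL_SET) is ALL_SET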
@@ -148,7 +148,7 @@ class LUInstanceFailover(LogicalUnit):
     else:
       env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = ""
 
-    env.update(_BuildInstanceHookEnvByObject(self, instance))
+    env.update(BuildInstanceHookEnvByObject(self, instance))
 
     return env
 
@@ -197,7 +197,7 @@ class LUInstanceMigrate(LogicalUnit):
     instance = self._migrater.instance
     source_node = instance.primary_node
     target_node = self.op.target_node
-    env = _BuildInstanceHookEnvByObject(self, instance)
+    env = BuildInstanceHookEnvByObject(self, instance)
     env.update({
       "MIGRATE_LIVE": self._migrater.live,
       "MIGRATE_CLEANUP": self.op.cleanup,
@@ -280,7 +280,7 @@ class TLMigrateInstance(Tasklet):
     This checks that the instance is in the cluster.
 
     """
-    instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
+    instance_name = ExpandInstanceName(self.lu.cfg, self.instance_name)
     instance = self.cfg.GetInstanceInfo(instance_name)
     assert instance is not None
     self.instance = instance
@@ -303,7 +303,7 @@ class TLMigrateInstance(Tasklet):
                                  errors.ECODE_STATE)
 
     if instance.disk_template in constants.DTS_EXT_MIRROR:
-      _CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")
+      CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")
 
       if self.lu.op.iallocator:
         assert locking.NAL in self.lu.owned_locks(locking.LEVEL_NODE_ALLOC)
@@ -318,8 +318,8 @@ class TLMigrateInstance(Tasklet):
       group_info = self.cfg.GetNodeGroup(nodeinfo.group)
       ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                               group_info)
-      _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo, self.cfg,
-                              ignore=self.ignore_ipolicy)
+      CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo, self.cfg,
+                             ignore=self.ignore_ipolicy)
 
       # self.target_node is already populated, either directly or by the
       # iallocator run
@@ -333,9 +333,9 @@ class TLMigrateInstance(Tasklet):
       if len(self.lu.tasklets) == 1:
         # It is safe to release locks only when we're the only tasklet
         # in the LU
-        _ReleaseLocks(self.lu, locking.LEVEL_NODE,
-                      keep=[instance.primary_node, self.target_node])
-        _ReleaseLocks(self.lu, locking.LEVEL_NODE_ALLOC)
+        ReleaseLocks(self.lu, locking.LEVEL_NODE,
+                     keep=[instance.primary_node, self.target_node])
+        ReleaseLocks(self.lu, locking.LEVEL_NODE_ALLOC)
 
     else:
       assert not self.lu.glm.is_owned(locking.LEVEL_NODE_ALLOC)
@@ -362,19 +362,19 @@ class TLMigrateInstance(Tasklet):
       group_info = self.cfg.GetNodeGroup(nodeinfo.group)
       ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                               group_info)
-      _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo, self.cfg,
-                              ignore=self.ignore_ipolicy)
+      CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo, self.cfg,
+                             ignore=self.ignore_ipolicy)
 
     i_be = cluster.FillBE(instance)
 
     # check memory requirements on the secondary node
     if (not self.cleanup and
          (not self.failover or instance.admin_state == constants.ADMINST_UP)):
-      self.tgt_free_mem = _CheckNodeFreeMemory(self.lu, target_node,
-                                               "migrating instance %s" %
-                                               instance.name,
-                                               i_be[constants.BE_MINMEM],
-                                               instance.hypervisor)
+      self.tgt_free_mem = CheckNodeFreeMemory(self.lu, target_node,
+                                              "migrating instance %s" %
+                                              instance.name,
+                                              i_be[constants.BE_MINMEM],
+                                              instance.hypervisor)
     else:
       self.lu.LogInfo("Not checking memory on the secondary node as"
                       " instance will not be started")
@@ -387,10 +387,10 @@ class TLMigrateInstance(Tasklet):
       self.failover = True
 
     # check bridge existance
-    _CheckInstanceBridgesExist(self.lu, instance, node=target_node)
+    CheckInstanceBridgesExist(self.lu, instance, node=target_node)
 
     if not self.cleanup:
-      _CheckNodeNotDrained(self.lu, target_node)
+      CheckNodeNotDrained(self.lu, target_node)
       if not self.failover:
         result = self.rpc.call_instance_migratable(instance.primary_node,
                                                    instance)
@@ -671,7 +671,7 @@ class TLMigrateInstance(Tasklet):
 
     self.feedback_fn("* checking disk consistency between source and target")
     for (idx, dev) in enumerate(instance.disks):
-      if not _CheckDiskConsistency(self.lu, instance, dev, target_node, False):
+      if not CheckDiskConsistency(self.lu, instance, dev, target_node, False):
         raise errors.OpExecError("Disk %s is degraded or not fully"
                                  " synchronized on target node,"
                                  " aborting migration" % idx)
@@ -805,7 +805,7 @@ class TLMigrateInstance(Tasklet):
     # If the instance's disk template is `rbd' or `ext' and there was a
     # successful migration, unmap the device from the source node.
     if self.instance.disk_template in (constants.DT_RBD, constants.DT_EXT):
-      disks = _ExpandCheckDisks(instance, instance.disks)
+      disks = ExpandCheckDisks(instance, instance.disks)
       self.feedback_fn("* unmapping instance's disks from %s" % source_node)
       for disk in disks:
         result = self.rpc.call_blockdev_shutdown(source_node, (disk, instance))
@@ -836,8 +836,8 @@ class TLMigrateInstance(Tasklet):
       self.feedback_fn("* checking disk consistency between source and target")
       for (idx, dev) in enumerate(instance.disks):
         # for drbd, these are drbd over lvm
-        if not _CheckDiskConsistency(self.lu, instance, dev, target_node,
-                                     False):
+        if not CheckDiskConsistency(self.lu, instance, dev, target_node,
+                                    False):
           if primary_node.offline:
             self.feedback_fn("Node %s is offline, ignoring degraded disk %s on"
                              " target node %s" %
@@ -869,7 +869,7 @@ class TLMigrateInstance(Tasklet):
                                  (instance.name, source_node, msg))
 
     self.feedback_fn("* deactivating the instance's disks on source node")
-    if not _ShutdownInstanceDisks(self.lu, instance, ignore_primary=True):
+    if not ShutdownInstanceDisks(self.lu, instance, ignore_primary=True):
       raise errors.OpExecError("Can't shut down the instance's disks")
 
     instance.primary_node = target_node
@@ -883,10 +883,10 @@ class TLMigrateInstance(Tasklet):
       logging.info("Starting instance %s on node %s",
                    instance.name, target_node)
 
-      disks_ok, _ = _AssembleInstanceDisks(self.lu, instance,
-                                           ignore_secondaries=True)
+      disks_ok, _ = AssembleInstanceDisks(self.lu, instance,
+                                          ignore_secondaries=True)
       if not disks_ok:
-        _ShutdownInstanceDisks(self.lu, instance)
+        ShutdownInstanceDisks(self.lu, instance)
         raise errors.OpExecError("Can't activate the instance's disks")
 
       self.feedback_fn("* starting the instance on the target node %s" %
@@ -895,7 +895,7 @@ class TLMigrateInstance(Tasklet):
                                             False, self.lu.op.reason)
       msg = result.fail_msg
       if msg:
-        _ShutdownInstanceDisks(self.lu, instance)
+        ShutdownInstanceDisks(self.lu, instance)
         raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                  (instance.name, target_node, msg))
 
index 4129bb0..7137c15 100644 (file)
@@ -36,12 +36,12 @@ from ganeti import objects
 from ganeti import utils
 from ganeti.cmdlib.base import LogicalUnit, NoHooksLU
 from ganeti.cmdlib.common import INSTANCE_ONLINE, INSTANCE_DOWN, \
-  _CheckHVParams, _CheckInstanceState, _CheckNodeOnline, _ExpandNodeName, \
-  _GetUpdatedParams, _CheckOSParams, _ShareAll
-from ganeti.cmdlib.instance_storage import _StartInstanceDisks, \
-  _ShutdownInstanceDisks
-from ganeti.cmdlib.instance_utils import _BuildInstanceHookEnvByObject, \
-  _CheckInstanceBridgesExist, _CheckNodeFreeMemory, _CheckNodeHasOS
+  CheckHVParams, CheckInstanceState, CheckNodeOnline, ExpandNodeName, \
+  GetUpdatedParams, CheckOSParams, ShareAll
+from ganeti.cmdlib.instance_storage import StartInstanceDisks, \
+  ShutdownInstanceDisks
+from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
+  CheckInstanceBridgesExist, CheckNodeFreeMemory, CheckNodeHasOS
 
 
 class LUInstanceStartup(LogicalUnit):
@@ -77,7 +77,7 @@ class LUInstanceStartup(LogicalUnit):
       "FORCE": self.op.force,
       }
 
-    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
+    env.update(BuildInstanceHookEnvByObject(self, self.instance))
 
     return env
 
@@ -107,9 +107,9 @@ class LUInstanceStartup(LogicalUnit):
       filled_hvp.update(self.op.hvparams)
       hv_type = hypervisor.GetHypervisorClass(instance.hypervisor)
       hv_type.CheckParameterSyntax(filled_hvp)
-      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
+      CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
 
-    _CheckInstanceState(self, instance, INSTANCE_ONLINE)
+    CheckInstanceState(self, instance, INSTANCE_ONLINE)
 
     self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline
 
@@ -119,13 +119,13 @@ class LUInstanceStartup(LogicalUnit):
       if self.op.hvparams or self.op.beparams:
         self.LogWarning("Overridden parameters are ignored")
     else:
-      _CheckNodeOnline(self, instance.primary_node)
+      CheckNodeOnline(self, instance.primary_node)
 
       bep = self.cfg.GetClusterInfo().FillBE(instance)
       bep.update(self.op.beparams)
 
       # check bridges existence
-      _CheckInstanceBridgesExist(self, instance)
+      CheckInstanceBridgesExist(self, instance)
 
       remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                 instance.name,
@@ -133,9 +133,9 @@ class LUInstanceStartup(LogicalUnit):
       remote_info.Raise("Error checking node %s" % instance.primary_node,
                         prereq=True, ecode=errors.ECODE_ENVIRON)
       if not remote_info.payload: # not running already
-        _CheckNodeFreeMemory(self, instance.primary_node,
-                             "starting instance %s" % instance.name,
-                             bep[constants.BE_MINMEM], instance.hypervisor)
+        CheckNodeFreeMemory(self, instance.primary_node,
+                            "starting instance %s" % instance.name,
+                            bep[constants.BE_MINMEM], instance.hypervisor)
 
   def Exec(self, feedback_fn):
     """Start the instance.
@@ -154,7 +154,7 @@ class LUInstanceStartup(LogicalUnit):
     else:
       node_current = instance.primary_node
 
-      _StartInstanceDisks(self, instance, force)
+      StartInstanceDisks(self, instance, force)
 
       result = \
         self.rpc.call_instance_start(node_current,
@@ -163,7 +163,7 @@ class LUInstanceStartup(LogicalUnit):
                                      self.op.startup_paused, reason)
       msg = result.fail_msg
       if msg:
-        _ShutdownInstanceDisks(self, instance)
+        ShutdownInstanceDisks(self, instance)
         raise errors.OpExecError("Could not start instance: %s" % msg)
 
 
@@ -184,7 +184,7 @@ class LUInstanceShutdown(LogicalUnit):
     This runs on master, primary and secondary nodes of the instance.
 
     """
-    env = _BuildInstanceHookEnvByObject(self, self.instance)
+    env = BuildInstanceHookEnvByObject(self, self.instance)
     env["TIMEOUT"] = self.op.timeout
     return env
 
@@ -206,7 +206,7 @@ class LUInstanceShutdown(LogicalUnit):
       "Cannot retrieve locked instance %s" % self.op.instance_name
 
     if not self.op.force:
-      _CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
+      CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
     else:
       self.LogWarning("Ignoring offline instance check")
 
@@ -216,7 +216,7 @@ class LUInstanceShutdown(LogicalUnit):
     if self.primary_offline and self.op.ignore_offline_nodes:
       self.LogWarning("Ignoring offline primary node")
     else:
-      _CheckNodeOnline(self, self.instance.primary_node)
+      CheckNodeOnline(self, self.instance.primary_node)
 
   def Exec(self, feedback_fn):
     """Shutdown the instance.
@@ -242,7 +242,7 @@ class LUInstanceShutdown(LogicalUnit):
       if msg:
         self.LogWarning("Could not shutdown instance: %s", msg)
 
-      _ShutdownInstanceDisks(self, instance)
+      ShutdownInstanceDisks(self, instance)
 
 
 class LUInstanceReinstall(LogicalUnit):
@@ -262,7 +262,7 @@ class LUInstanceReinstall(LogicalUnit):
     This runs on master, primary and secondary nodes of the instance.
 
     """
-    return _BuildInstanceHookEnvByObject(self, self.instance)
+    return BuildInstanceHookEnvByObject(self, self.instance)
 
   def BuildHooksNodes(self):
     """Build hooks nodes.
@@ -280,19 +280,19 @@ class LUInstanceReinstall(LogicalUnit):
     instance = self.cfg.GetInstanceInfo(self.op.instance_name)
     assert instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
-    _CheckNodeOnline(self, instance.primary_node, "Instance primary node"
-                     " offline, cannot reinstall")
+    CheckNodeOnline(self, instance.primary_node, "Instance primary node"
+                    " offline, cannot reinstall")
 
     if instance.disk_template == constants.DT_DISKLESS:
       raise errors.OpPrereqError("Instance '%s' has no disks" %
                                  self.op.instance_name,
                                  errors.ECODE_INVAL)
-    _CheckInstanceState(self, instance, INSTANCE_DOWN, msg="cannot reinstall")
+    CheckInstanceState(self, instance, INSTANCE_DOWN, msg="cannot reinstall")
 
     if self.op.os_type is not None:
       # OS verification
-      pnode = _ExpandNodeName(self.cfg, instance.primary_node)
-      _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
+      pnode = ExpandNodeName(self.cfg, instance.primary_node)
+      CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
       instance_os = self.op.os_type
     else:
       instance_os = instance.os
@@ -300,8 +300,8 @@ class LUInstanceReinstall(LogicalUnit):
     nodelist = list(instance.all_nodes)
 
     if self.op.osparams:
-      i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
-      _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
+      i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
+      CheckOSParams(self, True, nodelist, instance_os, i_osdict)
       self.os_inst = i_osdict # the new dict (without defaults)
     else:
       self.os_inst = None
@@ -320,7 +320,7 @@ class LUInstanceReinstall(LogicalUnit):
       # Write to configuration
       self.cfg.Update(inst, feedback_fn)
 
-    _StartInstanceDisks(self, inst, None)
+    StartInstanceDisks(self, inst, None)
     try:
       feedback_fn("Running the instance OS create scripts...")
       # FIXME: pass debug option from opcode to backend
@@ -330,7 +330,7 @@ class LUInstanceReinstall(LogicalUnit):
       result.Raise("Could not install OS for instance %s on node %s" %
                    (inst.name, inst.primary_node))
     finally:
-      _ShutdownInstanceDisks(self, inst)
+      ShutdownInstanceDisks(self, inst)
 
 
 class LUInstanceReboot(LogicalUnit):
@@ -356,7 +356,7 @@ class LUInstanceReboot(LogicalUnit):
       "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
       }
 
-    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
+    env.update(BuildInstanceHookEnvByObject(self, self.instance))
 
     return env
 
@@ -376,11 +376,11 @@ class LUInstanceReboot(LogicalUnit):
     self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
-    _CheckInstanceState(self, instance, INSTANCE_ONLINE)
-    _CheckNodeOnline(self, instance.primary_node)
+    CheckInstanceState(self, instance, INSTANCE_ONLINE)
+    CheckNodeOnline(self, instance.primary_node)
 
     # check bridges existence
-    _CheckInstanceBridgesExist(self, instance)
+    CheckInstanceBridgesExist(self, instance)
 
   def Exec(self, feedback_fn):
     """Reboot the instance.
@@ -413,24 +413,24 @@ class LUInstanceReboot(LogicalUnit):
                                                  self.op.shutdown_timeout,
                                                  reason)
         result.Raise("Could not shutdown instance for full reboot")
-        _ShutdownInstanceDisks(self, instance)
+        ShutdownInstanceDisks(self, instance)
       else:
         self.LogInfo("Instance %s was already stopped, starting now",
                      instance.name)
-      _StartInstanceDisks(self, instance, ignore_secondaries)
+      StartInstanceDisks(self, instance, ignore_secondaries)
       result = self.rpc.call_instance_start(node_current,
                                             (instance, None, None), False,
                                             reason)
       msg = result.fail_msg
       if msg:
-        _ShutdownInstanceDisks(self, instance)
+        ShutdownInstanceDisks(self, instance)
         raise errors.OpExecError("Could not start instance for"
                                  " full reboot: %s" % msg)
 
     self.cfg.MarkInstanceUp(instance.name)
 
 
-def _GetInstanceConsole(cluster, instance):
+def GetInstanceConsole(cluster, instance):
   """Returns console information for an instance.
 
   @type cluster: L{objects.Cluster}
@@ -462,7 +462,7 @@ class LUInstanceConsole(NoHooksLU):
   REQ_BGL = False
 
   def ExpandNames(self):
-    self.share_locks = _ShareAll()
+    self.share_locks = ShareAll()
     self._ExpandAndLockInstance()
 
   def CheckPrereq(self):
@@ -474,7 +474,7 @@ class LUInstanceConsole(NoHooksLU):
     self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
-    _CheckNodeOnline(self, self.instance.primary_node)
+    CheckNodeOnline(self, self.instance.primary_node)
 
   def Exec(self, feedback_fn):
     """Connect to the console of an instance
@@ -499,4 +499,4 @@ class LUInstanceConsole(NoHooksLU):
 
     logging.debug("Connecting to console of %s on %s", instance.name, node)
 
-    return _GetInstanceConsole(self.cfg.GetClusterInfo(), instance)
+    return GetInstanceConsole(self.cfg.GetClusterInfo(), instance)
index e664fc0..d8c5363 100644 (file)
@@ -30,24 +30,24 @@ from ganeti import constants
 from ganeti import locking
 from ganeti import qlang
 from ganeti import query
-from ganeti.cmdlib.base import _QueryBase, NoHooksLU
-from ganeti.cmdlib.common import _ShareAll, _GetWantedInstances, \
-  _CheckInstanceNodeGroups, _CheckInstancesNodeGroups, _AnnotateDiskParams
-from ganeti.cmdlib.instance_operation import _GetInstanceConsole
-from ganeti.cmdlib.instance_utils import _NICListToTuple
+from ganeti.cmdlib.base import QueryBase, NoHooksLU
+from ganeti.cmdlib.common import ShareAll, GetWantedInstances, \
+  CheckInstanceNodeGroups, CheckInstancesNodeGroups, AnnotateDiskParams
+from ganeti.cmdlib.instance_operation import GetInstanceConsole
+from ganeti.cmdlib.instance_utils import NICListToTuple
 
 import ganeti.masterd.instance
 
 
-class _InstanceQuery(_QueryBase):
+class InstanceQuery(QueryBase):
   FIELDS = query.INSTANCE_FIELDS
 
   def ExpandNames(self, lu):
     lu.needed_locks = {}
-    lu.share_locks = _ShareAll()
+    lu.share_locks = ShareAll()
 
     if self.names:
-      self.wanted = _GetWantedInstances(lu, self.names)
+      self.wanted = GetWantedInstances(lu, self.names)
     else:
       self.wanted = locking.ALL_SET
 
@@ -90,7 +90,7 @@ class _InstanceQuery(_QueryBase):
 
     # Check if node groups for locked instances are still correct
     for instance_name in owned_instances:
-      _CheckInstanceNodeGroups(lu.cfg, instance_name, owned_groups)
+      CheckInstanceNodeGroups(lu.cfg, instance_name, owned_groups)
 
   def _GetQueryData(self, lu):
     """Computes the list of instances and their attributes.
@@ -155,7 +155,7 @@ class _InstanceQuery(_QueryBase):
       for inst in instance_list:
         if inst.name in live_data:
           # Instance is running
-          consinfo[inst.name] = _GetInstanceConsole(cluster, inst)
+          consinfo[inst.name] = GetInstanceConsole(cluster, inst)
         else:
           consinfo[inst.name] = None
       assert set(consinfo.keys()) == set(instance_names)
@@ -194,7 +194,7 @@ class LUInstanceQuery(NoHooksLU):
   REQ_BGL = False
 
   def CheckArguments(self):
-    self.iq = _InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
-                             self.op.output_fields, self.op.use_locking)
+    self.iq = InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
+                            self.op.output_fields, self.op.use_locking)
 
   def ExpandNames(self):
@@ -223,13 +223,13 @@ class LUInstanceQueryData(NoHooksLU):
 
     if self.op.instances or not self.op.use_locking:
       # Expand instance names right here
-      self.wanted_names = _GetWantedInstances(self, self.op.instances)
+      self.wanted_names = GetWantedInstances(self, self.op.instances)
     else:
       # Will use acquired locks
       self.wanted_names = None
 
     if self.op.use_locking:
-      self.share_locks = _ShareAll()
+      self.share_locks = ShareAll()
 
       if self.wanted_names is None:
         self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
@@ -282,8 +282,8 @@ class LUInstanceQueryData(NoHooksLU):
     instances = dict(self.cfg.GetMultiInstanceInfo(self.wanted_names))
 
     if self.op.use_locking:
-      _CheckInstancesNodeGroups(self.cfg, instances, owned_groups, owned_nodes,
-                                None)
+      CheckInstancesNodeGroups(self.cfg, instances, owned_groups, owned_nodes,
+                               None)
     else:
       assert not (owned_instances or owned_groups or
                   owned_nodes or owned_networks)
@@ -317,7 +317,7 @@ class LUInstanceQueryData(NoHooksLU):
     """Compute block device status.
 
     """
-    (anno_dev,) = _AnnotateDiskParams(instance, [dev], self.cfg)
+    (anno_dev,) = AnnotateDiskParams(instance, [dev], self.cfg)
 
     return self._ComputeDiskStatusInner(instance, snode, anno_dev)
 
@@ -413,7 +413,7 @@ class LUInstanceQueryData(NoHooksLU):
         "snodes_group_names": map(group2name_fn, snodes_group_uuids),
         "os": instance.os,
         # this happens to be the same format used for hooks
-        "nics": _NICListToTuple(self, instance.nics),
+        "nics": NICListToTuple(self, instance.nics),
         "disk_template": instance.disk_template,
         "disks": disks,
         "hypervisor": instance.hypervisor,
lib/cmdlib/instance_storage.py
index bfdee08..6f0b089 100644 (file)
@@ -38,12 +38,12 @@ from ganeti import opcodes
 from ganeti import rpc
 from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, Tasklet
 from ganeti.cmdlib.common import INSTANCE_DOWN, INSTANCE_NOT_RUNNING, \
-  _AnnotateDiskParams, _CheckIAllocatorOrNode, _ExpandNodeName, \
-  _CheckNodeOnline, _CheckInstanceNodeGroups, _CheckInstanceState, \
-  _IsExclusiveStorageEnabledNode, _FindFaultyInstanceDisks
-from ganeti.cmdlib.instance_utils import _GetInstanceInfoText, \
-  _CopyLockList, _ReleaseLocks, _CheckNodeVmCapable, \
-  _BuildInstanceHookEnvByObject, _CheckNodeNotDrained, _CheckTargetNodeIPolicy
+  AnnotateDiskParams, CheckIAllocatorOrNode, ExpandNodeName, \
+  CheckNodeOnline, CheckInstanceNodeGroups, CheckInstanceState, \
+  IsExclusiveStorageEnabledNode, FindFaultyInstanceDisks
+from ganeti.cmdlib.instance_utils import GetInstanceInfoText, \
+  CopyLockList, ReleaseLocks, CheckNodeVmCapable, \
+  BuildInstanceHookEnvByObject, CheckNodeNotDrained, CheckTargetNodeIPolicy
 
 import ganeti.masterd.instance
 
@@ -65,8 +65,8 @@ _DISK_TEMPLATE_DEVICE_TYPE = {
   }
 
 
-def _CreateSingleBlockDev(lu, node, instance, device, info, force_open,
-                          excl_stor):
+def CreateSingleBlockDev(lu, node, instance, device, info, force_open,
+                         excl_stor):
   """Create a single block device on a given node.
 
   This will not recurse over children of the device, so they must be
@@ -146,8 +146,8 @@ def _CreateBlockDevInner(lu, node, instance, device, force_create,
     if not force_create:
       return created_devices
 
-    _CreateSingleBlockDev(lu, node, instance, device, info, force_open,
-                          excl_stor)
+    CreateSingleBlockDev(lu, node, instance, device, info, force_open,
+                         excl_stor)
     # The device has been completely created, so there is no point in keeping
     # its subdevices in the list. We just add the device itself instead.
     created_devices = [(node, device)]
@@ -160,7 +160,7 @@ def _CreateBlockDevInner(lu, node, instance, device, force_create,
     raise errors.DeviceCreationError(str(e), created_devices)
 
 
-def _IsExclusiveStorageEnabledNodeName(cfg, nodename):
+def IsExclusiveStorageEnabledNodeName(cfg, nodename):
   """Whether exclusive_storage is in effect for the given node.
 
   @type cfg: L{config.ConfigWriter}
@@ -176,23 +176,23 @@ def _IsExclusiveStorageEnabledNodeName(cfg, nodename):
   if ni is None:
     raise errors.OpPrereqError("Invalid node name %s" % nodename,
                                errors.ECODE_NOENT)
-  return _IsExclusiveStorageEnabledNode(cfg, ni)
+  return IsExclusiveStorageEnabledNode(cfg, ni)
 
 
-def _CreateBlockDev(lu, node, instance, device, force_create, info,
+def CreateBlockDev(lu, node, instance, device, force_create, info,
                     force_open):
   """Wrapper around L{_CreateBlockDevInner}.
 
   This method annotates the root device first.
 
   """
-  (disk,) = _AnnotateDiskParams(instance, [device], lu.cfg)
-  excl_stor = _IsExclusiveStorageEnabledNodeName(lu.cfg, node)
+  (disk,) = AnnotateDiskParams(instance, [device], lu.cfg)
+  excl_stor = IsExclusiveStorageEnabledNodeName(lu.cfg, node)
   return _CreateBlockDevInner(lu, node, instance, disk, force_create, info,
                               force_open, excl_stor)
 
 
-def _CreateDisks(lu, instance, to_skip=None, target_node=None):
+def CreateDisks(lu, instance, to_skip=None, target_node=None):
   """Create all disks for an instance.
 
   This abstracts away some work from AddInstance.
@@ -209,7 +209,7 @@ def _CreateDisks(lu, instance, to_skip=None, target_node=None):
   @return: the success of the creation
 
   """
-  info = _GetInstanceInfoText(instance)
+  info = GetInstanceInfoText(instance)
   if target_node is None:
     pnode = instance.primary_node
     all_nodes = instance.all_nodes
@@ -235,7 +235,7 @@ def _CreateDisks(lu, instance, to_skip=None, target_node=None):
     for node in all_nodes:
       f_create = node == pnode
       try:
-        _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
+        CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
         disks_created.append((node, device))
       except errors.OpExecError:
         logging.warning("Creating disk %s for instance '%s' failed",
@@ -253,7 +253,7 @@ def _CreateDisks(lu, instance, to_skip=None, target_node=None):
         raise errors.OpExecError(e.message)
 
 
-def _ComputeDiskSizePerVG(disk_template, disks):
+def ComputeDiskSizePerVG(disk_template, disks):
   """Compute disk size requirements in the volume group
 
   """
@@ -285,7 +285,7 @@ def _ComputeDiskSizePerVG(disk_template, disks):
   return req_size_dict[disk_template]
 
 
-def _ComputeDisks(op, default_vg):
+def ComputeDisks(op, default_vg):
   """Computes the instance disks.
 
   @param op: The instance opcode
@@ -349,7 +349,7 @@ def _ComputeDisks(op, default_vg):
   return disks
 
 
-def _CheckRADOSFreeSpace():
+def CheckRADOSFreeSpace():
   """Compute disk size requirements inside the RADOS cluster.
 
   """
@@ -385,7 +385,7 @@ def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
   return drbd_dev
 
 
-def _GenerateDiskTemplate(
+def GenerateDiskTemplate(
   lu, template_name, instance_name, primary_node, secondary_nodes,
   disk_info, file_storage_dir, file_driver, base_index,
   feedback_fn, full_disk_params, _req_file_storage=opcodes.RequireFileStorage,
@@ -591,7 +591,7 @@ class LUInstanceRecreateDisks(LogicalUnit):
     # We don't want _CheckIAllocatorOrNode selecting the default iallocator
     # when neither iallocator nor nodes are specified
     if self.op.iallocator or self.op.nodes:
-      _CheckIAllocatorOrNode(self, "iallocator", "nodes")
+      CheckIAllocatorOrNode(self, "iallocator", "nodes")
 
     for (idx, params) in self.op.disks:
       utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)
@@ -607,7 +607,7 @@ class LUInstanceRecreateDisks(LogicalUnit):
     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
 
     if self.op.nodes:
-      self.op.nodes = [_ExpandNodeName(self.cfg, n) for n in self.op.nodes]
+      self.op.nodes = [ExpandNodeName(self.cfg, n) for n in self.op.nodes]
       self.needed_locks[locking.LEVEL_NODE] = list(self.op.nodes)
     else:
       self.needed_locks[locking.LEVEL_NODE] = []
@@ -652,7 +652,7 @@ class LUInstanceRecreateDisks(LogicalUnit):
     elif level == locking.LEVEL_NODE_RES:
       # Copy node locks
       self.needed_locks[locking.LEVEL_NODE_RES] = \
-        _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
+        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
 
   def BuildHooksEnv(self):
     """Build hooks env.
@@ -660,7 +660,7 @@ class LUInstanceRecreateDisks(LogicalUnit):
     This runs on master, primary and secondary nodes of the instance.
 
     """
-    return _BuildInstanceHookEnvByObject(self, self.instance)
+    return BuildInstanceHookEnvByObject(self, self.instance)
 
   def BuildHooksNodes(self):
     """Build hooks nodes.
@@ -693,7 +693,7 @@ class LUInstanceRecreateDisks(LogicalUnit):
     else:
       primary_node = instance.primary_node
     if not self.op.iallocator:
-      _CheckNodeOnline(self, primary_node)
+      CheckNodeOnline(self, primary_node)
 
     if instance.disk_template == constants.DT_DISKLESS:
       raise errors.OpPrereqError("Instance '%s' has no disks" %
@@ -704,15 +704,15 @@ class LUInstanceRecreateDisks(LogicalUnit):
     if owned_groups:
       # Node group locks are acquired only for the primary node (and only
       # when the allocator is used)
-      _CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups,
-                               primary_only=True)
+      CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups,
+                              primary_only=True)
 
     # if we replace nodes *and* the old primary is offline, we don't
     # check the instance state
     old_pnode = self.cfg.GetNodeInfo(instance.primary_node)
     if not ((self.op.iallocator or self.op.nodes) and old_pnode.offline):
-      _CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
-                          msg="cannot recreate disks")
+      CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
+                         msg="cannot recreate disks")
 
     if self.op.disks:
       self.disks = dict(self.op.disks)
@@ -735,9 +735,9 @@ class LUInstanceRecreateDisks(LogicalUnit):
     if self.op.iallocator:
       self._RunAllocator()
       # Release unneeded node and node resource locks
-      _ReleaseLocks(self, locking.LEVEL_NODE, keep=self.op.nodes)
-      _ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=self.op.nodes)
-      _ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
+      ReleaseLocks(self, locking.LEVEL_NODE, keep=self.op.nodes)
+      ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=self.op.nodes)
+      ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
 
     assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
 
@@ -800,7 +800,7 @@ class LUInstanceRecreateDisks(LogicalUnit):
     # All touched nodes must be locked
     mylocks = self.owned_locks(locking.LEVEL_NODE)
     assert mylocks.issuperset(frozenset(instance.all_nodes))
-    _CreateDisks(self, instance, to_skip=to_skip)
+    CreateDisks(self, instance, to_skip=to_skip)
 
 
 def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
@@ -842,7 +842,7 @@ def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
                                  errors.ECODE_NORES)
 
 
-def _CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
+def CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
   """Checks if nodes have enough free disk space in all the VGs.
 
   This function checks if all given nodes have the needed amount of
@@ -895,7 +895,7 @@ def _CalcEta(time_taken, written, total_size):
   return (total_size - written) * avg_time
 
 
-def _WipeDisks(lu, instance, disks=None):
+def WipeDisks(lu, instance, disks=None):
   """Wipes instance disks.
 
   @type lu: L{LogicalUnit}
@@ -990,7 +990,7 @@ def _WipeDisks(lu, instance, disks=None):
                         " failed", idx, instance.name)
 
 
-def _ExpandCheckDisks(instance, disks):
+def ExpandCheckDisks(instance, disks):
   """Return the instance disks selected by the disks list
 
   @type disks: list of L{objects.Disk} or None
@@ -1008,14 +1008,14 @@ def _ExpandCheckDisks(instance, disks):
     return disks
 
 
-def _WaitForSync(lu, instance, disks=None, oneshot=False):
+def WaitForSync(lu, instance, disks=None, oneshot=False):
   """Sleep and poll for an instance's disk to sync.
 
   """
   if not instance.disks or disks is not None and not disks:
     return True
 
-  disks = _ExpandCheckDisks(instance, disks)
+  disks = ExpandCheckDisks(instance, disks)
 
   if not oneshot:
     lu.LogInfo("Waiting for instance %s to sync disks", instance.name)
@@ -1084,7 +1084,7 @@ def _WaitForSync(lu, instance, disks=None, oneshot=False):
   return not cumul_degraded
 
 
-def _ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
+def ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
   """Shutdown block devices of an instance.
 
   This does the shutdown on all nodes of the instance.
@@ -1094,7 +1094,7 @@ def _ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
 
   """
   all_result = True
-  disks = _ExpandCheckDisks(instance, disks)
+  disks = ExpandCheckDisks(instance, disks)
 
   for disk in disks:
     for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
@@ -1117,11 +1117,11 @@ def _SafeShutdownInstanceDisks(lu, instance, disks=None):
   _ShutdownInstanceDisks.
 
   """
-  _CheckInstanceState(lu, instance, INSTANCE_DOWN, msg="cannot shutdown disks")
-  _ShutdownInstanceDisks(lu, instance, disks=disks)
+  CheckInstanceState(lu, instance, INSTANCE_DOWN, msg="cannot shutdown disks")
+  ShutdownInstanceDisks(lu, instance, disks=disks)
 
 
-def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
+def AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
                            ignore_size=False):
   """Prepare the block devices for an instance.
 
@@ -1148,7 +1148,7 @@ def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
   device_info = []
   disks_ok = True
   iname = instance.name
-  disks = _ExpandCheckDisks(instance, disks)
+  disks = ExpandCheckDisks(instance, disks)
 
   # With the two passes mechanism we try to reduce the window of
   # opportunity for the race condition of switching DRBD to primary
@@ -1213,14 +1213,14 @@ def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
   return disks_ok, device_info
 
 
-def _StartInstanceDisks(lu, instance, force):
+def StartInstanceDisks(lu, instance, force):
   """Start the disks of an instance.
 
   """
-  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
-                                       ignore_secondaries=force)
+  disks_ok, _ = AssembleInstanceDisks(lu, instance,
+                                      ignore_secondaries=force)
   if not disks_ok:
-    _ShutdownInstanceDisks(lu, instance)
+    ShutdownInstanceDisks(lu, instance)
     if force is not None and not force:
       lu.LogWarning("",
                     hint=("If the message above refers to a secondary node,"
@@ -1249,7 +1249,7 @@ class LUInstanceGrowDisk(LogicalUnit):
     elif level == locking.LEVEL_NODE_RES:
       # Copy node locks
       self.needed_locks[locking.LEVEL_NODE_RES] = \
-        _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
+        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
 
   def BuildHooksEnv(self):
     """Build hooks env.
@@ -1262,7 +1262,7 @@ class LUInstanceGrowDisk(LogicalUnit):
       "AMOUNT": self.op.amount,
       "ABSOLUTE": self.op.absolute,
       }
-    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
+    env.update(BuildInstanceHookEnvByObject(self, self.instance))
     return env
 
   def BuildHooksNodes(self):
@@ -1283,7 +1283,7 @@ class LUInstanceGrowDisk(LogicalUnit):
       "Cannot retrieve locked instance %s" % self.op.instance_name
     nodenames = list(instance.all_nodes)
     for node in nodenames:
-      _CheckNodeOnline(self, node)
+      CheckNodeOnline(self, node)
 
     self.instance = instance
 
@@ -1318,14 +1318,14 @@ class LUInstanceGrowDisk(LogicalUnit):
       # TODO: check the free disk space for file, when that feature will be
       # supported
       nodes = map(self.cfg.GetNodeInfo, nodenames)
-      es_nodes = filter(lambda n: _IsExclusiveStorageEnabledNode(self.cfg, n),
+      es_nodes = filter(lambda n: IsExclusiveStorageEnabledNode(self.cfg, n),
                         nodes)
       if es_nodes:
        # With exclusive storage we need something smarter than just looking
         # at free space; for now, let's simply abort the operation.
         raise errors.OpPrereqError("Cannot grow disks when exclusive_storage"
                                    " is enabled", errors.ECODE_STATE)
-      _CheckNodesFreeDiskPerVG(self, nodenames, req_vgspace)
+      CheckNodesFreeDiskPerVG(self, nodenames, req_vgspace)
 
   def Exec(self, feedback_fn):
     """Execute disk grow.
@@ -1340,7 +1340,7 @@ class LUInstanceGrowDisk(LogicalUnit):
 
     wipe_disks = self.cfg.GetClusterInfo().prealloc_wipe_disks
 
-    disks_ok, _ = _AssembleInstanceDisks(self, self.instance, disks=[disk])
+    disks_ok, _ = AssembleInstanceDisks(self, self.instance, disks=[disk])
     if not disks_ok:
       raise errors.OpExecError("Cannot activate block device to grow")
 
@@ -1395,7 +1395,7 @@ class LUInstanceGrowDisk(LogicalUnit):
     self.cfg.Update(instance, feedback_fn)
 
     # Changes have been recorded, release node lock
-    _ReleaseLocks(self, locking.LEVEL_NODE)
+    ReleaseLocks(self, locking.LEVEL_NODE)
 
     # Downgrade lock while waiting for sync
     self.glm.downgrade(locking.LEVEL_INSTANCE)
@@ -1406,11 +1406,11 @@ class LUInstanceGrowDisk(LogicalUnit):
       assert instance.disks[self.op.disk] == disk
 
       # Wipe newly added disk space
-      _WipeDisks(self, instance,
-                 disks=[(self.op.disk, disk, old_disk_size)])
+      WipeDisks(self, instance,
+                disks=[(self.op.disk, disk, old_disk_size)])
 
     if self.op.wait_for_sync:
-      disk_abort = not _WaitForSync(self, instance, disks=[disk])
+      disk_abort = not WaitForSync(self, instance, disks=[disk])
       if disk_abort:
         self.LogWarning("Disk syncing has not returned a good status; check"
                         " the instance")
@@ -1445,7 +1445,7 @@ class LUInstanceReplaceDisks(LogicalUnit):
                                    " iallocator script must be used or the"
                                    " new node given", errors.ECODE_INVAL)
       else:
-        _CheckIAllocatorOrNode(self, "iallocator", "remote_node")
+        CheckIAllocatorOrNode(self, "iallocator", "remote_node")
 
     elif remote_node is not None or ialloc is not None:
       # Not replacing the secondary
@@ -1464,7 +1464,7 @@ class LUInstanceReplaceDisks(LogicalUnit):
       "Conflicting options"
 
     if self.op.remote_node is not None:
-      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
+      self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
 
       # Warning: do not remove the locking of the new secondary here
       # unless DRBD8.AddChildren is changed to work in parallel;
@@ -1535,7 +1535,7 @@ class LUInstanceReplaceDisks(LogicalUnit):
       "NEW_SECONDARY": self.op.remote_node,
       "OLD_SECONDARY": instance.secondary_nodes[0],
       }
-    env.update(_BuildInstanceHookEnvByObject(self, instance))
+    env.update(BuildInstanceHookEnvByObject(self, instance))
     return env
 
   def BuildHooksNodes(self):
@@ -1561,7 +1561,7 @@ class LUInstanceReplaceDisks(LogicalUnit):
     # Verify if node group locks are still correct
     owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
     if owned_groups:
-      _CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
+      CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
 
     return LogicalUnit.CheckPrereq(self)
 
@@ -1590,20 +1590,20 @@ class LUInstanceActivateDisks(NoHooksLU):
     self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
-    _CheckNodeOnline(self, self.instance.primary_node)
+    CheckNodeOnline(self, self.instance.primary_node)
 
   def Exec(self, feedback_fn):
     """Activate the disks.
 
     """
     disks_ok, disks_info = \
-              _AssembleInstanceDisks(self, self.instance,
-                                     ignore_size=self.op.ignore_size)
+              AssembleInstanceDisks(self, self.instance,
+                                    ignore_size=self.op.ignore_size)
     if not disks_ok:
       raise errors.OpExecError("Cannot activate block devices")
 
     if self.op.wait_for_sync:
-      if not _WaitForSync(self, self.instance):
+      if not WaitForSync(self, self.instance):
         raise errors.OpExecError("Some disks of the instance are degraded!")
 
     return disks_info
@@ -1640,7 +1640,7 @@ class LUInstanceDeactivateDisks(NoHooksLU):
     """
     instance = self.instance
     if self.op.force:
-      _ShutdownInstanceDisks(self, instance)
+      ShutdownInstanceDisks(self, instance)
     else:
       _SafeShutdownInstanceDisks(self, instance)
 
@@ -1683,11 +1683,11 @@ def _CheckDiskConsistencyInner(lu, instance, dev, node, on_primary,
   return result
 
 
-def _CheckDiskConsistency(lu, instance, dev, node, on_primary, ldisk=False):
+def CheckDiskConsistency(lu, instance, dev, node, on_primary, ldisk=False):
   """Wrapper around L{_CheckDiskConsistencyInner}.
 
   """
-  (disk,) = _AnnotateDiskParams(instance, [dev], lu.cfg)
+  (disk,) = AnnotateDiskParams(instance, [dev], lu.cfg)
   return _CheckDiskConsistencyInner(lu, instance, disk, node, on_primary,
                                     ldisk=ldisk)
 
@@ -1702,7 +1702,7 @@ def _BlockdevFind(lu, node, dev, instance):
   @returns The result of the rpc call
 
   """
-  (disk,) = _AnnotateDiskParams(instance, [dev], lu.cfg)
+  (disk,) = AnnotateDiskParams(instance, [dev], lu.cfg)
   return lu.rpc.call_blockdev_find(node, disk)
 
 
@@ -1773,11 +1773,11 @@ class TLReplaceDisks(Tasklet):
     return remote_node_name
 
   def _FindFaultyDisks(self, node_name):
-    """Wrapper for L{_FindFaultyInstanceDisks}.
+    """Wrapper for L{FindFaultyInstanceDisks}.
 
     """
-    return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
-                                    node_name, True)
+    return FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
+                                   node_name, True)
 
   def _CheckDisksActivated(self, instance):
     """Checks if the instance disks are activated.
@@ -1901,8 +1901,8 @@ class TLReplaceDisks(Tasklet):
         self.target_node = secondary_node
         check_nodes = [self.new_node, self.other_node]
 
-        _CheckNodeNotDrained(self.lu, remote_node)
-        _CheckNodeVmCapable(self.lu, remote_node)
+        CheckNodeNotDrained(self.lu, remote_node)
+        CheckNodeVmCapable(self.lu, remote_node)
 
         old_node_info = self.cfg.GetNodeInfo(secondary_node)
         assert old_node_info is not None
@@ -1928,11 +1928,11 @@ class TLReplaceDisks(Tasklet):
       cluster = self.cfg.GetClusterInfo()
       ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                               new_group_info)
-      _CheckTargetNodeIPolicy(self, ipolicy, instance, self.remote_node_info,
-                              self.cfg, ignore=self.ignore_ipolicy)
+      CheckTargetNodeIPolicy(self, ipolicy, instance, self.remote_node_info,
+                             self.cfg, ignore=self.ignore_ipolicy)
 
     for node in check_nodes:
-      _CheckNodeOnline(self.lu, node)
+      CheckNodeOnline(self.lu, node)
 
     touched_nodes = frozenset(node_name for node_name in [self.new_node,
                                                           self.other_node,
@@ -1940,12 +1940,12 @@ class TLReplaceDisks(Tasklet):
                               if node_name is not None)
 
     # Release unneeded node and node resource locks
-    _ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)
-    _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES, keep=touched_nodes)
-    _ReleaseLocks(self.lu, locking.LEVEL_NODE_ALLOC)
+    ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)
+    ReleaseLocks(self.lu, locking.LEVEL_NODE_RES, keep=touched_nodes)
+    ReleaseLocks(self.lu, locking.LEVEL_NODE_ALLOC)
 
     # Release any owned node group
-    _ReleaseLocks(self.lu, locking.LEVEL_NODEGROUP)
+    ReleaseLocks(self.lu, locking.LEVEL_NODEGROUP)
 
     # Check whether disks are valid
     for disk_idx in self.disks:
@@ -1993,7 +1993,7 @@ class TLReplaceDisks(Tasklet):
 
     # Activate the instance disks if we're replacing them on a down instance
     if activate_disks:
-      _StartInstanceDisks(self.lu, self.instance, True)
+      StartInstanceDisks(self.lu, self.instance, True)
 
     try:
       # Should we replace the secondary node?
@@ -2066,8 +2066,8 @@ class TLReplaceDisks(Tasklet):
       self.lu.LogInfo("Checking disk/%d consistency on node %s" %
                       (idx, node_name))
 
-      if not _CheckDiskConsistency(self.lu, self.instance, dev, node_name,
-                                   on_primary, ldisk=ldisk):
+      if not CheckDiskConsistency(self.lu, self.instance, dev, node_name,
+                                  on_primary, ldisk=ldisk):
         raise errors.OpExecError("Node %s has degraded storage, unsafe to"
                                  " replace disks for instance %s" %
                                  (node_name, self.instance.name))
@@ -2081,7 +2081,7 @@ class TLReplaceDisks(Tasklet):
     """
     iv_names = {}
 
-    disks = _AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
+    disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
     for idx, dev in enumerate(disks):
       if idx not in self.disks:
         continue
@@ -2107,12 +2107,12 @@ class TLReplaceDisks(Tasklet):
       new_lvs = [lv_data, lv_meta]
       old_lvs = [child.Copy() for child in dev.children]
       iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
-      excl_stor = _IsExclusiveStorageEnabledNodeName(self.lu.cfg, node_name)
+      excl_stor = IsExclusiveStorageEnabledNodeName(self.lu.cfg, node_name)
 
       # we pass force_create=True to force the LVM creation
       for new_lv in new_lvs:
         _CreateBlockDevInner(self.lu, node_name, self.instance, new_lv, True,
-                             _GetInstanceInfoText(self.instance), False,
+                             GetInstanceInfoText(self.instance), False,
                              excl_stor)
 
     return iv_names
@@ -2261,14 +2261,14 @@ class TLReplaceDisks(Tasklet):
       self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
       self._RemoveOldStorage(self.target_node, iv_names)
       # TODO: Check if releasing locks early still makes sense
-      _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
+      ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
     else:
       # Release all resource locks except those used by the instance
-      _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
-                    keep=self.node_secondary_ip.keys())
+      ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
+                   keep=self.node_secondary_ip.keys())
 
     # Release all node locks while waiting for sync
-    _ReleaseLocks(self.lu, locking.LEVEL_NODE)
+    ReleaseLocks(self.lu, locking.LEVEL_NODE)
 
     # TODO: Can the instance lock be downgraded here? Take the optional disk
     # shutdown in the caller into consideration.
@@ -2277,7 +2277,7 @@ class TLReplaceDisks(Tasklet):
     # This can fail as the old devices are degraded and _WaitForSync
     # does a combined result over all disks, so we don't check its return value
     self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
-    _WaitForSync(self.lu, self.instance)
+    WaitForSync(self.lu, self.instance)
 
     # Check all devices manually
     self._CheckDevices(self.instance.primary_node, iv_names)
@@ -2321,15 +2321,15 @@ class TLReplaceDisks(Tasklet):
 
     # Step: create new storage
     self.lu.LogStep(3, steps_total, "Allocate new storage")
-    disks = _AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
-    excl_stor = _IsExclusiveStorageEnabledNodeName(self.lu.cfg, self.new_node)
+    disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
+    excl_stor = IsExclusiveStorageEnabledNodeName(self.lu.cfg, self.new_node)
     for idx, dev in enumerate(disks):
       self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
                       (self.new_node, idx))
       # we pass force_create=True to force LVM creation
       for new_lv in dev.children:
         _CreateBlockDevInner(self.lu, self.new_node, self.instance, new_lv,
-                             True, _GetInstanceInfoText(self.instance), False,
+                             True, GetInstanceInfoText(self.instance), False,
                              excl_stor)
 
     # Step 4: drbd minors and drbd setup changes
@@ -2369,13 +2369,13 @@ class TLReplaceDisks(Tasklet):
                               children=dev.children,
                               size=dev.size,
                               params={})
-      (anno_new_drbd,) = _AnnotateDiskParams(self.instance, [new_drbd],
-                                             self.cfg)
+      (anno_new_drbd,) = AnnotateDiskParams(self.instance, [new_drbd],
+                                            self.cfg)
       try:
-        _CreateSingleBlockDev(self.lu, self.new_node, self.instance,
-                              anno_new_drbd,
-                              _GetInstanceInfoText(self.instance), False,
-                              excl_stor)
+        CreateSingleBlockDev(self.lu, self.new_node, self.instance,
+                             anno_new_drbd,
+                             GetInstanceInfoText(self.instance), False,
+                             excl_stor)
       except errors.GenericError:
         self.cfg.ReleaseDRBDMinors(self.instance.name)
         raise
@@ -2413,7 +2413,7 @@ class TLReplaceDisks(Tasklet):
     self.cfg.Update(self.instance, feedback_fn)
 
     # Release all node locks (the configuration has been updated)
-    _ReleaseLocks(self.lu, locking.LEVEL_NODE)
+    ReleaseLocks(self.lu, locking.LEVEL_NODE)
 
     # and now perform the drbd attach
     self.lu.LogInfo("Attaching primary drbds to new secondary"
@@ -2438,11 +2438,11 @@ class TLReplaceDisks(Tasklet):
       self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
       self._RemoveOldStorage(self.target_node, iv_names)
       # TODO: Check if releasing locks early still makes sense
-      _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
+      ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
     else:
       # Release all resource locks except those used by the instance
-      _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
-                    keep=self.node_secondary_ip.keys())
+      ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
+                   keep=self.node_secondary_ip.keys())
 
     # TODO: Can the instance lock be downgraded here? Take the optional disk
     # shutdown in the caller into consideration.
@@ -2451,7 +2451,7 @@ class TLReplaceDisks(Tasklet):
     # This can fail as the old devices are degraded and _WaitForSync
     # does a combined result over all disks, so we don't check its return value
     self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
-    _WaitForSync(self.lu, self.instance)
+    WaitForSync(self.lu, self.instance)
 
     # Check all devices manually
     self._CheckDevices(self.instance.primary_node, iv_names)
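Aside on the progress reporting touched above (WipeDisks/WaitForSync): only the final line of the private _CalcEta helper is visible in the hunk. A worked sketch of that arithmetic, assuming avg_time is the observed time per unit written (and written > 0):

    def calc_eta(time_taken, written, total_size):
        avg_time = time_taken / float(written)    # seconds per unit written (assumption)
        return (total_size - written) * avg_time  # seconds left at the observed rate

    # 256 MiB done in 30s out of 1024 MiB: 768 MiB left at ~0.117 s/MiB
    print(calc_eta(30.0, 256, 1024))              # 90.0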
lib/cmdlib/instance_utils.py
index fd8cd26..f6efa92 100644 (file)
@@ -31,13 +31,13 @@ from ganeti import network
 from ganeti import objects
 from ganeti import pathutils
 from ganeti import utils
-from ganeti.cmdlib.common import _AnnotateDiskParams, \
-  _ComputeIPolicyInstanceViolation
+from ganeti.cmdlib.common import AnnotateDiskParams, \
+  ComputeIPolicyInstanceViolation
 
 
-def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
-                          minmem, maxmem, vcpus, nics, disk_template, disks,
-                          bep, hvp, hypervisor_name, tags):
+def BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
+                         minmem, maxmem, vcpus, nics, disk_template, disks,
+                         bep, hvp, hypervisor_name, tags):
   """Builds instance related env variables for hooks
 
   This builds the hook environment from individual variables.
@@ -140,7 +140,7 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
   return env
 
 
-def _BuildInstanceHookEnvByObject(lu, instance, override=None):
+def BuildInstanceHookEnvByObject(lu, instance, override=None):
   """Builds instance related env variables for hooks from an object.
 
   @type lu: L{LogicalUnit}
@@ -167,7 +167,7 @@ def _BuildInstanceHookEnvByObject(lu, instance, override=None):
     "maxmem": bep[constants.BE_MAXMEM],
     "minmem": bep[constants.BE_MINMEM],
     "vcpus": bep[constants.BE_VCPUS],
-    "nics": _NICListToTuple(lu, instance.nics),
+    "nics": NICListToTuple(lu, instance.nics),
     "disk_template": instance.disk_template,
     "disks": [(disk.name, disk.size, disk.mode)
               for disk in instance.disks],
@@ -178,10 +178,10 @@ def _BuildInstanceHookEnvByObject(lu, instance, override=None):
   }
   if override:
     args.update(override)
-  return _BuildInstanceHookEnv(**args) # pylint: disable=W0142
+  return BuildInstanceHookEnv(**args) # pylint: disable=W0142
 
 
-def _GetClusterDomainSecret():
+def GetClusterDomainSecret():
   """Reads the cluster domain secret.
 
   """
@@ -189,7 +189,7 @@ def _GetClusterDomainSecret():
                                strict=True)
 
 
-def _CheckNodeNotDrained(lu, node):
+def CheckNodeNotDrained(lu, node):
   """Ensure that a given node is not drained.
 
   @param lu: the LU on behalf of which we make the check
@@ -202,7 +202,7 @@ def _CheckNodeNotDrained(lu, node):
                                errors.ECODE_STATE)
 
 
-def _CheckNodeVmCapable(lu, node):
+def CheckNodeVmCapable(lu, node):
   """Ensure that a given node is vm capable.
 
   @param lu: the LU on behalf of which we make the check
@@ -215,13 +215,13 @@ def _CheckNodeVmCapable(lu, node):
                                errors.ECODE_STATE)
 
 
-def _RemoveInstance(lu, feedback_fn, instance, ignore_failures):
+def RemoveInstance(lu, feedback_fn, instance, ignore_failures):
   """Utility function to remove an instance.
 
   """
   logging.info("Removing block devices for instance %s", instance.name)
 
-  if not _RemoveDisks(lu, instance, ignore_failures=ignore_failures):
+  if not RemoveDisks(lu, instance, ignore_failures=ignore_failures):
     if not ignore_failures:
       raise errors.OpExecError("Can't remove instance's disks")
     feedback_fn("Warning: can't remove instance's disks")
@@ -237,7 +237,7 @@ def _RemoveInstance(lu, feedback_fn, instance, ignore_failures):
   lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name
 
 
-def _RemoveDisks(lu, instance, target_node=None, ignore_failures=False):
+def RemoveDisks(lu, instance, target_node=None, ignore_failures=False):
   """Remove all disks for an instance.
 
   This abstracts away some work from `AddInstance()` and
@@ -258,7 +258,7 @@ def _RemoveDisks(lu, instance, target_node=None, ignore_failures=False):
 
   all_result = True
   ports_to_release = set()
-  anno_disks = _AnnotateDiskParams(instance, instance.disks, lu.cfg)
+  anno_disks = AnnotateDiskParams(instance, instance.disks, lu.cfg)
   for (idx, device) in enumerate(anno_disks):
     if target_node:
       edata = [(target_node, device)]
@@ -296,7 +296,7 @@ def _RemoveDisks(lu, instance, target_node=None, ignore_failures=False):
   return all_result
 
 
-def _NICToTuple(lu, nic):
+def NICToTuple(lu, nic):
   """Build a tupple of nic information.
 
   @type lu:  L{LogicalUnit}
@@ -316,7 +316,7 @@ def _NICToTuple(lu, nic):
   return (nic.name, nic.uuid, nic.ip, nic.mac, mode, link, nic.network, netinfo)
 
 
-def _NICListToTuple(lu, nics):
+def NICListToTuple(lu, nics):
   """Build a list of nic information tuples.
 
   This list is suitable to be passed to _BuildInstanceHookEnv or as a return
@@ -330,11 +330,11 @@ def _NICListToTuple(lu, nics):
   """
   hooks_nics = []
   for nic in nics:
-    hooks_nics.append(_NICToTuple(lu, nic))
+    hooks_nics.append(NICToTuple(lu, nic))
   return hooks_nics
 
 
-def _CopyLockList(names):
+def CopyLockList(names):
   """Makes a copy of a list of lock names.
 
   Handles L{locking.ALL_SET} correctly.
@@ -346,7 +346,7 @@ def _CopyLockList(names):
     return names[:]
 
 
-def _ReleaseLocks(lu, level, names=None, keep=None):
+def ReleaseLocks(lu, level, names=None, keep=None):
   """Releases locks owned by an LU.
 
   @type lu: L{LogicalUnit}
@@ -398,7 +398,7 @@ def _ReleaseLocks(lu, level, names=None, keep=None):
 
 def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
                                  target_group, cfg,
-                                 _compute_fn=_ComputeIPolicyInstanceViolation):
+                                 _compute_fn=ComputeIPolicyInstanceViolation):
   """Compute if instance meets the specs of the new target group.
 
   @param ipolicy: The ipolicy to verify
@@ -408,7 +408,7 @@ def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
   @type cfg: L{config.ConfigWriter}
   @param cfg: Cluster configuration
   @param _compute_fn: The function to verify ipolicy (unittest only)
-  @see: L{ganeti.cmdlib.common._ComputeIPolicySpecViolation}
+  @see: L{ganeti.cmdlib.common.ComputeIPolicySpecViolation}
 
   """
   if current_group == target_group:
@@ -417,8 +417,8 @@ def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
     return _compute_fn(ipolicy, instance, cfg)
 
 
-def _CheckTargetNodeIPolicy(lu, ipolicy, instance, node, cfg, ignore=False,
-                            _compute_fn=_ComputeIPolicyNodeViolation):
+def CheckTargetNodeIPolicy(lu, ipolicy, instance, node, cfg, ignore=False,
+                           _compute_fn=_ComputeIPolicyNodeViolation):
   """Checks that the target node is correct in terms of instance policy.
 
   @param ipolicy: The ipolicy to verify
@@ -428,7 +428,7 @@ def _CheckTargetNodeIPolicy(lu, ipolicy, instance, node, cfg, ignore=False,
   @param cfg: Cluster configuration
   @param ignore: Ignore violations of the ipolicy
   @param _compute_fn: The function to verify ipolicy (unittest only)
-  @see: L{ganeti.cmdlib.common._ComputeIPolicySpecViolation}
+  @see: L{ganeti.cmdlib.common.ComputeIPolicySpecViolation}
 
   """
   primary_node = lu.cfg.GetNodeInfo(instance.primary_node)
@@ -443,14 +443,14 @@ def _CheckTargetNodeIPolicy(lu, ipolicy, instance, node, cfg, ignore=False,
       raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
 
 
-def _GetInstanceInfoText(instance):
+def GetInstanceInfoText(instance):
   """Compute that text that should be added to the disk's metadata.
 
   """
   return "originstname+%s" % instance.name
 
 
-def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
+def CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
   """Checks if a node has enough free memory.
 
   This function checks if a given node has the needed amount of free
@@ -492,16 +492,16 @@ def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
   return free_mem
 
 
-def _CheckInstanceBridgesExist(lu, instance, node=None):
+def CheckInstanceBridgesExist(lu, instance, node=None):
   """Check that the brigdes needed by an instance exist.
 
   """
   if node is None:
     node = instance.primary_node
-  _CheckNicsBridgesExist(lu, instance.nics, node)
+  CheckNicsBridgesExist(lu, instance.nics, node)
 
 
-def _CheckNicsBridgesExist(lu, target_nics, target_node):
+def CheckNicsBridgesExist(lu, target_nics, target_node):
   """Check that the brigdes needed by a list of nics exist.
 
   """
@@ -515,7 +515,7 @@ def _CheckNicsBridgesExist(lu, target_nics, target_node):
                  target_node, prereq=True, ecode=errors.ECODE_ENVIRON)
 
 
-def _CheckNodeHasOS(lu, node, os_name, force_variant):
+def CheckNodeHasOS(lu, node, os_name, force_variant):
   """Ensure that a node supports a given OS.
 
   @param lu: the LU on behalf of which we make the check
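Aside (stand-in code, not the real module): the CopyLockList contract documented above is that plain lock-name lists are copied while the locking.ALL_SET sentinel passes through, so e.g. the LEVEL_NODE_RES locks can be derived from the LEVEL_NODE locks without sharing the list object. A minimal sketch, with an assumed non-list sentinel standing in for locking.ALL_SET:

    ALL_SET = None    # assumption: any non-list sentinel works for the demo

    def copy_lock_list(names):
        if names == ALL_SET:
            return names          # "all locks" sentinel passes through
        return names[:]           # real lists are copied

    node_locks = ["node1", "node2"]
    node_res_locks = copy_lock_list(node_locks)
    node_locks.append("node3")
    print(node_res_locks)                        # ['node1', 'node2'] (unaffected)
    print(copy_lock_list(ALL_SET) is ALL_SET)    # True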
lib/cmdlib/misc.py
index 12a950b..893b3f2 100644 (file)
@@ -31,8 +31,8 @@ from ganeti import locking
 from ganeti import qlang
 from ganeti import query
 from ganeti import utils
-from ganeti.cmdlib.base import NoHooksLU, _QueryBase
-from ganeti.cmdlib.common import _GetWantedNodes, _SupportsOob
+from ganeti.cmdlib.base import NoHooksLU, QueryBase
+from ganeti.cmdlib.common import GetWantedNodes, SupportsOob
 
 
 class LUOobCommand(NoHooksLU):
@@ -47,7 +47,7 @@ class LUOobCommand(NoHooksLU):
 
     """
     if self.op.node_names:
-      self.op.node_names = _GetWantedNodes(self, self.op.node_names)
+      self.op.node_names = GetWantedNodes(self, self.op.node_names)
       lock_names = self.op.node_names
     else:
       lock_names = locking.ALL_SET
@@ -81,7 +81,7 @@ class LUOobCommand(NoHooksLU):
       if (self.op.command in self._SKIP_MASTER and
           self.master_node in self.op.node_names):
         master_node_obj = self.cfg.GetNodeInfo(self.master_node)
-        master_oob_handler = _SupportsOob(self.cfg, master_node_obj)
+        master_oob_handler = SupportsOob(self.cfg, master_node_obj)
 
         if master_oob_handler:
           additional_text = ("run '%s %s %s' if you want to operate on the"
@@ -128,7 +128,7 @@ class LUOobCommand(NoHooksLU):
       node_entry = [(constants.RS_NORMAL, node.name)]
       ret.append(node_entry)
 
-      oob_program = _SupportsOob(self.cfg, node)
+      oob_program = SupportsOob(self.cfg, node)
 
       if not oob_program:
         node_entry.append((constants.RS_UNAVAIL, None))
@@ -221,7 +221,7 @@ class LUOobCommand(NoHooksLU):
                                utils.CommaJoin(errs))
 
 
-class _ExtStorageQuery(_QueryBase):
+class ExtStorageQuery(QueryBase):
   FIELDS = query.EXTSTORAGE_FIELDS
 
   def ExpandNames(self, lu):
@@ -364,7 +364,7 @@ class LUExtStorageDiagnose(NoHooksLU):
   REQ_BGL = False
 
   def CheckArguments(self):
-    self.eq = _ExtStorageQuery(qlang.MakeSimpleFilter("name", self.op.names),
+    self.eq = ExtStorageQuery(qlang.MakeSimpleFilter("name", self.op.names),
                                self.op.output_fields, False)
 
   def ExpandNames(self):
@@ -382,7 +382,7 @@ class LURestrictedCommand(NoHooksLU):
 
   def ExpandNames(self):
     if self.op.nodes:
-      self.op.nodes = _GetWantedNodes(self, self.op.nodes)
+      self.op.nodes = GetWantedNodes(self, self.op.nodes)
 
     self.needed_locks = {
       locking.LEVEL_NODE: self.op.nodes,
lib/cmdlib/network.py
index 0488ceb..2f2dcd7 100644 (file)
@@ -29,8 +29,8 @@ from ganeti import objects
 from ganeti import qlang
 from ganeti import query
 from ganeti import utils
-from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, _QueryBase
-from ganeti.cmdlib.common import _ShareAll, _CheckNodeGroupInstances
+from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, QueryBase
+from ganeti.cmdlib.common import ShareAll, CheckNodeGroupInstances
 
 
 def _BuildNetworkHookEnv(name, subnet, gateway, network6, gateway6,
@@ -399,12 +399,12 @@ class LUNetworkSetParams(LogicalUnit):
     self.cfg.Update(self.network, feedback_fn)
 
 
-class _NetworkQuery(_QueryBase):
+class NetworkQuery(QueryBase):
   FIELDS = query.NETWORK_FIELDS
 
   def ExpandNames(self, lu):
     lu.needed_locks = {}
-    lu.share_locks = _ShareAll()
+    lu.share_locks = ShareAll()
 
     self.do_locking = self.use_locking
 
@@ -509,7 +509,7 @@ class LUNetworkQuery(NoHooksLU):
   REQ_BGL = False
 
   def CheckArguments(self):
-    self.nq = _NetworkQuery(qlang.MakeSimpleFilter("name", self.op.names),
+    self.nq = NetworkQuery(qlang.MakeSimpleFilter("name", self.op.names),
                             self.op.output_fields, self.op.use_locking)
 
   def ExpandNames(self):
@@ -619,7 +619,7 @@ class LUNetworkConnect(LogicalUnit):
     # Check if locked instances are still correct
     owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
     if self.op.conflicts_check:
-      _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
+      CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
 
     self.netparams = {
       constants.NIC_MODE: self.network_mode,
@@ -697,7 +697,7 @@ class LUNetworkDisconnect(LogicalUnit):
 
     # Check if locked instances are still correct
     owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
-    _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
+    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
 
     self.group = self.cfg.GetNodeGroup(self.group_uuid)
     self.connected = True
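Aside: every LU*Query.CheckArguments in this patch feeds qlang.MakeSimpleFilter("name", names) into the now-public query classes. A sketch of the filter shape this plausibly produces; the list-based ["|", ["=", field, value], ...] form is an assumption about qlang's filter language, not something this patch defines:

    def make_simple_filter(field, values):
        if not values:
            return None           # no names given: query everything
        return ["|"] + [["=", field, value] for value in values]

    print(make_simple_filter("name", ["net1", "net2"]))
    # ['|', ['=', 'name', 'net1'], ['=', 'name', 'net2']]
    print(make_simple_filter("name", None))
    # None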
lib/cmdlib/node.py
index b186059..adbafb6 100644 (file)
@@ -36,16 +36,16 @@ from ganeti import rpc
 from ganeti import utils
 from ganeti.masterd import iallocator
 
-from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, _QueryBase, \
+from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, QueryBase, \
   ResultWithJobs
-from ganeti.cmdlib.common import _CheckParamsNotGlobal, \
-  _MergeAndVerifyHvState, _MergeAndVerifyDiskState, \
-  _IsExclusiveStorageEnabledNode, _CheckNodePVs, \
-  _RedistributeAncillaryFiles, _ExpandNodeName, _ShareAll, _SupportsOob, \
-  _CheckInstanceState, INSTANCE_DOWN, _GetUpdatedParams, \
-  _AdjustCandidatePool, _CheckIAllocatorOrNode, _LoadNodeEvacResult, \
-  _GetWantedNodes, _MapInstanceDisksToNodes, _RunPostHook, \
-  _FindFaultyInstanceDisks
+from ganeti.cmdlib.common import CheckParamsNotGlobal, \
+  MergeAndVerifyHvState, MergeAndVerifyDiskState, \
+  IsExclusiveStorageEnabledNode, CheckNodePVs, \
+  RedistributeAncillaryFiles, ExpandNodeName, ShareAll, SupportsOob, \
+  CheckInstanceState, INSTANCE_DOWN, GetUpdatedParams, \
+  AdjustCandidatePool, CheckIAllocatorOrNode, LoadNodeEvacResult, \
+  GetWantedNodes, MapInstanceDisksToNodes, RunPostHook, \
+  FindFaultyInstanceDisks
 
 
 def _DecideSelfPromotion(lu, exceptions=None):
@@ -262,14 +262,14 @@ class LUNodeAdd(LogicalUnit):
 
     if self.op.ndparams:
       utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
-      _CheckParamsNotGlobal(self.op.ndparams, constants.NDC_GLOBALS, "node",
-                            "node", "cluster or group")
+      CheckParamsNotGlobal(self.op.ndparams, constants.NDC_GLOBALS, "node",
+                           "node", "cluster or group")
 
     if self.op.hv_state:
-      self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state, None)
+      self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state, None)
 
     if self.op.disk_state:
-      self.new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state, None)
+      self.new_disk_state = MergeAndVerifyDiskState(self.op.disk_state, None)
 
     # TODO: If we need to have multiple DnsOnlyRunner we probably should make
     #       it a property on the base class.
@@ -288,10 +288,10 @@ class LUNodeAdd(LogicalUnit):
     vg_name = cfg.GetVGName()
     if vg_name is not None:
       vparams = {constants.NV_PVLIST: [vg_name]}
-      excl_stor = _IsExclusiveStorageEnabledNode(cfg, self.new_node)
+      excl_stor = IsExclusiveStorageEnabledNode(cfg, self.new_node)
       cname = self.cfg.GetClusterName()
       result = rpcrunner.call_node_verify_light([node], vparams, cname)[node]
-      (errmsgs, _) = _CheckNodePVs(result.payload, excl_stor)
+      (errmsgs, _) = CheckNodePVs(result.payload, excl_stor)
       if errmsgs:
         raise errors.OpPrereqError("Checks on node PVs failed: %s" %
                                    "; ".join(errmsgs), errors.ECODE_ENVIRON)
@@ -372,7 +372,7 @@ class LUNodeAdd(LogicalUnit):
         raise errors.OpExecError("ssh/hostname verification failed")
 
     if self.op.readd:
-      _RedistributeAncillaryFiles(self)
+      RedistributeAncillaryFiles(self)
       self.context.ReaddNode(new_node)
       # make sure we redistribute the config
       self.cfg.Update(new_node, feedback_fn)
@@ -384,8 +384,8 @@ class LUNodeAdd(LogicalUnit):
           self.LogWarning("Node failed to demote itself from master"
                           " candidate status: %s" % msg)
     else:
-      _RedistributeAncillaryFiles(self, additional_nodes=[node],
-                                  additional_vm=self.op.vm_capable)
+      RedistributeAncillaryFiles(self, additional_nodes=[node],
+                                 additional_vm=self.op.vm_capable)
       self.context.AddNode(new_node, self.proc.GetECId())
 
 
@@ -412,7 +412,7 @@ class LUNodeSetParams(LogicalUnit):
   _FLAGS = ["master_candidate", "drained", "offline"]
 
   def CheckArguments(self):
-    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
+    self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
     all_mods = [self.op.offline, self.op.master_candidate, self.op.drained,
                 self.op.master_capable, self.op.vm_capable,
                 self.op.secondary_ip, self.op.ndparams, self.op.hv_state,
@@ -467,7 +467,7 @@ class LUNodeSetParams(LogicalUnit):
 
     # Get all locks except nodes in shared mode; they are not used for anything
     # but read-only access
-    self.share_locks = _ShareAll()
+    self.share_locks = ShareAll()
     self.share_locks[locking.LEVEL_NODE] = 0
     self.share_locks[locking.LEVEL_NODE_RES] = 0
     self.share_locks[locking.LEVEL_NODE_ALLOC] = 0
@@ -573,7 +573,7 @@ class LUNodeSetParams(LogicalUnit):
     # away from the respective state, as only real changes are kept
 
     # TODO: We might query the real power state if it supports OOB
-    if _SupportsOob(self.cfg, node):
+    if SupportsOob(self.cfg, node):
       if self.op.offline is False and not (node.powered or
                                            self.op.powered is True):
         raise errors.OpPrereqError(("Node %s needs to be turned on before its"
@@ -670,8 +670,8 @@ class LUNodeSetParams(LogicalUnit):
         # On online nodes, check that no instances are running, and that
         # the node has the new ip and we can reach it.
         for instance in affected_instances.values():
-          _CheckInstanceState(self, instance, INSTANCE_DOWN,
-                              msg="cannot change secondary ip")
+          CheckInstanceState(self, instance, INSTANCE_DOWN,
+                             msg="cannot change secondary ip")
 
         _CheckNodeHasSecondaryIP(self, node.name, self.op.secondary_ip, True)
         if master.name != node.name:
@@ -684,20 +684,20 @@ class LUNodeSetParams(LogicalUnit):
                                        errors.ECODE_ENVIRON)
 
     if self.op.ndparams:
-      new_ndparams = _GetUpdatedParams(self.node.ndparams, self.op.ndparams)
+      new_ndparams = GetUpdatedParams(self.node.ndparams, self.op.ndparams)
       utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
-      _CheckParamsNotGlobal(self.op.ndparams, constants.NDC_GLOBALS, "node",
-                            "node", "cluster or group")
+      CheckParamsNotGlobal(self.op.ndparams, constants.NDC_GLOBALS, "node",
+                           "node", "cluster or group")
       self.new_ndparams = new_ndparams
 
     if self.op.hv_state:
-      self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state,
-                                                 self.node.hv_state_static)
+      self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state,
+                                                self.node.hv_state_static)
 
     if self.op.disk_state:
       self.new_disk_state = \
-        _MergeAndVerifyDiskState(self.op.disk_state,
-                                 self.node.disk_state_static)
+        MergeAndVerifyDiskState(self.op.disk_state,
+                                self.node.disk_state_static)
 
   def Exec(self, feedback_fn):
     """Modifies a node.
@@ -742,7 +742,7 @@ class LUNodeSetParams(LogicalUnit):
 
       # we locked all nodes, we adjust the CP before updating this node
       if self.lock_all:
-        _AdjustCandidatePool(self, [node.name])
+        AdjustCandidatePool(self, [node.name])
 
     if self.op.secondary_ip:
       node.secondary_ip = self.op.secondary_ip
@@ -766,7 +766,7 @@ class LUNodePowercycle(NoHooksLU):
   REQ_BGL = False
 
   def CheckArguments(self):
-    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
+    self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
     if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
       raise errors.OpPrereqError("The node is the master and the force"
                                  " parameter was not set",
@@ -835,13 +835,13 @@ class LUNodeEvacuate(NoHooksLU):
           constants.IALLOCATOR_NEVAC_MODES)
 
   def CheckArguments(self):
-    _CheckIAllocatorOrNode(self, "iallocator", "remote_node")
+    CheckIAllocatorOrNode(self, "iallocator", "remote_node")
 
   def ExpandNames(self):
-    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
+    self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
 
     if self.op.remote_node is not None:
-      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
+      self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
       assert self.op.remote_node
 
       if self.op.remote_node == self.op.node_name:
@@ -854,7 +854,7 @@ class LUNodeEvacuate(NoHooksLU):
                                    errors.ECODE_INVAL)
 
     # Declare locks
-    self.share_locks = _ShareAll()
+    self.share_locks = ShareAll()
     self.needed_locks = {
       locking.LEVEL_INSTANCE: [],
       locking.LEVEL_NODEGROUP: [],
@@ -1000,7 +1000,7 @@ class LUNodeEvacuate(NoHooksLU):
                                    (self.op.iallocator, ial.info),
                                    errors.ECODE_NORES)
 
-      jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, True)
+      jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, True)
 
     elif self.op.remote_node is not None:
       assert self.op.mode == constants.NODE_EVAC_SEC
@@ -1030,9 +1030,9 @@ class LUNodeMigrate(LogicalUnit):
     pass
 
   def ExpandNames(self):
-    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
+    self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
 
-    self.share_locks = _ShareAll()
+    self.share_locks = ShareAll()
     self.needed_locks = {
       locking.LEVEL_NODE: [self.op.node_name],
       }
@@ -1101,7 +1101,7 @@ class LUNodeModifyStorage(NoHooksLU):
   REQ_BGL = False
 
   def CheckArguments(self):
-    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
+    self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
 
     storage_type = self.op.storage_type
 
@@ -1136,15 +1136,15 @@ class LUNodeModifyStorage(NoHooksLU):
                  (self.op.name, self.op.node_name))
 
 
-class _NodeQuery(_QueryBase):
+class NodeQuery(QueryBase):
   FIELDS = query.NODE_FIELDS
 
   def ExpandNames(self, lu):
     lu.needed_locks = {}
-    lu.share_locks = _ShareAll()
+    lu.share_locks = ShareAll()
 
     if self.names:
-      self.wanted = _GetWantedNodes(lu, self.names)
+      self.wanted = GetWantedNodes(lu, self.names)
     else:
       self.wanted = locking.ALL_SET
 
@@ -1198,7 +1198,7 @@ class _NodeQuery(_QueryBase):
       node_to_secondary = None
 
     if query.NQ_OOB in self.requested_data:
-      oob_support = dict((name, bool(_SupportsOob(lu.cfg, node)))
+      oob_support = dict((name, bool(SupportsOob(lu.cfg, node)))
                          for name, node in all_info.iteritems())
     else:
       oob_support = None
@@ -1222,7 +1222,7 @@ class LUNodeQuery(NoHooksLU):
   REQ_BGL = False
 
   def CheckArguments(self):
-    self.nq = _NodeQuery(qlang.MakeSimpleFilter("name", self.op.names),
+    self.nq = NodeQuery(qlang.MakeSimpleFilter("name", self.op.names),
                          self.op.output_fields, self.op.use_locking)
 
   def ExpandNames(self):
@@ -1268,11 +1268,11 @@ class LUNodeQueryvols(NoHooksLU):
                        selected=self.op.output_fields)
 
   def ExpandNames(self):
-    self.share_locks = _ShareAll()
+    self.share_locks = ShareAll()
 
     if self.op.nodes:
       self.needed_locks = {
-        locking.LEVEL_NODE: _GetWantedNodes(self, self.op.nodes),
+        locking.LEVEL_NODE: GetWantedNodes(self, self.op.nodes),
         }
     else:
       self.needed_locks = {
@@ -1288,7 +1288,7 @@ class LUNodeQueryvols(NoHooksLU):
     volumes = self.rpc.call_node_volumes(nodenames)
 
     ilist = self.cfg.GetAllInstancesInfo()
-    vol2inst = _MapInstanceDisksToNodes(ilist.values())
+    vol2inst = MapInstanceDisksToNodes(ilist.values())
 
     output = []
     for node in nodenames:
@@ -1340,11 +1340,11 @@ class LUNodeQueryStorage(NoHooksLU):
                        selected=self.op.output_fields)
 
   def ExpandNames(self):
-    self.share_locks = _ShareAll()
+    self.share_locks = ShareAll()
 
     if self.op.nodes:
       self.needed_locks = {
-        locking.LEVEL_NODE: _GetWantedNodes(self, self.op.nodes),
+        locking.LEVEL_NODE: GetWantedNodes(self, self.op.nodes),
         }
     else:
       self.needed_locks = {
@@ -1454,7 +1454,7 @@ class LUNodeRemove(LogicalUnit):
     Any errors are signaled by raising errors.OpPrereqError.
 
     """
-    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
+    self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
     node = self.cfg.GetNodeInfo(self.op.node_name)
     assert node is not None
 
@@ -1485,11 +1485,11 @@ class LUNodeRemove(LogicalUnit):
       "Not owning BGL"
 
     # Promote nodes to master candidate as needed
-    _AdjustCandidatePool(self, exceptions=[node.name])
+    AdjustCandidatePool(self, exceptions=[node.name])
     self.context.RemoveNode(node.name)
 
     # Run post hooks on the node before it's removed
-    _RunPostHook(self, node.name)
+    RunPostHook(self, node.name)
 
     result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
     msg = result.fail_msg
@@ -1504,7 +1504,7 @@ class LUNodeRemove(LogicalUnit):
                                               constants.ETC_HOSTS_REMOVE,
                                               node.name, None)
       result.Raise("Can't update hosts file with new host data")
-      _RedistributeAncillaryFiles(self)
+      RedistributeAncillaryFiles(self)
 
 
 class LURepairNodeStorage(NoHooksLU):
@@ -1514,7 +1514,7 @@ class LURepairNodeStorage(NoHooksLU):
   REQ_BGL = False
 
   def CheckArguments(self):
-    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
+    self.op.node_name = ExpandNodeName(self.cfg, self.op.node_name)
 
     storage_type = self.op.storage_type
 
@@ -1532,8 +1532,8 @@ class LURepairNodeStorage(NoHooksLU):
   def _CheckFaultyDisks(self, instance, node_name):
     """Ensure faulty disks abort the opcode or at least warn."""
     try:
-      if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
-                                  node_name, True):
+      if FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
+                                 node_name, True):
         raise errors.OpPrereqError("Instance '%s' has faulty disks on"
                                    " node '%s'" % (instance.name, node_name),
                                    errors.ECODE_STATE)
index b6a4c13..ea4bca5 100644 (file)
@@ -25,10 +25,10 @@ from ganeti import compat
 from ganeti import locking
 from ganeti import qlang
 from ganeti import query
-from ganeti.cmdlib.base import NoHooksLU, _QueryBase
+from ganeti.cmdlib.base import QueryBase, NoHooksLU
 
 
-class _OsQuery(_QueryBase):
+class OsQuery(QueryBase):
   FIELDS = query.OS_FIELDS
 
   def ExpandNames(self, lu):
@@ -179,7 +179,7 @@ class LUOsDiagnose(NoHooksLU):
       return status_filter
 
   def CheckArguments(self):
-    self.oq = _OsQuery(self._BuildFilter(self.op.output_fields, self.op.names),
+    self.oq = OsQuery(self._BuildFilter(self.op.output_fields, self.op.names),
-                       self.op.output_fields, False)
+                      self.op.output_fields, False)
 
   def ExpandNames(self):
index 57c7f29..6cfd207 100644 (file)
 from ganeti import constants
 from ganeti import errors
 from ganeti import query
-from ganeti.cmdlib.backup import _ExportQuery
+from ganeti.cmdlib.backup import ExportQuery
 from ganeti.cmdlib.base import NoHooksLU
-from ganeti.cmdlib.cluster import _ClusterQuery
-from ganeti.cmdlib.group import _GroupQuery
-from ganeti.cmdlib.instance_query import _InstanceQuery
-from ganeti.cmdlib.misc import _ExtStorageQuery
-from ganeti.cmdlib.network import _NetworkQuery
-from ganeti.cmdlib.node import _NodeQuery
-from ganeti.cmdlib.operating_system import _OsQuery
+from ganeti.cmdlib.cluster import ClusterQuery
+from ganeti.cmdlib.group import GroupQuery
+from ganeti.cmdlib.instance_query import InstanceQuery
+from ganeti.cmdlib.misc import ExtStorageQuery
+from ganeti.cmdlib.network import NetworkQuery
+from ganeti.cmdlib.node import NodeQuery
+from ganeti.cmdlib.operating_system import OsQuery
 
 
 #: Query type implementations
 _QUERY_IMPL = {
-  constants.QR_CLUSTER: _ClusterQuery,
-  constants.QR_INSTANCE: _InstanceQuery,
-  constants.QR_NODE: _NodeQuery,
-  constants.QR_GROUP: _GroupQuery,
-  constants.QR_NETWORK: _NetworkQuery,
-  constants.QR_OS: _OsQuery,
-  constants.QR_EXTSTORAGE: _ExtStorageQuery,
-  constants.QR_EXPORT: _ExportQuery,
+  constants.QR_CLUSTER: ClusterQuery,
+  constants.QR_INSTANCE: InstanceQuery,
+  constants.QR_NODE: NodeQuery,
+  constants.QR_GROUP: GroupQuery,
+  constants.QR_NETWORK: NetworkQuery,
+  constants.QR_OS: OsQuery,
+  constants.QR_EXTSTORAGE: ExtStorageQuery,
+  constants.QR_EXPORT: ExportQuery,
   }
 
 assert set(_QUERY_IMPL.keys()) == constants.QR_VIA_OP
index 0ec27a6..726e3a1 100644 (file)
@@ -29,8 +29,7 @@ from ganeti import locking
 from ganeti import objects
 from ganeti import utils
 from ganeti.cmdlib.base import NoHooksLU
-from ganeti.cmdlib.common import _ExpandNodeName, _ExpandInstanceName, \
-  _ShareAll
+from ganeti.cmdlib.common import ExpandNodeName, ExpandInstanceName, ShareAll
 
 
 class TagsLU(NoHooksLU): # pylint: disable=W0223
@@ -44,11 +43,11 @@ class TagsLU(NoHooksLU): # pylint: disable=W0223
     self.needed_locks = {}
 
     if self.op.kind == constants.TAG_NODE:
-      self.op.name = _ExpandNodeName(self.cfg, self.op.name)
+      self.op.name = ExpandNodeName(self.cfg, self.op.name)
       lock_level = locking.LEVEL_NODE
       lock_name = self.op.name
     elif self.op.kind == constants.TAG_INSTANCE:
-      self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
+      self.op.name = ExpandInstanceName(self.cfg, self.op.name)
       lock_level = locking.LEVEL_INSTANCE
       lock_name = self.op.name
     elif self.op.kind == constants.TAG_NODEGROUP:
@@ -98,7 +97,7 @@ class LUTagsGet(TagsLU):
     TagsLU.ExpandNames(self)
 
     # Share locks as this is only a read operation
-    self.share_locks = _ShareAll()
+    self.share_locks = ShareAll()
 
   def Exec(self, feedback_fn):
     """Returns the tag list.
index 02a6749..52787c3 100644 (file)
@@ -33,8 +33,8 @@ from ganeti import locking
 from ganeti import utils
 from ganeti.masterd import iallocator
 from ganeti.cmdlib.base import NoHooksLU
-from ganeti.cmdlib.common import _ExpandInstanceName, _GetWantedNodes, \
-  _GetWantedInstances
+from ganeti.cmdlib.common import ExpandInstanceName, GetWantedNodes, \
+  GetWantedInstances
 
 
 class LUTestDelay(NoHooksLU):
@@ -57,7 +57,7 @@ class LUTestDelay(NoHooksLU):
-      # _GetWantedNodes can be used here, but is not always appropriate to use
+      # GetWantedNodes can be used here, but is not always appropriate to use
       # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
       # more information.
-      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
+      self.op.on_nodes = GetWantedNodes(self, self.op.on_nodes)
       self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
 
   def _TestDelay(self):
@@ -261,7 +261,7 @@ class LUTestAllocator(NoHooksLU):
       if self.op.hypervisor is None:
         self.op.hypervisor = self.cfg.GetHypervisorType()
     elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
-      fname = _ExpandInstanceName(self.cfg, self.op.name)
+      fname = ExpandInstanceName(self.cfg, self.op.name)
       self.op.name = fname
       self.relocate_from = \
           list(self.cfg.GetInstanceInfo(fname).secondary_nodes)
@@ -269,7 +269,7 @@ class LUTestAllocator(NoHooksLU):
                           constants.IALLOCATOR_MODE_NODE_EVAC):
       if not self.op.instances:
         raise errors.OpPrereqError("Missing instances", errors.ECODE_INVAL)
-      self.op.instances = _GetWantedInstances(self, self.op.instances)
+      self.op.instances = GetWantedInstances(self, self.op.instances)
     else:
       raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                  self.op.mode, errors.ECODE_INVAL)
index bb73ed5..21b2c26 100755 (executable)
@@ -112,7 +112,7 @@ class TestIAllocatorChecks(testutils.GanetiTestCase):
     op = OpTest()
     lu = TestLU(op)
 
-    c_i = lambda: common._CheckIAllocatorOrNode(lu, "iallocator", "node")
+    c_i = lambda: common.CheckIAllocatorOrNode(lu, "iallocator", "node")
 
     # Neither node nor iallocator given
     for n in (None, []):
@@ -434,8 +434,8 @@ class TestLoadNodeEvacResult(unittest.TestCase):
           assert iallocator._NEVAC_RESULT(alloc_result)
 
           lu = _FakeLU()
-          result = common._LoadNodeEvacResult(lu, alloc_result,
-                                              early_release, use_nodes)
+          result = common.LoadNodeEvacResult(lu, alloc_result,
+                                             early_release, use_nodes)
 
           if moved:
             (_, (info_args, )) = lu.info_log.pop(0)
@@ -463,7 +463,7 @@ class TestLoadNodeEvacResult(unittest.TestCase):
     assert iallocator._NEVAC_RESULT(alloc_result)
 
     lu = _FakeLU()
-    self.assertRaises(errors.OpExecError, common._LoadNodeEvacResult,
+    self.assertRaises(errors.OpExecError, common.LoadNodeEvacResult,
                       lu, alloc_result, False, False)
     self.assertFalse(lu.info_log)
     (_, (args, )) = lu.warning_log.pop(0)
@@ -549,7 +549,7 @@ class TestUpdateAndVerifySubDict(unittest.TestCase):
 
 class TestHvStateHelper(unittest.TestCase):
   def testWithoutOpData(self):
-    self.assertEqual(common._MergeAndVerifyHvState(None, NotImplemented),
+    self.assertEqual(common.MergeAndVerifyHvState(None, NotImplemented),
                      None)
 
   def testWithoutOldData(self):
@@ -558,7 +558,7 @@ class TestHvStateHelper(unittest.TestCase):
         constants.HVST_MEMORY_TOTAL: 4096,
         },
       }
-    self.assertEqual(common._MergeAndVerifyHvState(new, None), new)
+    self.assertEqual(common.MergeAndVerifyHvState(new, None), new)
 
   def testWithWrongHv(self):
     new = {
@@ -566,12 +566,12 @@ class TestHvStateHelper(unittest.TestCase):
         constants.HVST_MEMORY_TOTAL: 4096,
         },
       }
-    self.assertRaises(errors.OpPrereqError, common._MergeAndVerifyHvState,
+    self.assertRaises(errors.OpPrereqError, common.MergeAndVerifyHvState,
                       new, None)
 
 class TestDiskStateHelper(unittest.TestCase):
   def testWithoutOpData(self):
-    self.assertEqual(common._MergeAndVerifyDiskState(None, NotImplemented),
+    self.assertEqual(common.MergeAndVerifyDiskState(None, NotImplemented),
                      None)
 
   def testWithoutOldData(self):
@@ -582,7 +582,7 @@ class TestDiskStateHelper(unittest.TestCase):
           },
         },
       }
-    self.assertEqual(common._MergeAndVerifyDiskState(new, None), new)
+    self.assertEqual(common.MergeAndVerifyDiskState(new, None), new)
 
   def testWithWrongStorageType(self):
     new = {
@@ -592,7 +592,7 @@ class TestDiskStateHelper(unittest.TestCase):
           },
         },
       }
-    self.assertRaises(errors.OpPrereqError, common._MergeAndVerifyDiskState,
+    self.assertRaises(errors.OpPrereqError, common.MergeAndVerifyDiskState,
                       new, None)
 
 
@@ -687,44 +687,44 @@ class TestComputeIPolicySpecViolation(unittest.TestCase):
 
   def test(self):
     compute_fn = _ValidateComputeMinMaxSpec
-    ret = common._ComputeIPolicySpecViolation(self._MICRO_IPOL, 1024, 1, 1, 1,
-                                              [1024], 1, constants.DT_PLAIN,
-                                              _compute_fn=compute_fn)
+    ret = common.ComputeIPolicySpecViolation(self._MICRO_IPOL, 1024, 1, 1, 1,
+                                             [1024], 1, constants.DT_PLAIN,
+                                             _compute_fn=compute_fn)
     self.assertEqual(ret, [])
 
   def testDiskFull(self):
     compute_fn = _NoDiskComputeMinMaxSpec
-    ret = common._ComputeIPolicySpecViolation(self._MICRO_IPOL, 1024, 1, 1, 1,
-                                              [1024], 1, constants.DT_PLAIN,
-                                              _compute_fn=compute_fn)
+    ret = common.ComputeIPolicySpecViolation(self._MICRO_IPOL, 1024, 1, 1, 1,
+                                             [1024], 1, constants.DT_PLAIN,
+                                             _compute_fn=compute_fn)
     self.assertEqual(ret, [constants.ISPEC_DISK_COUNT])
 
   def testDiskLess(self):
     compute_fn = _NoDiskComputeMinMaxSpec
-    ret = common._ComputeIPolicySpecViolation(self._MICRO_IPOL, 1024, 1, 1, 1,
-                                              [1024], 1, constants.DT_DISKLESS,
-                                              _compute_fn=compute_fn)
+    ret = common.ComputeIPolicySpecViolation(self._MICRO_IPOL, 1024, 1, 1, 1,
+                                             [1024], 1, constants.DT_DISKLESS,
+                                             _compute_fn=compute_fn)
     self.assertEqual(ret, [])
 
   def testWrongTemplates(self):
     compute_fn = _ValidateComputeMinMaxSpec
-    ret = common._ComputeIPolicySpecViolation(self._MICRO_IPOL, 1024, 1, 1, 1,
-                                              [1024], 1, constants.DT_DRBD8,
-                                              _compute_fn=compute_fn)
+    ret = common.ComputeIPolicySpecViolation(self._MICRO_IPOL, 1024, 1, 1, 1,
+                                             [1024], 1, constants.DT_DRBD8,
+                                             _compute_fn=compute_fn)
     self.assertEqual(len(ret), 1)
     self.assertTrue("Disk template" in ret[0])
 
   def testInvalidArguments(self):
-    self.assertRaises(AssertionError, common._ComputeIPolicySpecViolation,
+    self.assertRaises(AssertionError, common.ComputeIPolicySpecViolation,
                       self._MICRO_IPOL, 1024, 1, 1, 1, [], 1,
                       constants.DT_PLAIN,)
 
   def testInvalidSpec(self):
     spec = _SpecWrapper([None, False, "foo", None, "bar", None])
     compute_fn = spec.ComputeMinMaxSpec
-    ret = common._ComputeIPolicySpecViolation(self._MICRO_IPOL, 1024, 1, 1, 1,
-                                              [1024], 1, constants.DT_PLAIN,
-                                              _compute_fn=compute_fn)
+    ret = common.ComputeIPolicySpecViolation(self._MICRO_IPOL, 1024, 1, 1, 1,
+                                             [1024], 1, constants.DT_PLAIN,
+                                             _compute_fn=compute_fn)
     self.assertEqual(ret, ["foo", "bar"])
     self.assertFalse(spec.spec)
 
@@ -779,10 +779,10 @@ class TestComputeIPolicySpecViolation(unittest.TestCase):
       constants.IPOLICY_DTS: [disk_template],
       }
     def AssertComputeViolation(ipolicy, violations):
-      ret = common._ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count,
-                                                disk_count, nic_count,
-                                                disk_sizes, spindle_use,
-                                                disk_template)
+      ret = common.ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count,
+                                               disk_count, nic_count,
+                                               disk_sizes, spindle_use,
+                                               disk_template)
       self.assertEqual(len(ret), violations)
 
     AssertComputeViolation(ipolicy1, 0)
@@ -847,13 +847,13 @@ class TestComputeIPolicyInstanceViolation(unittest.TestCase):
                                 disk_template=constants.DT_PLAIN)
     stub = _StubComputeIPolicySpecViolation(2048, 2, 1, 0, [512], 4,
                                             constants.DT_PLAIN)
-    ret = common._ComputeIPolicyInstanceViolation(NotImplemented, instance,
-                                                  cfg, _compute_fn=stub)
+    ret = common.ComputeIPolicyInstanceViolation(NotImplemented, instance,
+                                                 cfg, _compute_fn=stub)
     self.assertEqual(ret, [])
     instance2 = objects.Instance(beparams={}, disks=disks, nics=[],
                                  disk_template=constants.DT_PLAIN)
-    ret = common._ComputeIPolicyInstanceViolation(NotImplemented, instance2,
-                                                  cfg, _compute_fn=stub)
+    ret = common.ComputeIPolicyInstanceViolation(NotImplemented, instance2,
+                                                 cfg, _compute_fn=stub)
     self.assertEqual(ret, [])
 
 
@@ -926,15 +926,15 @@ class TestCheckTargetNodeIPolicy(unittest.TestCase):
 
   def testNoViolation(self):
     compute_recoder = _CallRecorder(return_value=[])
-    instance._CheckTargetNodeIPolicy(self.lu, NotImplemented, self.instance,
-                                     self.target_node, NotImplemented,
-                                     _compute_fn=compute_recoder)
+    instance.CheckTargetNodeIPolicy(self.lu, NotImplemented, self.instance,
+                                    self.target_node, NotImplemented,
+                                    _compute_fn=compute_recoder)
     self.assertTrue(compute_recoder.called)
     self.assertEqual(self.lu.warning_log, [])
 
   def testNoIgnore(self):
     compute_recoder = _CallRecorder(return_value=["mem_size not in range"])
-    self.assertRaises(errors.OpPrereqError, instance._CheckTargetNodeIPolicy,
+    self.assertRaises(errors.OpPrereqError, instance.CheckTargetNodeIPolicy,
                       self.lu, NotImplemented, self.instance,
                       self.target_node, NotImplemented,
                       _compute_fn=compute_recoder)
@@ -943,7 +943,7 @@ class TestCheckTargetNodeIPolicy(unittest.TestCase):
 
   def testIgnoreViolation(self):
     compute_recoder = _CallRecorder(return_value=["mem_size not in range"])
-    instance._CheckTargetNodeIPolicy(self.lu, NotImplemented, self.instance,
+    instance.CheckTargetNodeIPolicy(self.lu, NotImplemented, self.instance,
-                                     self.target_node, NotImplemented,
-                                     ignore=True, _compute_fn=compute_recoder)
+                                    self.target_node, NotImplemented,
+                                    ignore=True, _compute_fn=compute_recoder)
     self.assertTrue(compute_recoder.called)
@@ -956,7 +956,7 @@ class TestApplyContainerMods(unittest.TestCase):
   def testEmptyContainer(self):
     container = []
     chgdesc = []
-    instance.ApplyContainerMods("test", container, chgdesc, [], None, None,
+    instance._ApplyContainerMods("test", container, chgdesc, [], None, None,
-                                None)
+                                 None)
     self.assertEqual(container, [])
     self.assertEqual(chgdesc, [])
@@ -964,24 +964,24 @@ class TestApplyContainerMods(unittest.TestCase):
   def testAdd(self):
     container = []
     chgdesc = []
-    mods = instance.PrepareContainerMods([
+    mods = instance._PrepareContainerMods([
       (constants.DDM_ADD, -1, "Hello"),
       (constants.DDM_ADD, -1, "World"),
       (constants.DDM_ADD, 0, "Start"),
       (constants.DDM_ADD, -1, "End"),
       ], None)
-    instance.ApplyContainerMods("test", container, chgdesc, mods,
+    instance._ApplyContainerMods("test", container, chgdesc, mods,
-                                None, None, None)
+                                 None, None, None)
     self.assertEqual(container, ["Start", "Hello", "World", "End"])
     self.assertEqual(chgdesc, [])
 
-    mods = instance.PrepareContainerMods([
+    mods = instance._PrepareContainerMods([
       (constants.DDM_ADD, 0, "zero"),
       (constants.DDM_ADD, 3, "Added"),
       (constants.DDM_ADD, 5, "four"),
       (constants.DDM_ADD, 7, "xyz"),
       ], None)
-    instance.ApplyContainerMods("test", container, chgdesc, mods,
+    instance._ApplyContainerMods("test", container, chgdesc, mods,
-                                None, None, None)
+                                 None, None, None)
     self.assertEqual(container,
                      ["zero", "Start", "Hello", "Added", "World", "four",
@@ -989,43 +989,43 @@ class TestApplyContainerMods(unittest.TestCase):
     self.assertEqual(chgdesc, [])
 
     for idx in [-2, len(container) + 1]:
-      mods = instance.PrepareContainerMods([
+      mods = instance._PrepareContainerMods([
         (constants.DDM_ADD, idx, "error"),
         ], None)
-      self.assertRaises(IndexError, instance.ApplyContainerMods,
+      self.assertRaises(IndexError, instance._ApplyContainerMods,
                         "test", container, None, mods, None, None, None)
 
   def testRemoveError(self):
     for idx in [0, 1, 2, 100, -1, -4]:
-      mods = instance.PrepareContainerMods([
+      mods = instance._PrepareContainerMods([
         (constants.DDM_REMOVE, idx, None),
         ], None)
-      self.assertRaises(IndexError, instance.ApplyContainerMods,
+      self.assertRaises(IndexError, instance._ApplyContainerMods,
                         "test", [], None, mods, None, None, None)
 
-    mods = instance.PrepareContainerMods([
+    mods = instance._PrepareContainerMods([
       (constants.DDM_REMOVE, 0, object()),
       ], None)
-    self.assertRaises(AssertionError, instance.ApplyContainerMods,
+    self.assertRaises(AssertionError, instance._ApplyContainerMods,
                       "test", [""], None, mods, None, None, None)
 
   def testAddError(self):
     for idx in range(-100, -1) + [100]:
-      mods = instance.PrepareContainerMods([
+      mods = instance._PrepareContainerMods([
         (constants.DDM_ADD, idx, None),
         ], None)
-      self.assertRaises(IndexError, instance.ApplyContainerMods,
+      self.assertRaises(IndexError, instance._ApplyContainerMods,
                         "test", [], None, mods, None, None, None)
 
   def testRemove(self):
     container = ["item 1", "item 2"]
-    mods = instance.PrepareContainerMods([
+    mods = instance._PrepareContainerMods([
       (constants.DDM_ADD, -1, "aaa"),
       (constants.DDM_REMOVE, -1, None),
       (constants.DDM_ADD, -1, "bbb"),
       ], None)
     chgdesc = []
-    instance.ApplyContainerMods("test", container, chgdesc, mods,
+    instance._ApplyContainerMods("test", container, chgdesc, mods,
-                                None, None, None)
+                                 None, None, None)
     self.assertEqual(container, ["item 1", "item 2", "bbb"])
     self.assertEqual(chgdesc, [
@@ -1034,22 +1034,22 @@ class TestApplyContainerMods(unittest.TestCase):
 
   def testModify(self):
     container = ["item 1", "item 2"]
-    mods = instance.PrepareContainerMods([
+    mods = instance._PrepareContainerMods([
       (constants.DDM_MODIFY, -1, "a"),
       (constants.DDM_MODIFY, 0, "b"),
       (constants.DDM_MODIFY, 1, "c"),
       ], None)
     chgdesc = []
-    instance.ApplyContainerMods("test", container, chgdesc, mods,
+    instance._ApplyContainerMods("test", container, chgdesc, mods,
-                                None, None, None)
+                                 None, None, None)
     self.assertEqual(container, ["item 1", "item 2"])
     self.assertEqual(chgdesc, [])
 
     for idx in [-2, len(container) + 1]:
-      mods = instance.PrepareContainerMods([
+      mods = instance._PrepareContainerMods([
         (constants.DDM_MODIFY, idx, "error"),
         ], None)
-      self.assertRaises(IndexError, instance.ApplyContainerMods,
+      self.assertRaises(IndexError, instance._ApplyContainerMods,
                         "test", container, None, mods, None, None, None)
 
   class _PrivateData:
@@ -1077,7 +1077,7 @@ class TestApplyContainerMods(unittest.TestCase):
   def testAddWithCreateFunction(self):
     container = []
     chgdesc = []
-    mods = instance.PrepareContainerMods([
+    mods = instance._PrepareContainerMods([
       (constants.DDM_ADD, -1, "Hello"),
       (constants.DDM_ADD, -1, "World"),
       (constants.DDM_ADD, 0, "Start"),
@@ -1087,7 +1087,7 @@ class TestApplyContainerMods(unittest.TestCase):
       (constants.DDM_REMOVE, 2, None),
       (constants.DDM_ADD, 1, "More"),
       ], self._PrivateData)
-    instance.ApplyContainerMods("test", container, chgdesc, mods,
+    instance._ApplyContainerMods("test", container, chgdesc, mods,
-                                self._CreateTestFn, self._ModifyTestFn,
-                                self._RemoveTestFn)
+                                 self._CreateTestFn, self._ModifyTestFn,
+                                 self._RemoveTestFn)
     self.assertEqual(container, [
@@ -1167,7 +1167,7 @@ class TestGenerateDiskTemplate(unittest.TestCase):
     return copy.deepcopy(constants.DISK_DT_DEFAULTS)
 
   def testWrongDiskTemplate(self):
-    gdt = instance._GenerateDiskTemplate
+    gdt = instance.GenerateDiskTemplate
     disk_template = "##unknown##"
 
     assert disk_template not in constants.DISK_TEMPLATES
@@ -1178,7 +1178,7 @@ class TestGenerateDiskTemplate(unittest.TestCase):
                       self.GetDiskParams())
 
   def testDiskless(self):
-    gdt = instance._GenerateDiskTemplate
+    gdt = instance.GenerateDiskTemplate
 
     result = gdt(self.lu, constants.DT_DISKLESS, "inst27734.example.com",
                  "node30113.example.com", [], [],
@@ -1191,7 +1191,7 @@ class TestGenerateDiskTemplate(unittest.TestCase):
                        file_driver=NotImplemented,
                        req_file_storage=NotImplemented,
                        req_shr_file_storage=NotImplemented):
-    gdt = instance._GenerateDiskTemplate
+    gdt = instance.GenerateDiskTemplate
 
     map(lambda params: utils.ForceDictType(params,
                                            constants.IDISK_PARAMS_TYPES),
@@ -1320,7 +1320,7 @@ class TestGenerateDiskTemplate(unittest.TestCase):
       ])
 
   def testDrbd8(self):
-    gdt = instance._GenerateDiskTemplate
+    gdt = instance.GenerateDiskTemplate
     drbd8_defaults = constants.DISK_LD_DEFAULTS[constants.LD_DRBD8]
     drbd8_default_metavg = drbd8_defaults[constants.LDP_DEFAULT_METAVG]
 
@@ -1491,7 +1491,7 @@ class TestWipeDisks(unittest.TestCase):
                             disk_template=constants.DT_PLAIN,
                             disks=disks)
 
-    self.assertRaises(errors.OpExecError, instance._WipeDisks, lu, inst)
+    self.assertRaises(errors.OpExecError, instance.WipeDisks, lu, inst)
 
   def _FailingWipeCb(self, (disk, _), offset, size):
     # This should only ever be called for the first disk
@@ -1519,7 +1519,7 @@ class TestWipeDisks(unittest.TestCase):
                             disks=disks)
 
     try:
-      instance._WipeDisks(lu, inst)
+      instance.WipeDisks(lu, inst)
     except errors.OpExecError, err:
       self.assertTrue(str(err), "Could not wipe disk 0 at offset 0 ")
     else:
@@ -1562,7 +1562,7 @@ class TestWipeDisks(unittest.TestCase):
 
     (lu, inst, pauset, progresst) = self._PrepareWipeTest(0, disks)
 
-    instance._WipeDisks(lu, inst)
+    instance.WipeDisks(lu, inst)
 
     self.assertEqual(pauset.history, [
       ("disk0", 1024, True),
@@ -1592,8 +1592,8 @@ class TestWipeDisks(unittest.TestCase):
         self._PrepareWipeTest(start_offset, disks)
 
       # Test start offset with only one disk
-      instance._WipeDisks(lu, inst,
-                          disks=[(1, disks[1], start_offset)])
+      instance.WipeDisks(lu, inst,
+                         disks=[(1, disks[1], start_offset)])
 
       # Only the second disk may have been paused and wiped
       self.assertEqual(pauset.history, [
@@ -1639,12 +1639,12 @@ class TestDiskSizeInBytesToMebibytes(unittest.TestCase):
 
 class TestCopyLockList(unittest.TestCase):
   def test(self):
-    self.assertEqual(instance._CopyLockList([]), [])
-    self.assertEqual(instance._CopyLockList(None), None)
-    self.assertEqual(instance._CopyLockList(locking.ALL_SET), locking.ALL_SET)
+    self.assertEqual(instance.CopyLockList([]), [])
+    self.assertEqual(instance.CopyLockList(None), None)
+    self.assertEqual(instance.CopyLockList(locking.ALL_SET), locking.ALL_SET)
 
     names = ["foo", "bar"]
-    output = instance._CopyLockList(names)
+    output = instance.CopyLockList(names)
     self.assertEqual(names, output)
     self.assertNotEqual(id(names), id(output), msg="List was not copied")
 
@@ -1897,8 +1897,8 @@ class TestGetUpdatedIPolicy(unittest.TestCase):
       }
     if not isgroup:
       diff_policy[constants.ISPECS_STD] = diff_std
-    new_policy = common._GetUpdatedIPolicy(old_policy, diff_policy,
-                                           group_policy=isgroup)
+    new_policy = common.GetUpdatedIPolicy(old_policy, diff_policy,
+                                          group_policy=isgroup)
 
     self.assertTrue(constants.ISPECS_MINMAX in new_policy)
     self.assertEqual(new_policy[constants.ISPECS_MINMAX], diff_minmax)
@@ -1919,7 +1919,7 @@ class TestGetUpdatedIPolicy(unittest.TestCase):
           self.assertEqual(new_std[key], old_std[key])
 
   def _TestSet(self, old_policy, diff_policy, isgroup):
-    new_policy = common._GetUpdatedIPolicy(old_policy, diff_policy,
+    new_policy = common.GetUpdatedIPolicy(old_policy, diff_policy,
-                                           group_policy=isgroup)
+                                          group_policy=isgroup)
     for key in diff_policy:
       self.assertTrue(key in new_policy)
@@ -1946,8 +1946,8 @@ class TestGetUpdatedIPolicy(unittest.TestCase):
     diff_policy = {
       constants.IPOLICY_SPINDLE_RATIO: constants.VALUE_DEFAULT,
       }
-    new_policy = common._GetUpdatedIPolicy(old_policy, diff_policy,
-                                           group_policy=True)
+    new_policy = common.GetUpdatedIPolicy(old_policy, diff_policy,
+                                          group_policy=True)
     for key in diff_policy:
       self.assertFalse(key in new_policy)
     for key in old_policy:
@@ -1955,7 +1955,7 @@ class TestGetUpdatedIPolicy(unittest.TestCase):
         self.assertTrue(key in new_policy)
         self.assertEqual(new_policy[key], old_policy[key])
 
-    self.assertRaises(errors.OpPrereqError, common._GetUpdatedIPolicy,
+    self.assertRaises(errors.OpPrereqError, common.GetUpdatedIPolicy,
                       old_policy, diff_policy, group_policy=False)
 
   def testUnsetEmpty(self):
@@ -1964,8 +1964,8 @@ class TestGetUpdatedIPolicy(unittest.TestCase):
       diff_policy = {
         key: constants.VALUE_DEFAULT,
         }
-    new_policy = common._GetUpdatedIPolicy(old_policy, diff_policy,
-                                           group_policy=True)
+    new_policy = common.GetUpdatedIPolicy(old_policy, diff_policy,
+                                          group_policy=True)
     self.assertEqual(new_policy, old_policy)
 
   def _TestInvalidKeys(self, old_policy, isgroup):
@@ -1974,18 +1974,18 @@ class TestGetUpdatedIPolicy(unittest.TestCase):
       INVALID_KEY: 3,
       }
     invalid_policy = INVALID_DICT
-    self.assertRaises(errors.OpPrereqError, common._GetUpdatedIPolicy,
+    self.assertRaises(errors.OpPrereqError, common.GetUpdatedIPolicy,
                       old_policy, invalid_policy, group_policy=isgroup)
     invalid_ispecs = {
       constants.ISPECS_MINMAX: [INVALID_DICT],
       }
-    self.assertRaises(errors.TypeEnforcementError, common._GetUpdatedIPolicy,
+    self.assertRaises(errors.TypeEnforcementError, common.GetUpdatedIPolicy,
                       old_policy, invalid_ispecs, group_policy=isgroup)
     if isgroup:
       invalid_for_group = {
         constants.ISPECS_STD: constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
         }
-      self.assertRaises(errors.OpPrereqError, common._GetUpdatedIPolicy,
+      self.assertRaises(errors.OpPrereqError, common.GetUpdatedIPolicy,
                         old_policy, invalid_for_group, group_policy=isgroup)
     good_ispecs = self._OLD_CLUSTER_POLICY[constants.ISPECS_MINMAX]
     invalid_ispecs = copy.deepcopy(good_ispecs)
@@ -1997,19 +1997,19 @@ class TestGetUpdatedIPolicy(unittest.TestCase):
         ispec = minmax[key]
         ispec[INVALID_KEY] = None
         self.assertRaises(errors.TypeEnforcementError,
-                          common._GetUpdatedIPolicy, old_policy,
+                          common.GetUpdatedIPolicy, old_policy,
                           invalid_policy, group_policy=isgroup)
         del ispec[INVALID_KEY]
         for par in constants.ISPECS_PARAMETERS:
           oldv = ispec[par]
           ispec[par] = "this_is_not_good"
           self.assertRaises(errors.TypeEnforcementError,
-                            common._GetUpdatedIPolicy,
+                            common.GetUpdatedIPolicy,
                             old_policy, invalid_policy, group_policy=isgroup)
           ispec[par] = oldv
     # This is to make sure that no two errors were present during the tests
-    common._GetUpdatedIPolicy(old_policy, invalid_policy,
-                              group_policy=isgroup)
+    common.GetUpdatedIPolicy(old_policy, invalid_policy,
+                             group_policy=isgroup)
 
   def testInvalidKeys(self):
     self._TestInvalidKeys(self._OLD_GROUP_POLICY, True)
@@ -2021,7 +2021,7 @@ class TestGetUpdatedIPolicy(unittest.TestCase):
       bad_policy = {
         par: "invalid_value",
         }
-      self.assertRaises(errors.OpPrereqError, common._GetUpdatedIPolicy, {},
+      self.assertRaises(errors.OpPrereqError, common.GetUpdatedIPolicy, {},
                         bad_policy, group_policy=True)
 
 if __name__ == "__main__":