Merge branch 'next'
authorIustin Pop <iustin@google.com>
Thu, 28 May 2009 10:44:56 +0000 (12:44 +0200)
committerIustin Pop <iustin@google.com>
Thu, 28 May 2009 10:45:24 +0000 (12:45 +0200)
* next: (34 commits)
  watcher: automatically restart noded/rapi
  watcher: handle full and drained queue cases
  rapi: rework error handling
  Fix backend.OSEnvironment be/hv parameters
  rapi: make tags query not use jobs
  Change failover instance when instance is stopped
  Export more instance information in hooks
  watcher: write the instance status to a file
  Fix the SafeEncoding behaviour
  Move more hypervisor strings into constants
  Add -H/-B startup parameters to gnt-instance
  call_instance_start: add optional hv/be parameters
  Fix gnt-job list argument handling
  Instance reinstall: don't mix up errors
  Don't check memory at startup if instance is up
  gnt-cluster modify: fix --no-lvm-storage
  LUSetClusterParams: improve volume group removal
  gnt-cluster info: show more cluster parameters
  LUQueryClusterInfo: return a few more fields
  Add the new DRBD test files to the Makefile
  ...

Signed-off-by: Iustin Pop <iustin@google.com>
Reviewed-by: Guido Trotter <ultrotter@google.com>

32 files changed:
Makefile.am
daemons/ganeti-masterd
daemons/ganeti-noded
daemons/ganeti-watcher
doc/examples/ganeti.initd.in
lib/backend.py
lib/bdev.py
lib/bootstrap.py
lib/cli.py
lib/cmdlib.py
lib/constants.py
lib/hypervisor/hv_base.py
lib/hypervisor/hv_fake.py
lib/hypervisor/hv_kvm.py
lib/hypervisor/hv_xen.py
lib/opcodes.py
lib/rapi/baserlib.py
lib/rapi/rlib2.py
lib/rpc.py
lib/ssconf.py
lib/utils.py
man/gnt-debug.sgml
man/gnt-instance.sgml
scripts/gnt-cluster
scripts/gnt-debug
scripts/gnt-instance
scripts/gnt-job
test/data/bdev-8.3-both.txt [new file with mode: 0644]
test/data/proc_drbd83.txt [new file with mode: 0644]
test/ganeti.bdev_unittest.py
test/ganeti.constants_unittest.py
test/ganeti.utils_unittest.py

index 42b41b4..bacdc3d 100644 (file)
@@ -199,9 +199,11 @@ maninput = $(patsubst %.7,%.7.in,$(patsubst %.8,%.8.in,$(man_MANS))) $(patsubst
 
 TEST_FILES = \
        test/data/bdev-both.txt \
+       test/data/bdev-8.3-both.txt \
        test/data/bdev-disk.txt \
        test/data/bdev-net.txt \
-       test/data/proc_drbd8.txt
+       test/data/proc_drbd8.txt \
+       test/data/proc_drbd83.txt
 
 dist_TESTS = \
        test/ganeti.bdev_unittest.py \
@@ -222,7 +224,7 @@ nodist_TESTS =
 
 TESTS = $(dist_TESTS) $(nodist_TESTS)
 
-TESTS_ENVIRONMENT = PYTHONPATH=.:$(top_builddir)
+TESTS_ENVIRONMENT = PYTHONPATH=.:$(top_builddir) $(PYTHON)
 
 RAPI_RESOURCES = $(wildcard lib/rapi/*.py)
 
index 4c6a968..6a73e4e 100755 (executable)
@@ -36,7 +36,6 @@ import collections
 import Queue
 import random
 import signal
-import simplejson
 import logging
 
 from cStringIO import StringIO
@@ -55,6 +54,7 @@ from ganeti import ssconf
 from ganeti import workerpool
 from ganeti import rpc
 from ganeti import bootstrap
+from ganeti import serializer
 
 
 CLIENT_REQUEST_WORKERS = 16
@@ -152,7 +152,7 @@ class ClientRqHandler(SocketServer.BaseRequestHandler):
         logging.debug("client closed connection")
         break
 
-      request = simplejson.loads(msg)
+      request = serializer.LoadJson(msg)
       logging.debug("request: %s", request)
       if not isinstance(request, dict):
         logging.error("wrong request received: %s", msg)
@@ -181,7 +181,7 @@ class ClientRqHandler(SocketServer.BaseRequestHandler):
         luxi.KEY_RESULT: result,
         }
       logging.debug("response: %s", response)
-      self.send_message(simplejson.dumps(response))
+      self.send_message(serializer.DumpJson(response))
 
   def read_message(self):
     while not self._msgs:
index 3a2b4a0..9600e30 100755 (executable)
@@ -721,7 +721,7 @@ def ParseOptions():
 
   """
   parser = OptionParser(description="Ganeti node daemon",
-                        usage="%prog [-f] [-d]",
+                        usage="%prog [-f] [-d] [-b ADDRESS]",
                         version="%%prog (ganeti) %s" %
                         constants.RELEASE_VERSION)
 
@@ -731,6 +731,10 @@ def ParseOptions():
   parser.add_option("-d", "--debug", dest="debug",
                     help="Enable some debug messages",
                     default=False, action="store_true")
+  parser.add_option("-b", "--bind", dest="bind_address",
+                    help="Bind address",
+                    default="", metavar="ADDRESS")
+
   options, args = parser.parse_args()
   return options, args
 
@@ -781,7 +785,7 @@ def main():
     queue_lock = jstore.InitAndVerifyQueue(must_lock=False)
 
     mainloop = daemon.Mainloop()
-    server = NodeHttpServer(mainloop, "", port,
+    server = NodeHttpServer(mainloop, options.bind_address, port,
                             ssl_params=ssl_params, ssl_verify_peer=True)
     server.Start()
     try:
index 42a2eaf..2749de6 100755 (executable)
@@ -80,6 +80,20 @@ def StartMaster():
   return not result.failed
 
 
+def EnsureDaemon(daemon):
+  """Check for and start daemon if not alive.
+
+  """
+  pidfile = utils.DaemonPidFileName(daemon)
+  pid = utils.ReadPidFile(pidfile)
+  if pid == 0 or not utils.IsProcessAlive(pid): # no file or dead pid
+    logging.debug("Daemon '%s' not alive, trying to restart", daemon)
+    result = utils.RunCmd([daemon])
+    if result.failed:
+      logging.error("Can't start daemon '%s', failure %s, output: %s",
+                    daemon, result.fail_reason, result.output)
+
+
 class WatcherState(object):
   """Interface to a state file recording restart attempts.
 
@@ -255,10 +269,17 @@ def GetClusterData():
 
   all_results = cli.PollJob(job_id, cl=client, feedback_fn=logging.debug)
 
+  logging.debug("Got data from cluster, writing instance status file")
+
   result = all_results[0]
   smap = {}
 
   instances = {}
+
+  # write the upfile
+  up_data = "".join(["%s %s\n" % (fields[0], fields[1]) for fields in result])
+  utils.WriteFile(file_name=constants.INSTANCE_UPFILE, data=up_data)
+
   for fields in result:
     (name, status, autostart, snodes) = fields
 
@@ -291,6 +312,9 @@ class Watcher(object):
     master = client.QueryConfigValues(["master_node"])[0]
     if master != utils.HostInfo().name:
       raise NotMasterError("This is not the master node")
+    # first archive old jobs
+    self.ArchiveJobs(opts.job_age)
+    # and only then submit new ones
     self.instances, self.bootids, self.smap = GetClusterData()
     self.started_instances = set()
     self.opts = opts
@@ -300,12 +324,12 @@ class Watcher(object):
 
     """
     notepad = self.notepad
-    self.ArchiveJobs(self.opts.job_age)
     self.CheckInstances(notepad)
     self.CheckDisks(notepad)
     self.VerifyDisks()
 
-  def ArchiveJobs(self, age):
+  @staticmethod
+  def ArchiveJobs(age):
     """Archive old jobs.
 
     """
@@ -452,8 +476,12 @@ def main():
   utils.SetupLogging(constants.LOG_WATCHER, debug=options.debug,
                      stderr_logging=options.debug)
 
-  update_file = True
+  update_file = False
   try:
+    # On master or not, try to start the node daemon (the _PID
+    # constant holds the same value as the daemon name)
+    EnsureDaemon(constants.NODED_PID)
+
     notepad = WatcherState()
     try:
       try:
@@ -461,24 +489,30 @@ def main():
       except errors.OpPrereqError:
         # this is, from cli.GetClient, a not-master case
         logging.debug("Not on master, exiting")
+        update_file = True
         sys.exit(constants.EXIT_SUCCESS)
       except luxi.NoMasterError, err:
         logging.warning("Master seems to be down (%s), trying to restart",
                         str(err))
         if not StartMaster():
           logging.critical("Can't start the master, exiting")
-          update_file = False
           sys.exit(constants.EXIT_FAILURE)
         # else retry the connection
         client = cli.GetClient()
 
+      # We are on master now (the _PID constant equals the daemon name)
+      EnsureDaemon(constants.RAPI_PID)
+
       try:
         watcher = Watcher(options, notepad)
       except errors.ConfigurationError:
         # Just exit if there's no configuration
+        update_file = True
         sys.exit(constants.EXIT_SUCCESS)
 
       watcher.Run()
+      update_file = True
+
     finally:
       if update_file:
         notepad.Save()
@@ -492,6 +526,10 @@ def main():
   except errors.ResolverError, err:
     logging.error("Cannot resolve hostname '%s', exiting.", err.args[0])
     sys.exit(constants.EXIT_NODESETUP_ERROR)
+  except errors.JobQueueFull:
+    logging.error("Job queue is full, can't query cluster state")
+  except errors.JobQueueDrainError:
+    logging.error("Job queue is drained, can't maintain cluster state")
   except Exception, err:
     logging.error(str(err), exc_info=True)
     sys.exit(constants.EXIT_FAILURE)
index 10bc684..b346530 100644 (file)
@@ -16,17 +16,22 @@ DESC="Ganeti cluster"
 
 GANETIRUNDIR="@LOCALSTATEDIR@/run/ganeti"
 
+GANETI_DEFAULTS_FILE="@SYSCONFDIR@/default/ganeti"
+
 NODED_NAME="ganeti-noded"
 NODED="@PREFIX@/sbin/${NODED_NAME}"
 NODED_PID="${GANETIRUNDIR}/${NODED_NAME}.pid"
+NODED_ARGS=""
 
 MASTERD_NAME="ganeti-masterd"
 MASTERD="@PREFIX@/sbin/${MASTERD_NAME}"
 MASTERD_PID="${GANETIRUNDIR}/${MASTERD_NAME}.pid"
+MASTERD_ARGS=""
 
 RAPI_NAME="ganeti-rapi"
 RAPI="@PREFIX@/sbin/${RAPI_NAME}"
 RAPI_PID="${GANETIRUNDIR}/${RAPI_NAME}.pid"
+RAPI_ARGS=""
 
 SCRIPTNAME="@SYSCONFDIR@/init.d/ganeti"
 
@@ -34,6 +39,10 @@ test -f $NODED || exit 0
 
 . /lib/lsb/init-functions
 
+if [ -s $GANETI_DEFAULTS_FILE ]; then
+    . $GANETI_DEFAULTS_FILE
+fi
+
 check_config() {
     for fname in \
         "@LOCALSTATEDIR@/lib/ganeti/server.pem"
@@ -84,16 +93,16 @@ case "$1" in
     start)
         log_daemon_msg "Starting $DESC" "$NAME"
         check_config
-        start_action $NODED $NODED_PID
-        start_action $MASTERD $MASTERD_PID
-        start_action $RAPI $RAPI_PID
-     ;;
+        start_action $NODED $NODED_PID $NODED_ARGS
+        start_action $MASTERD $MASTERD_PID $MASTERD_ARGS
+        start_action $RAPI $RAPI_PID $RAPI_ARGS
+        ;;
     stop)
         log_daemon_msg "Stopping $DESC" "$NAME"
         stop_action $RAPI $RAPI_PID
         stop_action $MASTERD $MASTERD_PID
         stop_action $NODED $NODED_PID
-    ;;
+        ;;
     restart|force-reload)
         log_daemon_msg "Reloading $DESC"
         stop_action $RAPI $RAPI_PID
@@ -103,11 +112,11 @@ case "$1" in
         start_action $NODED $NODED_PID
         start_action $MASTERD $MASTERD_PID
         start_action $RAPI $RAPI_PID
-     ;;
+        ;;
     *)
         log_success_msg "Usage: $SCRIPTNAME {start|stop|force-reload|restart}"
         exit 1
-    ;;
+        ;;
 esac
 
 exit 0
index 18d439e..54dc023 100644 (file)
@@ -1690,6 +1690,10 @@ def OSEnvironment(instance, debug=0):
       result['NIC_%d_FRONTEND_TYPE' % idx] = \
         instance.hvparams[constants.HV_NIC_TYPE]
 
+  for source, kind in [(instance.beparams, "BE"), (instance.hvparams, "HV")]:
+    for key, value in source.items():
+      result["INSTANCE_%s_%s" % (kind, key)] = str(value)
+
   return result
 
 def BlockdevGrow(disk, amount):
index 545cc0b..9d3f08b 100644 (file)
@@ -563,7 +563,7 @@ class DRBD8Status(object):
 
   """
   UNCONF_RE = re.compile(r"\s*[0-9]+:\s*cs:Unconfigured$")
-  LINE_RE = re.compile(r"\s*[0-9]+:\s*cs:(\S+)\s+st:([^/]+)/(\S+)"
+  LINE_RE = re.compile(r"\s*[0-9]+:\s*cs:(\S+)\s+(?:st|ro):([^/]+)/(\S+)"
                        "\s+ds:([^/]+)/(\S+)\s+.*$")
   SYNC_RE = re.compile(r"^.*\ssync'ed:\s*([0-9.]+)%.*"
                        "\sfinish: ([0-9]+):([0-9]+):([0-9]+)\s.*$")
@@ -896,15 +896,20 @@ class DRBD8(BaseDRBD):
     # value types
     value = pyp.Word(pyp.alphanums + '_-/.:')
     quoted = dbl_quote + pyp.CharsNotIn('"') + dbl_quote
-    addr_port = (pyp.Word(pyp.nums + '.') + pyp.Literal(':').suppress() +
-                 number)
+    addr_type = (pyp.Optional(pyp.Literal("ipv4")).suppress() +
+                 pyp.Optional(pyp.Literal("ipv6")).suppress())
+    addr_port = (addr_type + pyp.Word(pyp.nums + '.') +
+                 pyp.Literal(':').suppress() + number)
     # meta device, extended syntax
     meta_value = ((value ^ quoted) + pyp.Literal('[').suppress() +
                   number + pyp.Word(']').suppress())
+    # device name, extended syntax
+    device_value = pyp.Literal("minor").suppress() + number
 
     # a statement
     stmt = (~rbrace + keyword + ~lbrace +
-            pyp.Optional(addr_port ^ value ^ quoted ^ meta_value) +
+            pyp.Optional(addr_port ^ value ^ quoted ^ meta_value ^
+                         device_value) +
             pyp.Optional(defa) + semi +
             pyp.Optional(pyp.restOfLine).suppress())
 
index edee90d..0308484 100644 (file)
@@ -25,7 +25,6 @@
 
 import os
 import os.path
-import sha
 import re
 import logging
 import tempfile
index 6db7cea..17dc47c 100644 (file)
@@ -815,6 +815,8 @@ def GenerateTable(headers, fields, separator, data,
     format = separator.replace("%", "%%").join(format_fields)
 
   for row in data:
+    if row is None:
+      continue
     for idx, val in enumerate(row):
       if unitfields.Matches(fields[idx]):
         try:
@@ -840,6 +842,8 @@ def GenerateTable(headers, fields, separator, data,
 
   for line in data:
     args = []
+    if line is None:
+      line = ['-' for _ in fields]
     for idx in xrange(len(fields)):
       if separator is None:
         args.append(mlens[idx])
index 26ea707..1ab20ac 100644 (file)
@@ -25,7 +25,6 @@
 
 import os
 import os.path
-import sha
 import time
 import tempfile
 import re
@@ -454,7 +453,8 @@ def _CheckNodeNotDrained(lu, node):
 
 
 def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
-                          memory, vcpus, nics, disk_template, disks):
+                          memory, vcpus, nics, disk_template, disks,
+                          bep, hvp, hypervisor):
   """Builds instance related env variables for hooks
 
   This builds the hook environment from individual variables.
@@ -480,6 +480,12 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
   @param disk_template: the distk template of the instance
   @type disks: list
   @param disks: the list of (size, mode) pairs
+  @type bep: dict
+  @param bep: the backend parameters for the instance
+  @type hvp: dict
+  @param hvp: the hypervisor parameters for the instance
+  @type hypervisor: string
+  @param hypervisor: the hypervisor for the instance
   @rtype: dict
   @return: the hook environment for this instance
 
@@ -498,6 +504,7 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
     "INSTANCE_MEMORY": memory,
     "INSTANCE_VCPUS": vcpus,
     "INSTANCE_DISK_TEMPLATE": disk_template,
+    "INSTANCE_HYPERVISOR": hypervisor,
   }
 
   if nics:
@@ -523,6 +530,10 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
 
   env["INSTANCE_DISK_COUNT"] = disk_count
 
+  for source, kind in [(bep, "BE"), (hvp, "HV")]:
+    for key, value in source.items():
+      env["INSTANCE_%s_%s" % (kind, key)] = value
+
   return env
 
 
@@ -541,7 +552,9 @@ def _BuildInstanceHookEnvByObject(lu, instance, override=None):
   @return: the hook environment dictionary
 
   """
-  bep = lu.cfg.GetClusterInfo().FillBE(instance)
+  cluster = lu.cfg.GetClusterInfo()
+  bep = cluster.FillBE(instance)
+  hvp = cluster.FillHV(instance)
   args = {
     'name': instance.name,
     'primary_node': instance.primary_node,
@@ -553,6 +566,9 @@ def _BuildInstanceHookEnvByObject(lu, instance, override=None):
     'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
     'disk_template': instance.disk_template,
     'disks': [(disk.size, disk.mode) for disk in instance.disks],
+    'bep': bep,
+    'hvp': hvp,
+    'hypervisor': instance.hypervisor,
   }
   if override:
     args.update(override)
@@ -1524,8 +1540,11 @@ class LUSetClusterParams(LogicalUnit):
 
     """
     if self.op.vg_name is not None:
-      if self.op.vg_name != self.cfg.GetVGName():
-        self.cfg.SetVGName(self.op.vg_name)
+      new_volume = self.op.vg_name
+      if not new_volume:
+        new_volume = None
+      if new_volume != self.cfg.GetVGName():
+        self.cfg.SetVGName(new_volume)
       else:
         feedback_fn("Cluster LVM configuration already in desired"
                     " state, not changing")
@@ -2441,6 +2460,10 @@ class LUQueryClusterInfo(NoHooksLU):
                         for hypervisor in cluster.enabled_hypervisors]),
       "beparams": cluster.beparams,
       "candidate_pool_size": cluster.candidate_pool_size,
+      "default_bridge": cluster.default_bridge,
+      "master_netdev": cluster.master_netdev,
+      "volume_group_name": cluster.volume_group_name,
+      "file_storage_dir": cluster.file_storage_dir,
       }
 
     return result
@@ -2755,15 +2778,48 @@ class LUStartupInstance(LogicalUnit):
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
 
+    # extra beparams
+    self.beparams = getattr(self.op, "beparams", {})
+    if self.beparams:
+      if not isinstance(self.beparams, dict):
+        raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
+                                   " dict" % (type(self.beparams), ))
+      # fill the beparams dict
+      utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
+      self.op.beparams = self.beparams
+
+    # extra hvparams
+    self.hvparams = getattr(self.op, "hvparams", {})
+    if self.hvparams:
+      if not isinstance(self.hvparams, dict):
+        raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
+                                   " dict" % (type(self.hvparams), ))
+
+      # check hypervisor parameter syntax (locally)
+      cluster = self.cfg.GetClusterInfo()
+      utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
+      filled_hvp = cluster.FillDict(cluster.hvparams[instance.hypervisor],
+                                    instance.hvparams)
+      filled_hvp.update(self.hvparams)
+      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
+      hv_type.CheckParameterSyntax(filled_hvp)
+      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
+      self.op.hvparams = self.hvparams
+
     _CheckNodeOnline(self, instance.primary_node)
 
     bep = self.cfg.GetClusterInfo().FillBE(instance)
     # check bridges existance
     _CheckInstanceBridgesExist(self, instance)
 
-    _CheckNodeFreeMemory(self, instance.primary_node,
-                         "starting instance %s" % instance.name,
-                         bep[constants.BE_MEMORY], instance.hypervisor)
+    remote_info = self.rpc.call_instance_info(instance.primary_node,
+                                              instance.name,
+                                              instance.hypervisor)
+    remote_info.Raise()
+    if not remote_info.data:
+      _CheckNodeFreeMemory(self, instance.primary_node,
+                           "starting instance %s" % instance.name,
+                           bep[constants.BE_MEMORY], instance.hypervisor)
 
   def Exec(self, feedback_fn):
     """Start the instance.
@@ -2778,7 +2834,8 @@ class LUStartupInstance(LogicalUnit):
 
     _StartInstanceDisks(self, instance, force)
 
-    result = self.rpc.call_instance_start(node_current, instance)
+    result = self.rpc.call_instance_start(node_current, instance,
+                                          self.hvparams, self.beparams)
     msg = result.RemoteFailMsg()
     if msg:
       _ShutdownInstanceDisks(self, instance)
@@ -2860,7 +2917,7 @@ class LURebootInstance(LogicalUnit):
                                  " full reboot: %s" % msg)
       _ShutdownInstanceDisks(self, instance)
       _StartInstanceDisks(self, instance, ignore_secondaries)
-      result = self.rpc.call_instance_start(node_current, instance)
+      result = self.rpc.call_instance_start(node_current, instance, None, None)
       msg = result.RemoteFailMsg()
       if msg:
         _ShutdownInstanceDisks(self, instance)
@@ -2960,7 +3017,8 @@ class LUReinstallInstance(LogicalUnit):
     remote_info = self.rpc.call_instance_info(instance.primary_node,
                                               instance.name,
                                               instance.hypervisor)
-    if remote_info.failed or remote_info.data:
+    remote_info.Raise()
+    if remote_info.data:
       raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                  (self.op.instance_name,
                                   instance.primary_node))
@@ -3478,10 +3536,15 @@ class LUFailoverInstance(LogicalUnit):
     target_node = secondary_nodes[0]
     _CheckNodeOnline(self, target_node)
     _CheckNodeNotDrained(self, target_node)
-    # check memory requirements on the secondary node
-    _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
-                         instance.name, bep[constants.BE_MEMORY],
-                         instance.hypervisor)
+
+    if instance.admin_up:
+      # check memory requirements on the secondary node
+      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
+                           instance.name, bep[constants.BE_MEMORY],
+                           instance.hypervisor)
+    else:
+      self.LogInfo("Not checking memory on the secondary node as"
+                   " instance will not be started")
 
     # check bridge existance
     brlist = [nic.bridge for nic in instance.nics]
@@ -3550,7 +3613,7 @@ class LUFailoverInstance(LogicalUnit):
         raise errors.OpExecError("Can't activate the instance's disks")
 
       feedback_fn("* starting the instance on the target node")
-      result = self.rpc.call_instance_start(target_node, instance)
+      result = self.rpc.call_instance_start(target_node, instance, None, None)
       msg = result.RemoteFailMsg()
       if msg:
         _ShutdownInstanceDisks(self, instance)
@@ -4300,6 +4363,7 @@ class LUCreateInstance(LogicalUnit):
                                   self.op.hvparams)
     hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
     hv_type.CheckParameterSyntax(filled_hvp)
+    self.hv_full = filled_hvp
 
     # fill and remember the beparams dict
     utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
@@ -4477,6 +4541,9 @@ class LUCreateInstance(LogicalUnit):
       nics=[(n.ip, n.bridge, n.mac) for n in self.nics],
       disk_template=self.op.disk_template,
       disks=[(d["size"], d["mode"]) for d in self.disks],
+      bep=self.be_full,
+      hvp=self.hv_full,
+      hypervisor=self.op.hypervisor,
     ))
 
     nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
@@ -4794,7 +4861,7 @@ class LUCreateInstance(LogicalUnit):
       self.cfg.Update(iobj)
       logging.info("Starting instance %s on node %s", instance, pnode_name)
       feedback_fn("* starting instance...")
-      result = self.rpc.call_instance_start(pnode_name, iobj)
+      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
       msg = result.RemoteFailMsg()
       if msg:
         raise errors.OpExecError("Could not start instance: %s" % msg)
@@ -6242,7 +6309,7 @@ class LUExportInstance(LogicalUnit):
 
     finally:
       if self.op.shutdown and instance.admin_up:
-        result = self.rpc.call_instance_start(src_node, instance)
+        result = self.rpc.call_instance_start(src_node, instance, None, None)
         msg = result.RemoteFailMsg()
         if msg:
           _ShutdownInstanceDisks(self, instance)
index 058f2dd..d9edc13 100644 (file)
@@ -96,6 +96,7 @@ CLUSTER_CONF_FILE = DATA_DIR + "/config.data"
 SSL_CERT_FILE = DATA_DIR + "/server.pem"
 RAPI_CERT_FILE = DATA_DIR + "/rapi.pem"
 WATCHER_STATEFILE = DATA_DIR + "/watcher.data"
+INSTANCE_UPFILE = RUN_GANETI_DIR + "/instance-status"
 SSH_KNOWN_HOSTS_FILE = DATA_DIR + "/known_hosts"
 RAPI_USERS_FILE = DATA_DIR + "/rapi_users"
 QUEUE_DIR = DATA_DIR + "/queue"
@@ -356,7 +357,7 @@ VNC_BASE_PORT = 5900
 VNC_PASSWORD_FILE = _autoconf.SYSCONFDIR + "/ganeti/vnc-cluster-password"
 VNC_DEFAULT_BIND_ADDRESS = '0.0.0.0'
 
-# Device types
+# NIC types
 HT_NIC_RTL8139 = "rtl8139"
 HT_NIC_NE2K_PCI = "ne2k_pci"
 HT_NIC_NE2K_ISA = "ne2k_isa"
@@ -366,25 +367,40 @@ HT_NIC_I8259ER = "i82559er"
 HT_NIC_PCNET = "pcnet"
 HT_NIC_E1000 = "e1000"
 HT_NIC_PARAVIRTUAL = HT_DISK_PARAVIRTUAL = "paravirtual"
-HT_DISK_IOEMU = "ioemu"
-HT_DISK_IDE = "ide"
-HT_DISK_SCSI = "scsi"
-HT_DISK_SD = "sd"
-HT_DISK_MTD = "mtd"
-HT_DISK_PFLASH = "pflash"
 
 HT_HVM_VALID_NIC_TYPES = frozenset([HT_NIC_RTL8139, HT_NIC_NE2K_PCI,
                                     HT_NIC_NE2K_ISA, HT_NIC_PARAVIRTUAL])
-HT_HVM_VALID_DISK_TYPES = frozenset([HT_DISK_PARAVIRTUAL, HT_DISK_IOEMU])
 HT_KVM_VALID_NIC_TYPES = frozenset([HT_NIC_RTL8139, HT_NIC_NE2K_PCI,
                                     HT_NIC_NE2K_ISA, HT_NIC_I82551,
                                     HT_NIC_I85557B, HT_NIC_I8259ER,
                                     HT_NIC_PCNET, HT_NIC_E1000,
                                     HT_NIC_PARAVIRTUAL])
+# Disk types
+HT_DISK_IOEMU = "ioemu"
+HT_DISK_IDE = "ide"
+HT_DISK_SCSI = "scsi"
+HT_DISK_SD = "sd"
+HT_DISK_MTD = "mtd"
+HT_DISK_PFLASH = "pflash"
+
+HT_HVM_VALID_DISK_TYPES = frozenset([HT_DISK_PARAVIRTUAL, HT_DISK_IOEMU])
 HT_KVM_VALID_DISK_TYPES = frozenset([HT_DISK_PARAVIRTUAL, HT_DISK_IDE,
                                      HT_DISK_SCSI, HT_DISK_SD, HT_DISK_MTD,
                                      HT_DISK_PFLASH])
 
+# Mouse types:
+HT_MOUSE_MOUSE = "mouse"
+HT_MOUSE_TABLET = "tablet"
+
+HT_KVM_VALID_MOUSE_TYPES = frozenset([HT_MOUSE_MOUSE, HT_MOUSE_TABLET])
+
+# Boot order
+HT_BO_CDROM = "cdrom"
+HT_BO_DISK = "disk"
+HT_BO_NETWORK = "network"
+
+HT_KVM_VALID_BO_TYPES = frozenset([HT_BO_CDROM, HT_BO_DISK, HT_BO_NETWORK])
+
 # Cluster Verify steps
 VERIFY_NPLUSONE_MEM = 'nplusone_mem'
 VERIFY_OPTIONAL_CHECKS = frozenset([VERIFY_NPLUSONE_MEM])
@@ -501,7 +517,7 @@ HVC_DEFAULTS = {
     HV_VNC_X509: '',
     HV_VNC_X509_VERIFY: False,
     HV_CDROM_IMAGE_PATH: '',
-    HV_BOOT_ORDER: "disk",
+    HV_BOOT_ORDER: HT_BO_DISK,
     HV_NIC_TYPE: HT_NIC_PARAVIRTUAL,
     HV_DISK_TYPE: HT_DISK_PARAVIRTUAL,
     HV_USB_MOUSE: '',
index b094b37..e5de1b7 100644 (file)
@@ -23,6 +23,9 @@
 
 """
 
+import re
+
+
 from ganeti import errors
 
 
@@ -187,3 +190,64 @@ class BaseHypervisor(object):
 
     """
     pass
+
+  def GetLinuxNodeInfo(self):
+    """For linux systems, return actual OS information.
+
+    This is an abstraction for all non-hypervisor-based classes, where
+    the node actually sees all the memory and CPUs via the /proc
+    interface and standard commands. The other case is, for example,
+    xen, where you only see the hardware resources via xen-specific
+    tools.
+
+    @return: a dict with the following keys (values in MiB):
+          - memory_total: the total memory size on the node
+          - memory_free: the available memory on the node for instances
+          - memory_dom0: the memory used by the node itself, if available
+
+    """
+    try:
+      fh = file("/proc/meminfo")
+      try:
+        data = fh.readlines()
+      finally:
+        fh.close()
+    except EnvironmentError, err:
+      raise errors.HypervisorError("Failed to list node info: %s" % (err,))
+
+    result = {}
+    sum_free = 0
+    try:
+      for line in data:
+        splitfields = line.split(":", 1)
+
+        if len(splitfields) > 1:
+          key = splitfields[0].strip()
+          val = splitfields[1].strip()
+          if key == 'MemTotal':
+            result['memory_total'] = int(val.split()[0])/1024
+          elif key in ('MemFree', 'Buffers', 'Cached'):
+            sum_free += int(val.split()[0])/1024
+          elif key == 'Active':
+            result['memory_dom0'] = int(val.split()[0])/1024
+    except (ValueError, TypeError), err:
+      raise errors.HypervisorError("Failed to compute memory usage: %s" %
+                                   (err,))
+    result['memory_free'] = sum_free
+
+    cpu_total = 0
+    try:
+      fh = open("/proc/cpuinfo")
+      try:
+        cpu_total = len(re.findall("(?m)^processor\s*:\s*[0-9]+\s*$",
+                                   fh.read()))
+      finally:
+        fh.close()
+    except EnvironmentError, err:
+      raise errors.HypervisorError("Failed to list node info: %s" % (err,))
+    result['cpu_total'] = cpu_total
+    # FIXME: export correct data here
+    result['cpu_nodes'] = 1
+    result['cpu_sockets'] = 1
+
+    return result
index 48e645b..ccac842 100644 (file)
@@ -155,61 +155,19 @@ class FakeHypervisor(hv_base.BaseHypervisor):
   def GetNodeInfo(self):
     """Return information about the node.
 
+    This is just a wrapper over the base GetLinuxNodeInfo method.
+
     @return: a dict with the following keys (values in MiB):
           - memory_total: the total memory size on the node
           - memory_free: the available memory on the node for instances
           - memory_dom0: the memory used by the node itself, if available
 
     """
-    # global ram usage from the xm info command
-    # memory                 : 3583
-    # free_memory            : 747
-    # note: in xen 3, memory has changed to total_memory
-    try:
-      fh = file("/proc/meminfo")
-      try:
-        data = fh.readlines()
-      finally:
-        fh.close()
-    except IOError, err:
-      raise errors.HypervisorError("Failed to list node info: %s" % err)
-
-    result = {}
-    sum_free = 0
-    for line in data:
-      splitfields = line.split(":", 1)
-
-      if len(splitfields) > 1:
-        key = splitfields[0].strip()
-        val = splitfields[1].strip()
-        if key == 'MemTotal':
-          result['memory_total'] = int(val.split()[0])/1024
-        elif key in ('MemFree', 'Buffers', 'Cached'):
-          sum_free += int(val.split()[0])/1024
-        elif key == 'Active':
-          result['memory_dom0'] = int(val.split()[0])/1024
-    result['memory_free'] = sum_free
-
+    result = self.GetLinuxNodeInfo()
     # substract running instances
     all_instances = self.GetAllInstancesInfo()
     result['memory_free'] -= min(result['memory_free'],
                                  sum([row[2] for row in all_instances]))
-
-    cpu_total = 0
-    try:
-      fh = open("/proc/cpuinfo")
-      try:
-        cpu_total = len(re.findall("(?m)^processor\s*:\s*[0-9]+\s*$",
-                                   fh.read()))
-      finally:
-        fh.close()
-    except EnvironmentError, err:
-      raise errors.HypervisorError("Failed to list node info: %s" % err)
-    result['cpu_total'] = cpu_total
-    # FIXME: export correct data here
-    result['cpu_nodes'] = 1
-    result['cpu_sockets'] = 1
-
     return result
 
   @classmethod
index 358de74..3dec178 100644 (file)
@@ -236,9 +236,9 @@ class KVMHypervisor(hv_base.BaseHypervisor):
       kvm_cmd.extend(['-no-acpi'])
 
     hvp = instance.hvparams
-    boot_disk = hvp[constants.HV_BOOT_ORDER] == "disk"
-    boot_cdrom = hvp[constants.HV_BOOT_ORDER] == "cdrom"
-    boot_network = hvp[constants.HV_BOOT_ORDER] == "network"
+    boot_disk = hvp[constants.HV_BOOT_ORDER] == constants.HT_BO_DISK
+    boot_cdrom = hvp[constants.HV_BOOT_ORDER] == constants.HT_BO_CDROM
+    boot_network = hvp[constants.HV_BOOT_ORDER] == constants.HT_BO_NETWORK
 
     if boot_network:
       kvm_cmd.extend(['-boot', 'n'])
@@ -618,57 +618,15 @@ class KVMHypervisor(hv_base.BaseHypervisor):
   def GetNodeInfo(self):
     """Return information about the node.
 
+    This is just a wrapper over the base GetLinuxNodeInfo method.
+
     @return: a dict with the following keys (values in MiB):
           - memory_total: the total memory size on the node
           - memory_free: the available memory on the node for instances
           - memory_dom0: the memory used by the node itself, if available
 
     """
-    # global ram usage from the xm info command
-    # memory                 : 3583
-    # free_memory            : 747
-    # note: in xen 3, memory has changed to total_memory
-    try:
-      fh = file("/proc/meminfo")
-      try:
-        data = fh.readlines()
-      finally:
-        fh.close()
-    except EnvironmentError, err:
-      raise errors.HypervisorError("Failed to list node info: %s" % err)
-
-    result = {}
-    sum_free = 0
-    for line in data:
-      splitfields = line.split(":", 1)
-
-      if len(splitfields) > 1:
-        key = splitfields[0].strip()
-        val = splitfields[1].strip()
-        if key == 'MemTotal':
-          result['memory_total'] = int(val.split()[0])/1024
-        elif key in ('MemFree', 'Buffers', 'Cached'):
-          sum_free += int(val.split()[0])/1024
-        elif key == 'Active':
-          result['memory_dom0'] = int(val.split()[0])/1024
-    result['memory_free'] = sum_free
-
-    cpu_total = 0
-    try:
-      fh = open("/proc/cpuinfo")
-      try:
-        cpu_total = len(re.findall("(?m)^processor\s*:\s*[0-9]+\s*$",
-                                   fh.read()))
-      finally:
-        fh.close()
-    except EnvironmentError, err:
-      raise errors.HypervisorError("Failed to list node info: %s" % err)
-    result['cpu_total'] = cpu_total
-    # FIXME: export correct data here
-    result['cpu_nodes'] = 1
-    result['cpu_sockets'] = 1
-
-    return result
+    return self.GetLinuxNodeInfo()
 
   @classmethod
   def GetShellCommandForConsole(cls, instance, hvparams, beparams):
@@ -765,35 +723,39 @@ class KVMHypervisor(hv_base.BaseHypervisor):
                                    " an absolute path, if defined")
 
     boot_order = hvparams[constants.HV_BOOT_ORDER]
-    if boot_order not in ('cdrom', 'disk', 'network'):
-      raise errors.HypervisorError("The boot order must be 'cdrom', 'disk' or"
-                                   " 'network'")
+    if boot_order not in constants.HT_KVM_VALID_BO_TYPES:
+      raise errors.HypervisorError(\
+        "The boot order must be one of %s" %
+        utils.CommaJoin(constants.HT_KVM_VALID_BO_TYPES))
 
-    if boot_order == 'cdrom' and not iso_path:
-      raise errors.HypervisorError("Cannot boot from cdrom without an ISO path")
+    if boot_order == constants.HT_BO_CDROM and not iso_path:
+      raise errors.HypervisorError("Cannot boot from cdrom without an"
+                                   " ISO path")
 
     nic_type = hvparams[constants.HV_NIC_TYPE]
     if nic_type not in constants.HT_KVM_VALID_NIC_TYPES:
-      raise errors.HypervisorError("Invalid NIC type %s specified for the KVM"
-                                   " hypervisor. Please choose one of: %s" %
-                                   (nic_type,
-                                    constants.HT_KVM_VALID_NIC_TYPES))
-    elif boot_order == 'network' and nic_type == constants.HT_NIC_PARAVIRTUAL:
+      raise errors.HypervisorError(\
+        "Invalid NIC type %s specified for the KVM"
+        " hypervisor. Please choose one of: %s" %
+        (nic_type, utils.CommaJoin(constants.HT_KVM_VALID_NIC_TYPES)))
+    elif (boot_order == constants.HT_BO_NETWORK and
+          nic_type == constants.HT_NIC_PARAVIRTUAL):
       raise errors.HypervisorError("Cannot boot from a paravirtual NIC. Please"
-                                   " change the nic type.")
+                                   " change the NIC type.")
 
     disk_type = hvparams[constants.HV_DISK_TYPE]
     if disk_type not in constants.HT_KVM_VALID_DISK_TYPES:
-      raise errors.HypervisorError("Invalid disk type %s specified for the KVM"
-                                   " hypervisor. Please choose one of: %s" %
-                                   (disk_type,
-                                    constants.HT_KVM_VALID_DISK_TYPES))
+      raise errors.HypervisorError(\
+        "Invalid disk type %s specified for the KVM"
+        " hypervisor. Please choose one of: %s" %
+        (disk_type, utils.CommaJoin(constants.HT_KVM_VALID_DISK_TYPES)))
 
     mouse_type = hvparams[constants.HV_USB_MOUSE]
-    if mouse_type and mouse_type not in ('mouse', 'tablet'):
-      raise errors.HypervisorError("Invalid usb mouse type %s specified for"
-                                   " the KVM hyervisor. Please choose"
-                                   " 'mouse' or 'tablet'" % mouse_type)
+    if mouse_type and mouse_type not in constants.HT_KVM_VALID_MOUSE_TYPES:
+      raise errors.HypervisorError(\
+        "Invalid usb mouse type %s specified for the KVM hypervisor. Please"
+        " choose one of %s" %
+        (mouse_type, utils.CommaJoin(constants.HT_KVM_VALID_MOUSE_TYPES)))
 
   def ValidateParameters(self, hvparams):
     """Check the given parameters for validity.
index 959958d..4579f4d 100644 (file)
@@ -535,16 +535,16 @@ class XenHvmHypervisor(XenHypervisor):
     # device type checks
     nic_type = hvparams[constants.HV_NIC_TYPE]
     if nic_type not in constants.HT_HVM_VALID_NIC_TYPES:
-      raise errors.HypervisorError("Invalid NIC type %s specified for the Xen"
-                                   " HVM hypervisor. Please choose one of: %s"
-                                   % (nic_type,
-                                      constants.HT_HVM_VALID_NIC_TYPES))
+      raise errors.HypervisorError(\
+        "Invalid NIC type %s specified for the Xen"
+        " HVM hypervisor. Please choose one of: %s"
+        % (nic_type, utils.CommaJoin(constants.HT_HVM_VALID_NIC_TYPES)))
     disk_type = hvparams[constants.HV_DISK_TYPE]
     if disk_type not in constants.HT_HVM_VALID_DISK_TYPES:
-      raise errors.HypervisorError("Invalid disk type %s specified for the Xen"
-                                   " HVM hypervisor. Please choose one of: %s"
-                                   % (disk_type,
-                                      constants.HT_HVM_VALID_DISK_TYPES))
+      raise errors.HypervisorError(\
+        "Invalid disk type %s specified for the Xen"
+        " HVM hypervisor. Please choose one of: %s"
+        % (disk_type, utils.CommaJoin(constants.HT_HVM_VALID_DISK_TYPES)))
     # vnc_bind_address verification
     vnc_bind_address = hvparams[constants.HV_VNC_BIND_ADDRESS]
     if vnc_bind_address:
index 5be660b..535db91 100644 (file)
@@ -382,7 +382,7 @@ class OpStartupInstance(OpCode):
   """Startup an instance."""
   OP_ID = "OP_INSTANCE_STARTUP"
   OP_DSC_FIELD = "instance_name"
-  __slots__ = ["instance_name", "force"]
+  __slots__ = ["instance_name", "force", "hvparams", "beparams"]
 
 
 class OpShutdownInstance(OpCode):
index 0138e64..2bca63b 100644 (file)
 
 """
 
+import logging
+
 import ganeti.cli
-import ganeti.opcodes
 
 from ganeti import luxi
 from ganeti import rapi
 from ganeti import http
+from ganeti import ssconf
+from ganeti import constants
+from ganeti import opcodes
+from ganeti import errors
 
 
 def BuildUriList(ids, uri_format, uri_fields=("name", "uri")):
@@ -81,8 +86,22 @@ def _Tags_GET(kind, name=""):
   """Helper function to retrieve tags.
 
   """
-  op = ganeti.opcodes.OpGetTags(kind=kind, name=name)
-  tags = ganeti.cli.SubmitOpCode(op)
+  if kind == constants.TAG_INSTANCE or kind == constants.TAG_NODE:
+    if not name:
+      raise http.HttpBadRequest("Missing name on tag request")
+    cl = GetClient()
+    if kind == constants.TAG_INSTANCE:
+      fn = cl.QueryInstances
+    else:
+      fn = cl.QueryNodes
+    result = fn(names=[name], fields=["tags"], use_locking=False)
+    if not result or not result[0]:
+      raise http.HttpBadGateway("Invalid response from tag query")
+    tags = result[0][0]
+  elif kind == constants.TAG_CLUSTER:
+    ssc = ssconf.SimpleStore()
+    tags = ssc.GetClusterTags()
+
   return list(tags)
 
 
@@ -90,18 +109,14 @@ def _Tags_PUT(kind, tags, name=""):
   """Helper function to set tags.
 
   """
-  cl = luxi.Client()
-  return cl.SubmitJob([ganeti.opcodes.OpAddTags(kind=kind, name=name,
-                                                tags=tags)])
+  return SubmitJob([opcodes.OpAddTags(kind=kind, name=name, tags=tags)])
 
 
 def _Tags_DELETE(kind, tags, name=""):
   """Helper function to delete tags.
 
   """
-  cl = luxi.Client()
-  return cl.SubmitJob([ganeti.opcodes.OpDelTags(kind=kind, name=name,
-                                                tags=tags)])
+  return SubmitJob([opcodes.OpDelTags(kind=kind, name=name, tags=tags)])
 
 
 def MapBulkFields(itemslist, fields):
@@ -147,6 +162,51 @@ def MakeParamsDict(opts, params):
   return result
 
 
+def SubmitJob(op, cl=None):
+  """Generic wrapper for submit job, for better http compatibility.
+
+  @type op: list
+  @param op: the list of opcodes for the job
+  @type cl: None or luxi.Client
+  @param cl: optional luxi client to use
+  @rtype: string
+  @return: the job ID
+
+  """
+  try:
+    if cl is None:
+      cl = GetClient()
+    return cl.SubmitJob(op)
+  except errors.JobQueueFull:
+    raise http.HttpServiceUnavailable("Job queue is full, needs archiving")
+  except errors.JobQueueDrainError:
+    raise http.HttpServiceUnavailable("Job queue is drained, cannot submit")
+  except luxi.NoMasterError, err:
+    raise http.HttpBadGateway("Master seems to be unreachable: %s" % str(err))
+  except luxi.TimeoutError, err:
+    raise http.HttpGatewayTimeout("Timeout while talking to the master"
+                                  " daemon. Error: %s" % str(err))
+
+def GetClient():
+  """Generic wrapper for luxi.Client(), for better HTTP compatibility.
+
+  """
+  try:
+    return luxi.Client()
+  except luxi.NoMasterError, err:
+    raise http.HttpBadGateway("Master seems to be unreachable: %s" % str(err))
+
+
+def FeedbackFn(ts, log_type, log_msg):
+  """Feedback logging function for http case.
+
+  We don't have a stdout for printing log messages, so log them to the
+  http log at least.
+
+  """
+  logging.info("%s: %s", log_type, log_msg)
+
+
 class R_Generic(object):
   """Generic class for resources.
 
index 79b95da..738f86f 100644 (file)
 
 """
 
-import ganeti.opcodes
+from ganeti import opcodes
 from ganeti import http
-from ganeti import luxi
 from ganeti import constants
+from ganeti import cli
 from ganeti.rapi import baserlib
 
 
+
 I_FIELDS = ["name", "admin_state", "os",
             "pnode", "snodes",
             "disk_template",
@@ -105,7 +106,7 @@ class R_2_info(baserlib.R_Generic):
       }
 
     """
-    client = luxi.Client()
+    client = baserlib.GetClient()
     return client.QueryClusterInfo()
 
 
@@ -123,12 +124,15 @@ class R_2_os(baserlib.R_Generic):
     Example: ["debian-etch"]
 
     """
-    op = ganeti.opcodes.OpDiagnoseOS(output_fields=["name", "valid"],
-                                     names=[])
-    diagnose_data = ganeti.cli.SubmitOpCode(op)
+    cl = baserlib.GetClient()
+    op = opcodes.OpDiagnoseOS(output_fields=["name", "valid"], names=[])
+    job_id = baserlib.SubmitJob([op], cl)
+    # we use custom feedback function, instead of print we log the status
+    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
+    diagnose_data = result[0]
 
     if not isinstance(diagnose_data, list):
-      raise http.HttpInternalServerError(message="Can't get OS list")
+      raise http.HttpBadGateway(message="Can't get OS list")
 
     return [row[0] for row in diagnose_data if row[1]]
 
@@ -146,8 +150,9 @@ class R_2_jobs(baserlib.R_Generic):
 
     """
     fields = ["id"]
+    cl = baserlib.GetClient()
     # Convert the list of lists to the list of ids
-    result = [job_id for [job_id] in luxi.Client().QueryJobs(None, fields)]
+    result = [job_id for [job_id] in cl.QueryJobs(None, fields)]
     return baserlib.BuildUriList(result, "/2/jobs/%s",
                                  uri_fields=("id", "uri"))
 
@@ -176,7 +181,7 @@ class R_2_jobs_id(baserlib.R_Generic):
               "received_ts", "start_ts", "end_ts",
               ]
     job_id = self.items[0]
-    result = luxi.Client().QueryJobs([job_id, ], fields)[0]
+    result = baserlib.GetClient().QueryJobs([job_id, ], fields)[0]
     if result is None:
       raise http.HttpNotFound()
     return baserlib.MapFields(fields, result)
@@ -186,7 +191,7 @@ class R_2_jobs_id(baserlib.R_Generic):
 
     """
     job_id = self.items[0]
-    result = luxi.Client().CancelJob(job_id)
+    result = baserlib.GetClient().CancelJob(job_id)
     return result
 
 
@@ -237,7 +242,7 @@ class R_2_nodes(baserlib.R_Generic):
     @return: a dictionary with 'name' and 'uri' keys for each of them
 
     """
-    client = luxi.Client()
+    client = baserlib.GetClient()
 
     if self.useBulk():
       bulkdata = client.QueryNodes([], N_FIELDS, False)
@@ -260,7 +265,7 @@ class R_2_nodes_name(baserlib.R_Generic):
 
     """
     node_name = self.items[0]
-    client = luxi.Client()
+    client = baserlib.GetClient()
     result = client.QueryNodes(names=[node_name], fields=N_FIELDS,
                                use_locking=self.useLocking())
 
@@ -326,7 +331,7 @@ class R_2_instances(baserlib.R_Generic):
     @return: a dictionary with 'name' and 'uri' keys for each of them.
 
     """
-    client = luxi.Client()
+    client = baserlib.GetClient()
 
     use_locking = self.useLocking()
     if self.useBulk():
@@ -368,28 +373,27 @@ class R_2_instances(baserlib.R_Generic):
              "ip": fn("ip", None),
              "bridge": fn("bridge", None)}]
 
-    op = ganeti.opcodes.OpCreateInstance(
-        mode=constants.INSTANCE_CREATE,
-        instance_name=fn('name'),
-        disks=disks,
-        disk_template=fn('disk_template'),
-        os_type=fn('os'),
-        pnode=fn('pnode', None),
-        snode=fn('snode', None),
-        iallocator=fn('iallocator', None),
-        nics=nics,
-        start=fn('start', True),
-        ip_check=fn('ip_check', True),
-        wait_for_sync=True,
-        hypervisor=fn('hypervisor', None),
-        hvparams=hvparams,
-        beparams=beparams,
-        file_storage_dir=fn('file_storage_dir', None),
-        file_driver=fn('file_driver', 'loop'),
-        )
-
-    job_id = ganeti.cli.SendJob([op])
-    return job_id
+    op = opcodes.OpCreateInstance(
+      mode=constants.INSTANCE_CREATE,
+      instance_name=fn('name'),
+      disks=disks,
+      disk_template=fn('disk_template'),
+      os_type=fn('os'),
+      pnode=fn('pnode', None),
+      snode=fn('snode', None),
+      iallocator=fn('iallocator', None),
+      nics=nics,
+      start=fn('start', True),
+      ip_check=fn('ip_check', True),
+      wait_for_sync=True,
+      hypervisor=fn('hypervisor', None),
+      hvparams=hvparams,
+      beparams=beparams,
+      file_storage_dir=fn('file_storage_dir', None),
+      file_driver=fn('file_driver', 'loop'),
+      )
+
+    return baserlib.SubmitJob([op])
 
 
 class R_2_instances_name(baserlib.R_Generic):
@@ -402,7 +406,7 @@ class R_2_instances_name(baserlib.R_Generic):
     """Send information about an instance.
 
     """
-    client = luxi.Client()
+    client = baserlib.GetClient()
     instance_name = self.items[0]
     result = client.QueryInstances(names=[instance_name], fields=I_FIELDS,
                                    use_locking=self.useLocking())
@@ -413,10 +417,9 @@ class R_2_instances_name(baserlib.R_Generic):
     """Delete an instance.
 
     """
-    op = ganeti.opcodes.OpRemoveInstance(instance_name=self.items[0],
-                                         ignore_failures=False)
-    job_id = ganeti.cli.SendJob([op])
-    return job_id
+    op = opcodes.OpRemoveInstance(instance_name=self.items[0],
+                                  ignore_failures=False)
+    return baserlib.SubmitJob([op])
 
 
 class R_2_instances_name_reboot(baserlib.R_Generic):
@@ -440,14 +443,11 @@ class R_2_instances_name_reboot(baserlib.R_Generic):
                                      [constants.INSTANCE_REBOOT_HARD])[0]
     ignore_secondaries = bool(self.queryargs.get('ignore_secondaries',
                                                  [False])[0])
-    op = ganeti.opcodes.OpRebootInstance(
-        instance_name=instance_name,
-        reboot_type=reboot_type,
-        ignore_secondaries=ignore_secondaries)
-
-    job_id = ganeti.cli.SendJob([op])
+    op = opcodes.OpRebootInstance(instance_name=instance_name,
+                                  reboot_type=reboot_type,
+                                  ignore_secondaries=ignore_secondaries)
 
-    return job_id
+    return baserlib.SubmitJob([op])
 
 
 class R_2_instances_name_startup(baserlib.R_Generic):
@@ -468,12 +468,10 @@ class R_2_instances_name_startup(baserlib.R_Generic):
     """
     instance_name = self.items[0]
     force_startup = bool(self.queryargs.get('force', [False])[0])
-    op = ganeti.opcodes.OpStartupInstance(instance_name=instance_name,
-                                          force=force_startup)
+    op = opcodes.OpStartupInstance(instance_name=instance_name,
+                                   force=force_startup)
 
-    job_id = ganeti.cli.SendJob([op])
-
-    return job_id
+    return baserlib.SubmitJob([op])
 
 
 class R_2_instances_name_shutdown(baserlib.R_Generic):
@@ -490,11 +488,9 @@ class R_2_instances_name_shutdown(baserlib.R_Generic):
 
     """
     instance_name = self.items[0]
-    op = ganeti.opcodes.OpShutdownInstance(instance_name=instance_name)
-
-    job_id = ganeti.cli.SendJob([op])
+    op = opcodes.OpShutdownInstance(instance_name=instance_name)
 
-    return job_id
+    return baserlib.SubmitJob([op])
 
 
 class _R_Tags(baserlib.R_Generic):
index 70dd312..48a48dc 100644 (file)
@@ -260,7 +260,7 @@ class RpcRunner(object):
     self._cfg = cfg
     self.port = utils.GetNodeDaemonPort()
 
-  def _InstDict(self, instance):
+  def _InstDict(self, instance, hvp=None, bep=None):
     """Convert the given instance to a dict.
 
     This is done via the instance's ToDict() method and additionally
@@ -268,6 +268,10 @@ class RpcRunner(object):
 
     @type instance: L{objects.Instance}
     @param instance: an Instance object
+    @type hvp: dict or None
+    @param hvp: a dictionary with overridden hypervisor parameters
+    @type bep: dict or None
+    @param bep: a dictionary with overridden backend parameters
     @rtype: dict
     @return: the instance dict, with the hvparams filled with the
         cluster defaults
@@ -276,7 +280,11 @@ class RpcRunner(object):
     idict = instance.ToDict()
     cluster = self._cfg.GetClusterInfo()
     idict["hvparams"] = cluster.FillHV(instance)
+    if hvp is not None:
+      idict["hvparams"].update(hvp)
     idict["beparams"] = cluster.FillBE(instance)
+    if bep is not None:
+      idict["beparams"].update(bep)
     return idict
 
   def _ConnectList(self, client, node_list, call):
@@ -425,14 +433,14 @@ class RpcRunner(object):
     """
     return self._SingleNodeCall(node, "bridges_exist", [bridges_list])
 
-  def call_instance_start(self, node, instance):
+  def call_instance_start(self, node, instance, hvp, bep):
     """Starts an instance.
 
     This is a single-node call.
 
     """
-    return self._SingleNodeCall(node, "instance_start",
-                                [self._InstDict(instance)])
+    idict = self._InstDict(instance, hvp=hvp, bep=bep)
+    return self._SingleNodeCall(node, "instance_start", [idict])
 
   def call_instance_shutdown(self, node, instance):
     """Stops an instance.
index cce1141..19a95c9 100644 (file)
@@ -250,6 +250,14 @@ class SimpleStore(object):
     nl = data.splitlines(False)
     return nl
 
+  def GetClusterTags(self):
+    """Return the cluster tags.
+
+    """
+    data = self._ReadFile(constants.SS_CLUSTER_TAGS)
+    nl = data.splitlines(False)
+    return nl
+
 
 def GetMasterAndMyself(ss=None):
   """Get the master node and my own hostname.
index 3048779..ac781fb 100644 (file)
@@ -156,11 +156,18 @@ def RunCmd(cmd, env=None, output=None, cwd='/'):
   if env is not None:
     cmd_env.update(env)
 
-  if output is None:
-    out, err, status = _RunCmdPipe(cmd, cmd_env, shell, cwd)
-  else:
-    status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
-    out = err = ""
+  try:
+    if output is None:
+      out, err, status = _RunCmdPipe(cmd, cmd_env, shell, cwd)
+    else:
+      status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
+      out = err = ""
+  except OSError, err:
+    if err.errno == errno.ENOENT:
+      raise errors.OpExecError("Can't execute '%s': not found (%s)" %
+                               (strcmd, err))
+    else:
+      raise
 
   if status >= 0:
     exitcode = status
@@ -1289,6 +1296,7 @@ def WriteFile(file_name, fn=None, data=None,
 
   dir_name, base_name = os.path.split(file_name)
   fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
+  do_remove = True
   # here we need to make sure we remove the temp file, if any error
   # leaves it in place
   try:
@@ -1309,13 +1317,15 @@ def WriteFile(file_name, fn=None, data=None,
       os.utime(new_name, (atime, mtime))
     if not dry_run:
       os.rename(new_name, file_name)
+      do_remove = False
   finally:
     if close:
       os.close(fd)
       result = None
     else:
       result = fd
-    RemoveFile(new_name)
+    if do_remove:
+      RemoveFile(new_name)
 
   return result
 
@@ -1806,11 +1816,13 @@ def SafeEncode(text):
   """Return a 'safe' version of a source string.
 
   This function mangles the input string and returns a version that
-  should be safe to disply/encode as ASCII. To this end, we first
+  should be safe to display/encode as ASCII. To this end, we first
   convert it to ASCII using the 'backslashreplace' encoding which
-  should get rid of any non-ASCII chars, and then we again encode it
-  via 'string_escape' which converts '\n' into '\\n' so that log
-  messages remain one-line.
+  should get rid of any non-ASCII chars, and then we process it
+  through a loop copied from the string repr sources in Python; we
+  don't use string_escape anymore since that escapes single quotes and
+  backslashes too, and that is too much; and that escaping is not
+  stable, i.e. string_escape(string_escape(x)) != string_escape(x).
 
   @type text: str or unicode
   @param text: input data
@@ -1818,9 +1830,33 @@ def SafeEncode(text):
   @return: a safe version of text
 
   """
-  text = text.encode('ascii', 'backslashreplace')
-  text = text.encode('string_escape')
-  return text
+  if isinstance(text, unicode):
+    # only if unicode; if str already, we handle it below
+    text = text.encode('ascii', 'backslashreplace')
+  resu = ""
+  for char in text:
+    c = ord(char)
+    if char  == '\t':
+      resu += r'\t'
+    elif char == '\n':
+      resu += r'\n'
+    elif char == '\r':
+      resu += r'\r'
+    elif c < 32 or c >= 127: # non-printable
+      resu += "\\x%02x" % (c & 0xff)
+    else:
+      resu += char
+  return resu
+
+
+def CommaJoin(names):
+  """Nicely join a set of identifiers.
+
+  @param names: set, list or tuple
+  @return: a string with the formatted results
+
+  """
+  return ", ".join(["'%s'" % val for val in names])
 
 
 def LockedMethod(fn):
index 2e624b0..89fd9e4 100644 (file)
       <cmdsynopsis>
         <command>submit-job</command>
 
-        <arg choice="req">opcodes_file</arg>
+        <arg choice="req" rep="repeat">opcodes_file</arg>
       </cmdsynopsis>
 
       <para>
-        This command builds a list of opcodes from a JSON-format file
-        and submits them as a single job to the master daemon. It can
+        This command builds a list of opcodes from JSON-format files
+        and submits for each file a job to the master daemon. It can
         be used to test some options that are not available via the
         command line.
       </para>
index b43f0da..8ca11de 100644 (file)
@@ -1317,6 +1317,9 @@ instance5: 11225
             <arg>--all</arg>
           </group>
           <sbr>
+          <arg>-H <option>key=value...</option></arg>
+          <arg>-B <option>key=value...</option></arg>
+          <sbr>
           <arg>--submit</arg>
           <sbr>
           <arg choice="opt"
@@ -1386,6 +1389,23 @@ instance5: 11225
         </para>
 
         <para>
+          The <option>-H</option> and <option>-B</option> options
+          specify extra, temporary hypervisor and backend parameters
+          that can be used to start an instance with modified
+          parameters. They can be useful for quick testing without
+          having to modify an instance back and forth, e.g.:
+          <screen>
+# gnt-instance start -H root_args="single" instance1
+# gnt-instance start -B memory=2048 instance2
+          </screen>
+          The first form will start the instance
+          <userinput>instance1</userinput> in single-user mode, and
+          the instance <userinput>instance2</userinput> with 2GB of
+          RAM (this time only, unless that is the actual instance
+          memory size already).
+        </para>
+
+        <para>
           The <option>--submit</option> option is used to send the job to
           the master daemon but not wait for its completion. The job
           ID will be shown so that it can be examined via
index c8473fa..99cab31 100755 (executable)
@@ -243,6 +243,10 @@ def ShowClusterConfig(opts, args):
 
   ToStdout("Cluster parameters:")
   ToStdout("  - candidate pool size: %s", result["candidate_pool_size"])
+  ToStdout("  - master netdev: %s", result["master_netdev"])
+  ToStdout("  - default bridge: %s", result["default_bridge"])
+  ToStdout("  - lvm volume group: %s", result["volume_group_name"])
+  ToStdout("  - file storage path: %s", result["file_storage_dir"])
 
   ToStdout("Default instance parameters:")
   for gr_name, gr_dict in result["beparams"].items():
@@ -463,6 +467,8 @@ def SetClusterParams(opts, args):
   if not opts.lvm_storage and opts.vg_name:
     ToStdout("Options --no-lvm-storage and --vg-name conflict.")
     return 1
+  elif not opts.lvm_storage:
+    vg_name = ''
 
   hvlist = opts.enabled_hypervisors
   if hvlist is not None:
@@ -476,7 +482,7 @@ def SetClusterParams(opts, args):
   beparams = opts.beparams
   utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
 
-  op = opcodes.OpSetClusterParams(vg_name=opts.vg_name,
+  op = opcodes.OpSetClusterParams(vg_name=vg_name,
                                   enabled_hypervisors=hvlist,
                                   hvparams=hvparams,
                                   beparams=beparams,
index ff7e01b..d3bf054 100755 (executable)
@@ -71,12 +71,19 @@ def GenericOpCodes(opts, args):
 
   """
   cl = cli.GetClient()
-  fname = args[0]
-  op_data = simplejson.loads(open(fname).read())
-  op_list = [opcodes.OpCode.LoadOpCode(val) for val in op_data]
-  jid = cli.SendJob(op_list, cl=cl)
-  ToStdout("Job id: %s", jid)
-  cli.PollJob(jid, cl=cl)
+  job_data = []
+  job_ids = []
+  for fname in args:
+    op_data = simplejson.loads(open(fname).read())
+    op_list = [opcodes.OpCode.LoadOpCode(val) for val in op_data]
+    job_data.append((fname, op_list))
+  for fname, op_list in job_data:
+    jid = cli.SendJob(op_list, cl=cl)
+    ToStdout("File '%s', job id: %s", fname, jid)
+    job_ids.append(jid)
+  for jid in job_ids:
+    ToStdout("Waiting for job id %s", jid)
+    cli.PollJob(jid, cl=cl)
   return 0
 
 
@@ -139,11 +146,11 @@ commands = {
                          help="Select nodes to sleep on"),
              ],
             "[opts...] <duration>", "Executes a TestDelay OpCode"),
-  'submit-job': (GenericOpCodes, ARGS_ONE,
+  'submit-job': (GenericOpCodes, ARGS_ATLEAST(1),
                  [DEBUG_OPT,
                   ],
-                 "<op_list_file>", "Submits a job built from a json-file"
-                 " with a list of serialized opcodes"),
+                 "<op_list_file...>", "Submits jobs built from json files"
+                 " containing a list of serialized opcodes"),
   'allocator': (TestAllocator, ARGS_ONE,
                 [DEBUG_OPT,
                  make_option("--dir", dest="direction",
index ecb93bb..a54672c 100755 (executable)
@@ -687,6 +687,11 @@ def StartupInstance(opts, args):
   for name in inames:
     op = opcodes.OpStartupInstance(instance_name=name,
                                    force=opts.force)
+    # do not add these parameters to the opcode unless they're defined
+    if opts.hvparams:
+      op.hvparams = opts.hvparams
+    if opts.beparams:
+      op.beparams = opts.beparams
     jex.QueueJob(name, op)
   jex.WaitOrShow(not opts.submit_only)
   return 0
@@ -1449,8 +1454,14 @@ commands = {
                m_node_opt, m_pri_node_opt, m_sec_node_opt,
                m_clust_opt, m_inst_opt,
                SUBMIT_OPT,
+               keyval_option("-H", "--hypervisor", type="keyval",
+                             default={}, dest="hvparams",
+                             help="Temporary hypervisor parameters"),
+               keyval_option("-B", "--backend", type="keyval",
+                             default={}, dest="beparams",
+                             help="Temporary backend parameters"),
                ],
-            "<instance>", "Starts an instance"),
+              "<instance>", "Starts an instance"),
 
   'reboot': (RebootInstance, ARGS_ANY,
               [DEBUG_OPT, m_force_multi,
index ce81709..2da75a3 100755 (executable)
@@ -85,7 +85,11 @@ def ListJobs(opts, args):
     headers = None
 
   # change raw values to nicer strings
-  for row in output:
+  for row_id, row in enumerate(output):
+    if row is None:
+      ToStderr("No such job: %s" % args[row_id])
+      continue
+
     for idx, field in enumerate(selected_fields):
       val = row[idx]
       if field == "status":
diff --git a/test/data/bdev-8.3-both.txt b/test/data/bdev-8.3-both.txt
new file mode 100644 (file)
index 0000000..bc6e741
--- /dev/null
@@ -0,0 +1,36 @@
+disk {
+       size                    0s _is_default; # bytes
+       on-io-error             detach;
+       fencing                 dont-care _is_default;
+       max-bio-bvecs           0 _is_default;
+}
+net {
+       timeout                 60 _is_default; # 1/10 seconds
+       max-epoch-size          2048 _is_default;
+       max-buffers             2048 _is_default;
+       unplug-watermark        128 _is_default;
+       connect-int             10 _is_default; # seconds
+       ping-int                10 _is_default; # seconds
+       sndbuf-size             131070 _is_default; # bytes
+       ko-count                0 _is_default;
+       after-sb-0pri           discard-zero-changes;
+       after-sb-1pri           consensus;
+       after-sb-2pri           disconnect _is_default;
+       rr-conflict             disconnect _is_default;
+       ping-timeout            5 _is_default; # 1/10 seconds
+}
+syncer {
+       rate                    61440k; # bytes/second
+       after                   -1 _is_default;
+       al-extents              257;
+}
+protocol C;
+_this_host {
+       device                  minor 0;
+       disk                    "/dev/xenvg/test.data";
+       meta-disk               "/dev/xenvg/test.meta" [ 0 ];
+       address                 ipv4 192.168.1.1:11000;
+}
+_remote_host {
+       address                 ipv4 192.168.1.2:11000;
+}
diff --git a/test/data/proc_drbd83.txt b/test/data/proc_drbd83.txt
new file mode 100644 (file)
index 0000000..114944c
Binary files /dev/null and b/test/data/proc_drbd83.txt differ
index 2db785c..b2299b1 100755 (executable)
@@ -61,7 +61,7 @@ class TestDRBD8Runner(testutils.GanetiTestCase):
     """Test drbdsetup show parser creation"""
     bdev.DRBD8._GetShowParser()
 
-  def testParserBoth(self):
+  def testParserBoth80(self):
     """Test drbdsetup show parser for disk and network"""
     data = self._ReadTestData("bdev-both.txt")
     result = bdev.DRBD8._GetDevInfo(data)
@@ -70,7 +70,18 @@ class TestDRBD8Runner(testutils.GanetiTestCase):
                     "Wrong local disk info")
     self.failUnless(self._has_net(result, ("192.168.1.1", 11000),
                                   ("192.168.1.2", 11000)),
-                    "Wrong network info")
+                    "Wrong network info (8.0.x)")
+
+  def testParserBoth83(self):
+    """Test drbdsetup show parser for disk and network (DRBD 8.3 output)"""
+    data = self._ReadTestData("bdev-8.3-both.txt")
+    result = bdev.DRBD8._GetDevInfo(data)
+    self.failUnless(self._has_disk(result, "/dev/xenvg/test.data",
+                                   "/dev/xenvg/test.meta"),
+                    "Wrong local disk info")
+    self.failUnless(self._has_net(result, ("192.168.1.1", 11000),
+                                  ("192.168.1.2", 11000)),
+                    "Wrong network info (8.3.x)")
 
   def testParserNet(self):
     """Test drbdsetup show parser for disk and network"""
@@ -103,8 +114,11 @@ class TestDRBD8Status(testutils.GanetiTestCase):
     """Read in txt data"""
     testutils.GanetiTestCase.setUp(self)
     proc_data = self._TestDataFilename("proc_drbd8.txt")
+    proc83_data = self._TestDataFilename("proc_drbd83.txt")
     self.proc_data = bdev.DRBD8._GetProcData(filename=proc_data)
+    self.proc83_data = bdev.DRBD8._GetProcData(filename=proc83_data)
     self.mass_data = bdev.DRBD8._MassageProcData(self.proc_data)
+    self.mass83_data = bdev.DRBD8._MassageProcData(self.proc83_data)
 
   def testIOErrors(self):
     """Test handling of errors while reading the proc file."""
@@ -116,6 +130,7 @@ class TestDRBD8Status(testutils.GanetiTestCase):
   def testMinorNotFound(self):
     """Test not-found-minor in /proc"""
     self.failUnless(9 not in self.mass_data)
+    self.failUnless(9 not in self.mass83_data)
 
   def testLineNotMatch(self):
     """Test wrong line passed to DRBD8Status"""
@@ -123,45 +138,51 @@ class TestDRBD8Status(testutils.GanetiTestCase):
 
   def testMinor0(self):
     """Test connected, primary device"""
-    stats = bdev.DRBD8Status(self.mass_data[0])
-    self.failUnless(stats.is_in_use)
-    self.failUnless(stats.is_connected and stats.is_primary and
-                    stats.peer_secondary and stats.is_disk_uptodate)
+    for data in [self.mass_data, self.mass83_data]:
+      stats = bdev.DRBD8Status(data[0])
+      self.failUnless(stats.is_in_use)
+      self.failUnless(stats.is_connected and stats.is_primary and
+                      stats.peer_secondary and stats.is_disk_uptodate)
 
   def testMinor1(self):
     """Test connected, secondary device"""
-    stats = bdev.DRBD8Status(self.mass_data[1])
-    self.failUnless(stats.is_in_use)
-    self.failUnless(stats.is_connected and stats.is_secondary and
-                    stats.peer_primary and stats.is_disk_uptodate)
+    for data in [self.mass_data, self.mass83_data]:
+      stats = bdev.DRBD8Status(data[1])
+      self.failUnless(stats.is_in_use)
+      self.failUnless(stats.is_connected and stats.is_secondary and
+                      stats.peer_primary and stats.is_disk_uptodate)
 
   def testMinor2(self):
     """Test unconfigured device"""
-    stats = bdev.DRBD8Status(self.mass_data[2])
-    self.failIf(stats.is_in_use)
+    for data in [self.mass_data, self.mass83_data]:
+      stats = bdev.DRBD8Status(data[2])
+      self.failIf(stats.is_in_use)
 
   def testMinor4(self):
     """Test WFconn device"""
-    stats = bdev.DRBD8Status(self.mass_data[4])
-    self.failUnless(stats.is_in_use)
-    self.failUnless(stats.is_wfconn and stats.is_primary and
-                    stats.rrole == 'Unknown' and
-                    stats.is_disk_uptodate)
+    for data in [self.mass_data, self.mass83_data]:
+      stats = bdev.DRBD8Status(data[4])
+      self.failUnless(stats.is_in_use)
+      self.failUnless(stats.is_wfconn and stats.is_primary and
+                      stats.rrole == 'Unknown' and
+                      stats.is_disk_uptodate)
 
   def testMinor6(self):
     """Test diskless device"""
-    stats = bdev.DRBD8Status(self.mass_data[6])
-    self.failUnless(stats.is_in_use)
-    self.failUnless(stats.is_connected and stats.is_secondary and
-                    stats.peer_primary and stats.is_diskless)
+    for data in [self.mass_data, self.mass83_data]:
+      stats = bdev.DRBD8Status(data[6])
+      self.failUnless(stats.is_in_use)
+      self.failUnless(stats.is_connected and stats.is_secondary and
+                      stats.peer_primary and stats.is_diskless)
 
   def testMinor8(self):
     """Test standalone device"""
-    stats = bdev.DRBD8Status(self.mass_data[8])
-    self.failUnless(stats.is_in_use)
-    self.failUnless(stats.is_standalone and
-                    stats.rrole == 'Unknown' and
-                    stats.is_disk_uptodate)
+    for data in [self.mass_data, self.mass83_data]:
+      stats = bdev.DRBD8Status(data[8])
+      self.failUnless(stats.is_in_use)
+      self.failUnless(stats.is_standalone and
+                      stats.rrole == 'Unknown' and
+                      stats.is_disk_uptodate)
 
 if __name__ == '__main__':
   unittest.main()
index 7652f86..e5263d4 100755 (executable)
@@ -23,6 +23,7 @@
 
 
 import unittest
+import re
 
 from ganeti import constants
 
@@ -54,5 +55,17 @@ class TestConstants(unittest.TestCase):
                      constants.CONFIG_REVISION))
 
 
+class TestParameterNames(unittest.TestCase):
+  """HV/BE parameter tests"""
+  VALID_NAME = re.compile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+
+  def testNoDashes(self):
+    for kind, source in [('hypervisor', constants.HVS_PARAMETER_TYPES),
+                         ('backend', constants.BES_PARAMETER_TYPES)]:
+      for key in source:
+        self.failUnless(self.VALID_NAME.match(key),
+                        "The %s parameter '%s' contains invalid characters" %
+                        (kind, key))
+
 if __name__ == '__main__':
   unittest.main()
index 1c2992c..ef3d626 100755 (executable)
@@ -33,6 +33,7 @@ import socket
 import shutil
 import re
 import select
+import string
 
 import ganeti
 import testutils
@@ -44,7 +45,7 @@ from ganeti.utils import IsProcessAlive, RunCmd, \
      ParseUnit, AddAuthorizedKey, RemoveAuthorizedKey, \
      ShellQuote, ShellQuoteArgs, TcpPing, ListVisibleFiles, \
      SetEtcHostsEntry, RemoveEtcHostsEntry, FirstFree, OwnIpAddress, \
-     TailFile, ForceDictType
+     TailFile, ForceDictType, SafeEncode
 
 from ganeti.errors import LockError, UnitParseError, GenericError, \
      ProgrammerError
@@ -969,5 +970,24 @@ class TestForceDictType(unittest.TestCase):
     self.assertRaises(errors.TypeEnforcementError, self._fdt, {'d': '4 L'})
 
 
+class TestSafeEncode(unittest.TestCase):
+  """Test case for SafeEncode"""
+
+  def testAscii(self):
+    for txt in [string.digits, string.letters, string.punctuation]:
+      self.failUnlessEqual(txt, SafeEncode(txt))
+
+  def testDoubleEncode(self):
+    for i in range(255):
+      txt = SafeEncode(chr(i))
+      self.failUnlessEqual(txt, SafeEncode(txt))
+
+  def testUnicode(self):
+    # 1024 is high enough to catch non-direct ASCII mappings
+    for i in range(1024):
+      txt = SafeEncode(unichr(i))
+      self.failUnlessEqual(txt, SafeEncode(txt))
+
+
 if __name__ == '__main__':
   unittest.main()