$ apt-get install lvm2 ssh bridge-utils iproute iputils-arping \
ndisc6 python python-pyopenssl openssl \
- python-pyparsing python-simplejson \
- python-pyinotify python-pycurl socat fping \
- python-ipaddr python-bitarray
+ python-pyparsing python-simplejson python-bitarray \
+ python-pyinotify python-pycurl python-ipaddr socat fping
+
+If bitarray is missing it can be installed from easy-install::
+
+ $ easy_install bitarray
+
+Or on newer distributions (e.g. Debian Wheezy) the above becomes::
+
+ $ apt-get install lvm2 ssh bridge-utils iproute iputils-arping \
+ ndisc6 python python-openssl openssl \
+ python-pyparsing python-simplejson python-bitarray \
+ python-pyinotify python-pycurl python-ipaddr socat fping
Note that this does not install optional packages::
- `snap-server` <http://hackage.haskell.org/package/snap-server>`_, version
0.8.1 and above.
-These libraries are available in Debian Wheezy (but not in Squeeze, with
-the exception of curl), so you can use either apt::
+These libraries are available in Debian Wheezy (but not in Squeeze), so you
+can use either apt::
$ apt-get install libghc-hslogger-dev libghc-crypto-dev libghc-text-dev \
libghc-hinotify-dev libghc-regex-pcre-dev \
or ``cabal``, after installing a required non-Haskell dependency::
$ apt-get install libpcre3-dev
- $ cabal install hslogger Crypto text hinotify regex-pcre \
+ $ cabal install hslogger Crypto text hinotify==0.3.2 regex-pcre \
attoparsec vector snap-server
to install them.
HS_BIN_PROGS=src/htools
# Haskell programs to be installed in the MYEXECLIB dir
+if ENABLE_MOND
HS_MYEXECLIB_PROGS=src/mon-collector
+else
+HS_MYEXECLIB_PROGS=
+endif
# Haskell programs to be compiled by "make really-all"
HS_COMPILE_PROGS= \
'--enabled-disk-templates'.
-Version 2.7.0 beta3
--------------------
+Version 2.7.0 rc1
+-----------------
-*(Released Mon, 22 Apr 2013)*
+*(unreleased)*
Incompatible/important changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
``plain`` disk template are supported.
- The KVM hypervisor has been updated with many new hypervisor
parameters, including a generic one for passing arbitrary command line
- values. See a complete list in :manpage:`gnt-instance(8)`.
+ values. See a complete list in :manpage:`gnt-instance(8)`. It is now
+ compatible up to qemu 1.4.
- A new tool, called ``mon-collector``, is the stand-alone executor of
the data collectors for a monitoring system. As of this version, it
just includes the DRBD data collector, that can be executed by calling
- The functionality for allocating multiple instances at once has been
overhauled and is now also available through :doc:`RAPI <rapi>`.
-Since beta2:
+
+Since beta3:
+
+- Fix kvm compatibility with qemu 1.4 (Issue 389)
+- Documentation updates (admin guide, upgrade notes, install
+ instructions) (Issue 372)
+- Fix gnt-group list nodes and instances count (Issue 436)
+- Fix compilation without non-mandatory libraries (Issue 441)
+- Fix xen-hvm hypervisor forcing nics to type 'ioemu' (Issue 247)
+- Make confd logging more verbose at INFO level (Issue 435)
+- Improve "networks" documentation in :manpage:`gnt-instance(8)`
+- Fix failure path for instance storage type conversion (Issue 229)
+- Update htools text backend documentation
+- Improve the renew-crypto section of :manpage:`gnt-cluster(8)`
+- Disable inter-cluster instance move for file-based instances, because
+  it is dependent on instance export, which is not supported for
+  file-based instances. (Issue 414)
+
+
+Version 2.7.0 beta3
+-------------------
+
+*(Released Mon, 22 Apr 2013)*
+
+This was the third beta release of the 2.7 series. Since beta2:
- Fix hail to verify disk instance policies on a per-disk basis (Issue 418).
- Fix data loss on wrong usage of ``gnt-instance move``
$ gnt-cluster redist-conf
+#. If you use file storage, check that the ``/etc/ganeti/file-storage-paths``
+   file is correct on all nodes. For security reasons it's not copied
+   automatically, but it can be copied manually via::
+
+ $ gnt-cluster copyfile /etc/ganeti/file-storage-paths
+
#. Restart daemons again on all nodes::
$ /etc/init.d/ganeti restart
[MONITORING_PKG="$MONITORING_PKG attoparsec"])
AC_GHC_PKG_CHECK([snap-server], [],
[MONITORING_PKG="$MONITORING_PKG snap-server"])
+ MONITORING_DEP=
+ if test "$has_confd" = False; then
+ MONITORING_DEP="$MONITORING_DEP confd"
+ fi
+ has_monitoring_pkg=False
if test -z "$MONITORING_PKG"; then
- has_monitoring=True
+ has_monitoring_pkg=True
elif test "$enable_monitoring" = check; then
AC_MSG_WARN(m4_normalize([The required extra libraries for the monitoring
daemon were not found ($MONITORING_PKG),
required libraries were not found:
$MONITORING_PKG]))
fi
+ has_monitoring_dep=False
+ if test -z "$MONITORING_DEP"; then
+ has_monitoring_dep=True
+ elif test "$enable_monitoring" = check; then
+ AC_MSG_WARN(m4_normalize([The optional Ganeti components required for the
+ monitoring agent were not enabled
+ ($MONITORING_DEP), monitoring disabled]))
+ else
+ AC_MSG_FAILURE(m4_normalize([The monitoring functionality was requested, but
+ required optional Ganeti components were not
+ found: $MONITORING_DEP]))
+ fi
+
fi
-if test "$has_monitoring" = True; then
- AC_MSG_NOTICE([Enabling the monitoring daemon usage])
+if test "$has_monitoring_pkg" = True -a "$has_monitoring_dep" = True; then
+ has_monitoring=True
+ AC_MSG_NOTICE([Enabling the monitoring agent usage])
fi
AC_SUBST(ENABLE_MOND, $has_monitoring)
AM_CONDITIONAL([ENABLE_MOND], [test "$has_monitoring" = True])
file
The instance will use plain files as backend for its disks. No
redundancy is provided, and this is somewhat more difficult to
- configure for high performance.
+ configure for high performance. Note that for security reasons the
+ file storage directory must be listed under
+ ``/etc/ganeti/file-storage-paths``, and that file is not copied
+ automatically to all nodes by Ganeti.
+
+sharedfile
+ The instance will use plain files as backend, but Ganeti assumes that
+ those files will be available and in sync automatically on all nodes.
+ This allows live migration and failover of instances using this
+ method. As for ``file`` the file storage directory must be listed under
+ ``/etc/ganeti/file-storage-paths`` or ganeti will refuse to create
+ instances under it.
plain
The instance will use LVM devices as backend for its disks. No
The instance will use Volumes inside a RADOS cluster as backend for its
disks. It will access them using the RADOS block device (RBD).
+ext
+ The instance will use an external storage provider. See
+ :manpage:`ganeti-extstorage-interface(7)` for how to implement one.
+
+
IAllocator
~~~~~~~~~~
instance is created. The IP and/or bridge of the NIC can be changed
via ``--net 0:ip=IP,link=BRIDGE``
-See the manpage for gnt-instance for the detailed option list.
+See :manpage:`ganeti-instance(8)` for the detailed option list.
For example if you want to create an highly available instance, with a
single disk of 50GB and the default memory size, having primary node
"reason" attached to it (at opcode level). This can be used for
example to distinguish an admin request, from a scheduled maintenance
or an automated tool's work. If this reason is not passed, Ganeti will
- just use the information it has about the source of the request: for
- example a cli shutdown operation will have "cli:shutdown" as a reason,
- a cli failover operation will have "cli:failover". Operations coming
- from the remote API will use "rapi" instead of "cli". Of course
- setting a real site-specific reason is still preferred.
+ just use the information it has about the source of the request.
+ This reason information will be structured according to the
+ :doc:`Ganeti reason trail <design-reason-trail>` design document.
- RPCs that affect the instance status will be changed so that the
"reason" and the version of the config object they ran on is passed to
them. They will then export the new expected instance status, together
The timestamp of the last known change to the instance state.
``state_reason``
- The last known reason for state change, described according to the
- following subfields:
-
- ``text``
- Either a user-provided reason (if any), or the name of the command that
- triggered the state change, as a fallback.
-
- ``jobID``
- The ID of the job that caused the state change.
-
- ``source``
- Where the state change was triggered (RAPI, CLI).
+ The last known reason for state change of the instance, described according
+ to the JSON representation of a reason trail, as detailed in the :doc:`reason trail
+ design document <design-reason-trail>`.
``status``
It represents the status of the instance, and its format is the same as that
Returns the report of the collector ``[collector_name]`` that belongs to the
specified ``[category]``.
-If a collector does not belong to any category, ``collector`` will be used as
-the value for ``[category]``.
+The ``category`` has to be written in lowercase.
+
+If a collector does not belong to any category, ``default`` will have to be
+used as the value for ``[category]``.
`Status reporting collectors`_ will provide their output in non-verbose format.
The verbose format can be requested by adding the parameter ``verbose=1`` to the
$ apt-get install drbd8-source drbd8-utils
$ m-a update
$ m-a a-i drbd8
+
+  Or on newer versions, if the kernel already has modules::
+
+ $ apt-get install drbd8-utils
+
+ Then to configure it for Ganeti::
+
$ echo drbd minor_count=128 usermode_helper=/bin/true >> /etc/modules
$ depmod -a
$ modprobe drbd minor_count=128 usermode_helper=/bin/true
)
ret.append(
- ("enabled disk templates",
+ ("allowed disk templates",
_FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
eff_ipolicy[constants.IPOLICY_DTS]))
)
node_list = self.owned_locks(locking.LEVEL_NODE)
self.cluster = cluster = self.cfg.GetClusterInfo()
- (enabled_disk_templates, new_enabled_disk_templates) = \
- self._GetEnabledDiskTemplates(cluster)
-
- self._CheckVgName(node_list, enabled_disk_templates,
- new_enabled_disk_templates)
+ vm_capable_nodes = [node.name
+ for node in self.cfg.GetAllNodesInfo().values()
+ if node.name in node_list and node.vm_capable]
+
+ # if vg_name not None, checks given volume group on all nodes
+ if self.op.vg_name:
+ vglist = self.rpc.call_vg_list(vm_capable_nodes)
+ for node in vm_capable_nodes:
+ msg = vglist[node].fail_msg
+ if msg:
+ # ignoring down node
+ self.LogWarning("Error while gathering data on node %s"
+ " (ignoring node): %s", node, msg)
+ continue
+ vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
+ self.op.vg_name,
+ constants.MIN_VG_SIZE)
+ if vgstatus:
+ raise errors.OpPrereqError("Error on node '%s': %s" %
+ (node, vgstatus), errors.ECODE_ENVIRON)
if self.op.drbd_helper:
# checks given drbd helper on all nodes
feedback_fn("Initializing DRBD devices...")
# all child devices are in place, we can now create the DRBD devices
- for disk in anno_disks:
- for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
- f_create = node == pnode
- _CreateSingleBlockDev(self, node, instance, disk, info, f_create,
- excl_stor)
+ try:
+ for disk in anno_disks:
+ for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
+ f_create = node == pnode
+ _CreateSingleBlockDev(self, node, instance, disk, info, f_create,
+ excl_stor)
+ except errors.GenericError, e:
+ feedback_fn("Initializing of DRBD devices failed;"
+ " renaming back original volumes...")
+ for disk in new_disks:
+ self.cfg.SetDiskID(disk, pnode)
+ rename_back_list = [(n.children[0], o.logical_id)
+ for (n, o) in zip(new_disks, instance.disks)]
+ result = self.rpc.call_blockdev_rename(pnode, rename_back_list)
+ result.Raise("Failed to rename LVs back after error %s" % str(e))
+ raise
# at this point, the instance has been modified
instance.disk_template = constants.DT_DRBD8
"""
(conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
if conf_net is not None:
- raise errors.OpPrereqError(("Conflicting IP address found: '%s' != '%s'" %
+ raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
+ " network %s, but the target NIC does not." %
(ip, conf_net)),
errors.ECODE_STATE)
HV_KVM_EXTRA = "kvm_extra"
HV_KVM_MACHINE_VERSION = "machine_version"
HV_KVM_PATH = "kvm_path"
+HV_VIF_TYPE = "vif_type"
HVS_PARAMETER_TYPES = {
HV_VGA: VTYPE_STRING,
HV_KVM_EXTRA: VTYPE_STRING,
HV_KVM_MACHINE_VERSION: VTYPE_STRING,
+ HV_VIF_TYPE: VTYPE_STRING,
}
HVS_PARAMETERS = frozenset(HVS_PARAMETER_TYPES.keys())
HT_NIC_PARAVIRTUAL,
])
+# Vif types
+# default vif type in xen-hvm
+HT_HVM_VIF_IOEMU = "ioemu"
+HT_HVM_VIF_VIF = "vif"
+HT_HVM_VALID_VIF_TYPES = compat.UniqueFrozenset([
+ HT_HVM_VIF_IOEMU,
+ HT_HVM_VIF_VIF,
+ ])
+
# Disk types
HT_DISK_IOEMU = "ioemu"
HT_DISK_IDE = "ide"
HV_CPU_MASK: CPU_PINNING_ALL,
HV_CPU_CAP: 0,
HV_CPU_WEIGHT: 256,
+ HV_VIF_TYPE: HT_HVM_VIF_IOEMU,
},
HT_KVM: {
HV_KVM_PATH: KVM_PATH,
_ENABLE_KVM_RE = re.compile(r"^-enable-kvm\s", re.M)
_DISABLE_KVM_RE = re.compile(r"^-disable-kvm\s", re.M)
_NETDEV_RE = re.compile(r"^-netdev\s", re.M)
+ _DISPLAY_RE = re.compile(r"^-display\s", re.M)
+ _MACHINE_RE = re.compile(r"^-machine\s", re.M)
_NEW_VIRTIO_RE = re.compile(r"^name \"%s\"" % _VIRTIO_NET_PCI, re.M)
# match -drive.*boot=on|off on different lines, but in between accept only
# dashes not preceded by a new line (which would mean another option
"""
# pylint: disable=R0912,R0914,R0915
hvp = instance.hvparams
+ self.ValidateParameters(hvp)
pidfile = self._InstancePidFile(instance.name)
kvm = hvp[constants.HV_KVM_PATH]
mversion = hvp[constants.HV_KVM_MACHINE_VERSION]
if not mversion:
mversion = self._GetDefaultMachineVersion(kvm)
- kvm_cmd.extend(["-M", mversion])
+ if self._MACHINE_RE.search(kvmhelp):
+ # TODO (2.8): kernel_irqchip and kvm_shadow_mem machine properties, as
+ # extra hypervisor parameters. We should also investigate whether and how
+ # shadow_mem should be considered for the resource model.
+ if (hvp[constants.HV_KVM_FLAG] == constants.HT_KVM_ENABLED):
+ specprop = ",accel=kvm"
+ else:
+ specprop = ""
+ machinespec = "%s%s" % (mversion, specprop)
+ kvm_cmd.extend(["-machine", machinespec])
+ else:
+ kvm_cmd.extend(["-M", mversion])
+ if (hvp[constants.HV_KVM_FLAG] == constants.HT_KVM_ENABLED and
+ self._ENABLE_KVM_RE.search(kvmhelp)):
+ kvm_cmd.extend(["-enable-kvm"])
+ elif (hvp[constants.HV_KVM_FLAG] == constants.HT_KVM_DISABLED and
+ self._DISABLE_KVM_RE.search(kvmhelp)):
+ kvm_cmd.extend(["-disable-kvm"])
kernel_path = hvp[constants.HV_KERNEL_PATH]
if kernel_path:
boot_floppy = hvp[constants.HV_BOOT_ORDER] == constants.HT_BO_FLOPPY
boot_network = hvp[constants.HV_BOOT_ORDER] == constants.HT_BO_NETWORK
- self.ValidateParameters(hvp)
-
if startup_paused:
kvm_cmd.extend([_KVM_START_PAUSED_FLAG])
- if (hvp[constants.HV_KVM_FLAG] == constants.HT_KVM_ENABLED and
- self._ENABLE_KVM_RE.search(kvmhelp)):
- kvm_cmd.extend(["-enable-kvm"])
- elif (hvp[constants.HV_KVM_FLAG] == constants.HT_KVM_DISABLED and
- self._DISABLE_KVM_RE.search(kvmhelp)):
- kvm_cmd.extend(["-disable-kvm"])
-
if boot_network:
kvm_cmd.extend(["-boot", "n"])
kvm_cmd.extend(["-spice", spice_arg])
else:
- kvm_cmd.extend(["-nographic"])
+ # From qemu 1.4 -nographic is incompatible with -daemonize. The new way
+ # also works in earlier versions though (tested with 1.1 and 1.3)
+ if self._DISPLAY_RE.search(kvmhelp):
+ kvm_cmd.extend(["-display", "none"])
+ else:
+ kvm_cmd.extend(["-nographic"])
if hvp[constants.HV_USE_LOCALTIME]:
kvm_cmd.extend(["-localtime"])
constants.HV_CPU_CAP: hv_base.NO_CHECK,
constants.HV_CPU_WEIGHT:
(False, lambda x: 0 < x < 65535, "invalid weight", None, None),
+ constants.HV_VIF_TYPE:
+ hv_base.ParamInSet(False, constants.HT_HVM_VALID_VIF_TYPES),
}
def _GetConfig(self, instance, startup_memory, block_devices):
config.write("localtime = 1\n")
vif_data = []
+ # Note: what is called 'nic_type' here, is used as value for the xen nic
+ # vif config parameter 'model'. For the xen nic vif parameter 'type', we use
+ # the 'vif_type' to avoid a clash of notation.
nic_type = hvp[constants.HV_NIC_TYPE]
+
if nic_type is None:
+ vif_type_str = ""
+ if hvp[constants.HV_VIF_TYPE]:
+ vif_type_str = ", type=%s" % hvp[constants.HV_VIF_TYPE]
# ensure old instances don't change
- nic_type_str = ", type=ioemu"
+ nic_type_str = vif_type_str
elif nic_type == constants.HT_NIC_PARAVIRTUAL:
nic_type_str = ", type=paravirtualized"
else:
- nic_type_str = ", model=%s, type=ioemu" % nic_type
+ # parameter 'model' is only valid with type 'ioemu'
+ nic_type_str = ", model=%s, type=%s" % \
+ (nic_type, constants.HT_HVM_VIF_IOEMU)
for nic in instance.nics:
nic_str = "mac=%s%s" % (nic.mac, nic_type_str)
ip = getattr(nic, "ip", None)
| {\--disk=*N*: {size=*VAL* \| adopt=*LV*}[,options...]
| \| {size=*VAL*,provider=*PROVIDER*}[,param=*value*... ][,options...]
| \| {-s|\--os-size} *SIZE*}
-| [\--no-ip-check] [\--no-name-check] [\--no-start] [\--no-install]
+| [\--no-ip-check] [\--no-name-check] [\--no-conflicts-check]
+| [\--no-start] [\--no-install]
| [\--net=*N* [:options...] \| \--no-nics]
| [{-B|\--backend-parameters} *BEPARAMS*]
| [{-H|\--hypervisor-parameters} *HYPERVISOR* [: option=*value*... ]]
ip
specifies the IP address assigned to the instance from the Ganeti
side (this is not necessarily what the instance will use, but what
- the node expects the instance to use)
+ the node expects the instance to use). Note that if an IP in the
+ range of a network configured with **gnt-network**\(8) is used,
+ and the NIC is not already connected to it, this network has to be
+ passed in the **network** parameter if this NIC is meant to be
+ connected to the said network. ``--no-conflicts-check`` can be used
+ to override this check. The special value **pool** causes Ganeti to
+  select an IP from the network the NIC is or will be connected to.
mode
specifies the connection mode for this NIC: routed, bridged or
- e1000 (KVM)
- paravirtual (default for KVM) (HVM & KVM)
+vif\_type
+ Valid for the Xen HVM hypervisor.
+
+ This parameter specifies the vif type of the nic configuration
+ of the instance. Unsetting the value leads to no type being specified
+ in the configuration. Note that this parameter only takes effect when
+ the 'nic_type' is not set. The possible options are:
+
+ - ioemu
+ - vif
+
disk\_type
Valid for the Xen HVM and KVM hypervisors.
# Disk templates are treated slightly differently
par = "disk-templates"
- disp_str = "enabled disk templates"
+ disp_str = "allowed disk templates"
curr_val = old_policy[disp_str]
test_values = [
(True, constants.DT_PLAIN),
("vcpu-ratio", 1.5, None, 1.5),
("spindle-ratio", 1.5, None, 1.5),
("disk-templates", constants.DT_PLAIN,
- "enabled disk templates", constants.DT_PLAIN)
+ "allowed disk templates", constants.DT_PLAIN)
]:
if not iname:
iname = par
, dcFormatVersion
, dcCategory
, dcKind
- , dcData
+ , dcReport
) where
dcKind = DCKStatus
-- | The data exported by the data collector, taken from the default location.
-dcData :: IO J.JSValue
-dcData = buildJsonReport defaultFile Nothing
+dcReport :: IO DCReport
+dcReport = buildDCReport defaultFile Nothing
-- * Command line options
let status = computeStatus drbdData
return . addStatus status $ J.showJSON drbdData
+-- | This function computes the DCReport for the DRBD status.
+buildDCReport :: FilePath -> Maybe FilePath -> IO DCReport
+buildDCReport statusFile pairingFile =
+ buildJsonReport statusFile pairingFile >>=
+ buildReport dcName dcVersion dcFormatVersion dcCategory dcKind
+
-- | Main function.
main :: Options -> [String] -> IO ()
main opts args = do
pairingFile = optDrbdPairing opts
unless (null args) . exitErr $ "This program takes exactly zero" ++
" arguments, got '" ++ unwords args ++ "'"
- report <- buildJsonReport statusFile pairingFile >>=
- buildReport dcName dcVersion dcFormatVersion dcCategory dcKind
+ report <- buildDCReport statusFile pairingFile
putStrLn $ J.encode report
, buildReport
) where
+import Data.Char
import Text.JSON
import Ganeti.Constants as C
-- | The JSON instance for DCCategory.
instance JSON DCCategory where
- showJSON = showJSON . show
+ showJSON = showJSON . map toLower . drop 2 . show
readJSON =
error "JSON read instance not implemented for type DCCategory"
import Control.Applicative
import Control.Monad
+import Control.Monad.IO.Class
+import Data.ByteString.Char8 hiding (map, filter, find)
+import Data.List
import Snap.Core
import Snap.Http.Server
-import Data.ByteString.Char8
import qualified Text.JSON as J
+import qualified Ganeti.BasicTypes as BT
import Ganeti.Daemon
+import qualified Ganeti.DataCollectors.Drbd as Drbd
+import Ganeti.DataCollectors.Types
import qualified Ganeti.Constants as C
-- * Types and constants definitions
latestAPIVersion :: Int
latestAPIVersion = 1
+-- | Type describing a data collector basic information
+data DataCollector = DataCollector
+ { dName :: String -- ^ Name of the data collector
+  , dCategory :: Maybe DCCategory -- ^ Category (storage, instance, etc.)
+ -- of the collector
+ , dKind :: DCKind -- ^ Kind (performance or status reporting) of
+ -- the data collector
+ , dReport :: IO DCReport -- ^ Report produced by the collector
+ }
+
+-- | The list of available builtin data collectors.
+collectors :: [DataCollector]
+collectors =
+ [ DataCollector Drbd.dcName Drbd.dcCategory Drbd.dcKind Drbd.dcReport
+ ]
+
-- * Configuration handling
-- | The default configuration for the HTTP server.
, ("report", reportHandler)
]
+-- | Get the JSON representation of a data collector to be used in the collector
+-- list.
+dcListItem :: DataCollector -> J.JSValue
+dcListItem dc =
+ J.JSArray
+ [ J.showJSON $ dName dc
+ , maybe J.JSNull J.showJSON $ dCategory dc
+ , J.showJSON $ dKind dc
+ ]
+
-- | Handler for returning lists.
listHandler :: Snap ()
listHandler =
- dir "collectors" $ writeText "TODO: return the list of collectors"
+ dir "collectors" . writeBS . pack . J.encode $ map dcListItem collectors
-- | Handler for returning data collector reports.
reportHandler :: Snap ()
route
[ ("all", allReports)
, (":category/:collector", oneReport)
- ]
+ ] <|>
+ errorReport
--- | Return the report of all the available collectors
+-- | Return the report of all the available collectors.
allReports :: Snap ()
-allReports = writeText "TODO: return the reports of all the collectors"
+allReports = do
+ reports <- mapM (liftIO . dReport) collectors
+ writeBS . pack . J.encode $ reports
+
+-- | Returns a category given its name.
+-- If "collector" is given as the name, the collector has no category, and
+-- Nothing will be returned.
+catFromName :: String -> BT.Result (Maybe DCCategory)
+catFromName "instance" = BT.Ok $ Just DCInstance
+catFromName "storage" = BT.Ok $ Just DCStorage
+catFromName "daemon" = BT.Ok $ Just DCDaemon
+catFromName "hypervisor" = BT.Ok $ Just DCHypervisor
+catFromName "default" = BT.Ok Nothing
+catFromName _ = BT.Bad "No such category"
+
+errorReport :: Snap ()
+errorReport = do
+ modifyResponse $ setResponseStatus 404 "Not found"
+ writeBS "Unable to produce a report for the requested resource"
+
+error404 :: Snap ()
+error404 = do
+ modifyResponse $ setResponseStatus 404 "Not found"
+ writeBS "Resource not found"
-- | Return the report of one collector
oneReport :: Snap ()
oneReport = do
- category <- fmap (maybe mzero unpack) $ getParam "category"
- collector <- fmap (maybe mzero unpack) $ getParam "collector"
- writeBS . pack $
- "TODO: return the report for collector " ++ category
- ++ "/" ++ collector
+ categoryName <- fmap (maybe mzero unpack) $ getParam "category"
+ collectorName <- fmap (maybe mzero unpack) $ getParam "collector"
+ category <-
+ case catFromName categoryName of
+ BT.Ok cat -> return cat
+ BT.Bad msg -> fail msg
+ collector <-
+ case
+ find (\col -> collectorName == dName col) $
+ filter (\c -> category == dCategory c) collectors of
+ Just col -> return col
+ Nothing -> fail "Unable to find the requested collector"
+ report <- liftIO $ dReport collector
+ writeBS . pack . J.encode $ report
-- | The function implementing the HTTP API of the monitoring agent.
--- TODO: Currently it only replies to the API version query: implement all the
--- missing features.
monitoringApi :: Snap ()
monitoringApi =
ifTop versionQ <|>
- dir "1" version1Api
+ dir "1" version1Api <|>
+ error404
-- | Main function.
main :: MainFn CheckResult PrepResult
, pOpPriority
, pDependencies
, pComment
+ , pReason
])
-- | Default common parameter values.
, opPriority = OpPrioNormal
, opDepends = Nothing
, opComment = Nothing
+ , opReason = []
}
-- | The top-level opcode type.
, pOpPriority
, pDependencies
, pComment
+ , pReason
, pEnabledDiskTemplates
, dOldQuery
, dOldQueryNoLocking
pComment :: Field
pComment = optionalNullSerField $ stringField "comment"
+-- | Reason trail field.
+pReason :: Field
+pReason = simpleField C.opcodeReason [t| ReasonTrail |]
+
-- * Entire opcode parameter list
-- | Old-style query opcode, with locking.
return . Bad $
GenericError ("Luxi call '" ++ strOfOp op ++ "' not implemented")
-
-- | Given a decoded luxi request, executes it and sends the luxi
-- response back to the client.
handleClientMsg :: Client -> ConfigReader -> LuxiOp -> IO Bool
(!status, !rval) <-
case call_result of
Bad err -> do
- logWarning $ "Failed to execute request: " ++ show err
+ logWarning $ "Failed to execute request " ++ show args ++ ": "
+ ++ show err
return (False, showJSON err)
Ok result -> do
-- only log the first 2,000 chars of the result
logDebug $ "Result (truncated): " ++ take 2000 (J.encode result)
+ logInfo $ "Successfully handled " ++ strOfOp args
return (True, result)
sendMsg client $ buildResponse status rval
return True
-- | Generates the LuxiOp data type.
--
-- This takes a Luxi operation definition and builds both the
--- datatype and the function trnasforming the arguments to JSON.
+-- datatype and the function transforming the arguments to JSON.
-- We can't use anything less generic, because the way different
-- operations are serialized differs on both parameter- and top-level.
--
, opStatusToRaw
, opStatusFromRaw
, ELogType(..)
+ , ReasonElem
+ , ReasonTrail
) where
import Control.Monad (liftM)
, ("ELogJqueueTest", 'C.elogJqueueTest)
])
$(THH.makeJSONInstance ''ELogType)
+
+-- | Type of one element of a reason trail.
+type ReasonElem = (String, String, Integer)
+
+-- | Type representing a reason trail.
+type ReasonTrail = [ReasonElem]
genNameNE
_ -> fail $ "Undefined arbitrary for opcode " ++ op_id
+-- | Generates one element of a reason trail
+genReasonElem :: Gen ReasonElem
+genReasonElem = (,,) <$> genFQDN <*> genFQDN <*> arbitrary
+
+-- | Generates a reason trail
+genReasonTrail :: Gen ReasonTrail
+genReasonTrail = do
+ size <- choose (0, 10)
+ vectorOf size genReasonElem
+
instance Arbitrary OpCodes.CommonOpParams where
arbitrary = OpCodes.CommonOpParams <$> arbitrary <*> arbitrary <*>
- arbitrary <*> resize 5 arbitrary <*> genMaybe genName
+ arbitrary <*> resize 5 arbitrary <*> genMaybe genName <*>
+ genReasonTrail
-- * Helper functions
def _TranslateParsedNames(self, parsed):
for (pretty, raw) in [
("bounds specs", constants.ISPECS_MINMAX),
- ("enabled disk templates", constants.IPOLICY_DTS)
+ ("allowed disk templates", constants.IPOLICY_DTS)
]:
self._RenameDictItem(parsed, pretty, raw)
for minmax in parsed[constants.ISPECS_MINMAX]:
self._AssertIPolicyIsFull(policy)
self._AssertIPolicyMerged(constants.IPOLICY_DEFAULTS, diff_pol, policy)
+ def testFillIPolicyKeepsUnknown(self):
+ INVALID_KEY = "invalid_ipolicy_key"
+ diff_pol = {
+ INVALID_KEY: None,
+ }
+ policy = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, diff_pol)
+ self.assertTrue(INVALID_KEY in policy)
+
if __name__ == "__main__":
testutils.GanetiTestProgram()
logging.info("Retrieving instance information from source cluster")
instinfo = self._GetInstanceInfo(src_client, mrt.PollJob,
mrt.move.src_instance_name)
+ if instinfo["disk_template"] == constants.DT_FILE:
+ raise Error("Inter-cluster move of file-based instances is not"
+ " supported.")
logging.info("Preparing export on source cluster")
expinfo = self._PrepareExport(src_client, mrt.PollJob,
done
adduser @GNTMASTERUSER@ @GNTCONFDGROUP@
+adduser @GNTMONDUSER@ @GNTMASTERDGROUP@