# lib
/lib/_autoconf.py
/lib/_vcsversion.py
+/lib/_generated_rpc.py
# man
/man/*.[0-9]
<socat-note>` below
- `Paramiko <http://www.lag.net/paramiko/>`_, if you want automated SSH
setup; optional otherwise but manual setup of the nodes required
+- `affinity Python module <http://pypi.python.org/pypi/affinity/0.1.0>`_,
+ optional Python package for supporting CPU pinning under KVM
+- `ElementTree Python module <http://effbot.org/zone/element-index.htm>`_,
+ if running python 2.4 (optional, used by ovfconverter tool)
+- `qemu-img <http://qemu.org/>`_, if you want to use ovfconverter
+- `fping <http://fping.sourceforge.net/>`_
These programs are supplied as part of most Linux distributions, so
usually they can be installed via the standard package manager. Also
$ apt-get install lvm2 ssh bridge-utils iproute iputils-arping \
ndisc6 python python-pyopenssl openssl \
python-pyparsing python-simplejson \
- python-pyinotify python-pycurl socat
+ python-pyinotify python-pycurl socat \
+ python-elementtree qemu
If you want to also enable the `htools` components, which is recommended
on bigger deployments (they give you automatic instance placement,
cluster balancing, etc.), then you need to have a Haskell compiler
installed. More specifically:
-- `GHC <http://www.haskell.org/ghc/>`_ version 6.10 or higher
+- `GHC <http://www.haskell.org/ghc/>`_ version 6.12 or higher
- or even better, `The Haskell Platform
<http://hackage.haskell.org/platform/>`_ which gives you a simple way
to bootstrap Haskell
BUILD_BASH_COMPLETION = $(top_srcdir)/autotools/build-bash-completion
RUN_IN_TEMPDIR = $(top_srcdir)/autotools/run-in-tempdir
CHECK_PYTHON_CODE = $(top_srcdir)/autotools/check-python-code
+CHECK_HEADER = $(top_srcdir)/autotools/check-header
CHECK_MAN = $(top_srcdir)/autotools/check-man
CHECK_VERSION = $(top_srcdir)/autotools/check-version
CHECK_NEWS = $(top_srcdir)/autotools/check-news
+CHECK_IMPORTS = $(top_srcdir)/autotools/check-imports
DOCPP = $(top_srcdir)/autotools/docpp
REPLACE_VARS_SED = autotools/replace_vars.sed
CONVERT_CONSTANTS = $(top_srcdir)/autotools/convert-constants
+BUILD_RPC = $(top_srcdir)/autotools/build-rpc
# Note: these are automake-specific variables, and must be named after
# the directory + 'dir' suffix
qa \
test \
test/data \
+ test/data/ovfdata \
+ test/data/ovfdata/other \
tools
BUILDTIME_DIR_AUTOCREATE = \
daemons/daemon-util \
daemons/ganeti-cleaner \
devel/upload \
+ $(BUILT_EXAMPLES) \
doc/examples/bash_completion \
- doc/examples/ganeti.initd \
- doc/examples/ganeti-kvm-poweroff.initd \
- doc/examples/ganeti.cron \
- doc/examples/gnt-config-backup \
- doc/examples/hooks/ipsec \
+ lib/_generated_rpc.py \
$(man_MANS) \
$(manhtml) \
tools/kvm-ifup \
# BUILT_SOURCES should only be used as a dependency on phony targets. Otherwise
# it'll cause the target to rebuild every time.
BUILT_SOURCES = \
+ $(built_base_sources) \
+ $(BUILT_PYTHON_SOURCES) \
+ $(PYTHON_BOOTSTRAP)
+
+built_base_sources = \
ganeti \
stamp-srclinks \
- $(all_dirfiles) \
- $(PYTHON_BOOTSTRAP) \
- $(BUILT_PYTHON_SOURCES)
+ $(all_dirfiles)
-BUILT_PYTHON_SOURCES = \
+built_python_base_sources = \
lib/_autoconf.py \
lib/_vcsversion.py
+BUILT_PYTHON_SOURCES = \
+ $(built_python_base_sources) \
+ lib/_generated_rpc.py
+
+# Generating the RPC wrappers depends on many things, so make sure it's built at
+# the end of the built sources
+lib/_generated_rpc.py: | $(built_base_sources) $(built_python_base_sources)
+
+# these are all built from the underlying %.in sources
+BUILT_EXAMPLES = \
+ doc/examples/ganeti-kvm-poweroff.initd \
+ doc/examples/ganeti.cron \
+ doc/examples/ganeti.initd \
+ doc/examples/gnt-config-backup \
+ doc/examples/hooks/ipsec
+
nodist_pkgpython_PYTHON = \
$(BUILT_PYTHON_SOURCES)
lib/netutils.py \
lib/objects.py \
lib/opcodes.py \
+ lib/ovf.py \
lib/qlang.py \
lib/query.py \
lib/rpc.py \
+ lib/rpc_defs.py \
lib/runtime.py \
lib/serializer.py \
lib/ssconf.py \
doc/design-network.rst \
doc/design-chained-jobs.rst \
doc/design-ovf-support.rst \
+ doc/design-resource-model.rst \
doc/cluster-merge.rst \
doc/design-shared-storage.rst \
+ doc/design-node-state-cache.rst \
+ doc/design-virtual-clusters.rst \
doc/devnotes.rst \
doc/glossary.rst \
doc/hooks.rst \
# extra flags that can be overriden on the command line
HEXTRA =
# exclude options for coverage reports
-HPCEXCL = --exclude Main --exclude Ganeti.HTools.QC \
+HPCEXCL = --exclude Main \
--exclude Ganeti.Constants \
+ --exclude Ganeti.THH \
+ --exclude Ganeti.HTools.QC \
+ --exclude Ganeti.HTools.QCHelper \
--exclude Ganeti.HTools.Version
HS_LIB_SRCS = \
htools/Ganeti/HTools/Group.hs \
htools/Ganeti/HTools/IAlloc.hs \
htools/Ganeti/HTools/Instance.hs \
+ htools/Ganeti/HTools/JSON.hs \
htools/Ganeti/HTools/Loader.hs \
htools/Ganeti/HTools/Luxi.hs \
htools/Ganeti/HTools/Node.hs \
htools/Ganeti/HTools/PeerMap.hs \
htools/Ganeti/HTools/QC.hs \
+ htools/Ganeti/HTools/QCHelper.hs \
htools/Ganeti/HTools/Rapi.hs \
htools/Ganeti/HTools/Simu.hs \
htools/Ganeti/HTools/Text.hs \
htools/Ganeti/HTools/Program/Hspace.hs \
htools/Ganeti/Jobs.hs \
htools/Ganeti/Luxi.hs \
- htools/Ganeti/OpCodes.hs
+ htools/Ganeti/OpCodes.hs \
+ htools/Ganeti/THH.hs
HS_BUILT_SRCS = htools/Ganeti/HTools/Version.hs htools/Ganeti/Constants.hs
HS_BUILT_SRCS_IN = $(patsubst %,%.in,$(HS_BUILT_SRCS))
noinst_DATA = \
devel/upload \
doc/html \
+ $(BUILT_EXAMPLES) \
doc/examples/bash_completion \
- doc/examples/ganeti.cron \
- doc/examples/ganeti.initd \
- doc/examples/ganeti-kvm-poweroff.initd \
- doc/examples/gnt-config-backup \
- doc/examples/hooks/ipsec \
$(manhtml)
gnt_scripts = \
daemons/ganeti-noded \
daemons/ganeti-watcher \
daemons/ganeti-rapi \
- scripts/gnt-backup \
- scripts/gnt-cluster \
- scripts/gnt-debug \
- scripts/gnt-group \
- scripts/gnt-instance \
- scripts/gnt-job \
- scripts/gnt-node \
- scripts/gnt-os
+ $(gnt_scripts)
PYTHON_BOOTSTRAP = \
$(PYTHON_BOOTSTRAP_SBIN) \
exit 1; \
fi
BINARY=$(@:htools/%=%); $(GHC) --make \
- $(HFLAGS) $(HEXTRA) \
+ $(HFLAGS) \
$(HTOOLS_NOCURL) $(HTOOLS_PARALLEL3) \
- -osuf $$BINARY.o -hisuf $$BINARY.hi $@
+ -osuf $$BINARY.o -hisuf $$BINARY.hi \
+ $(HEXTRA) $@
# for the htools/test binary, we need to enable profiling/coverage
htools/test: HEXTRA=-fhpc -Wwarn -fno-warn-missing-signatures \
$(PYTHON_BOOTSTRAP_SBIN) \
daemons/ganeti-cleaner
-dist_tools_PYTHON = \
+python_scripts = \
tools/burnin \
tools/cfgshell \
tools/cfgupgrade \
tools/cfgupgrade12 \
tools/cluster-merge \
+ tools/confd-client \
tools/lvmstrap \
tools/move-instance \
+ tools/ovfconverter \
tools/setup-ssh \
tools/sanitize-config
dist_tools_SCRIPTS = \
- $(dist_tools_PYTHON) \
+ $(python_scripts) \
tools/kvm-console-wrapper \
- tools/xm-console-wrapper
+ tools/xm-console-wrapper \
+ tools/master-ip-setup
pkglib_python_scripts = \
daemons/import-export \
epydoc.conf.in \
pylintrc \
autotools/build-bash-completion \
+ autotools/build-rpc \
+ autotools/check-header \
autotools/check-python-code \
+ autotools/check-imports \
autotools/check-man \
autotools/check-news \
autotools/check-tar \
$(docrst) \
doc/conf.py \
doc/html \
- doc/examples/ganeti.initd.in \
- doc/examples/ganeti-kvm-poweroff.initd.in \
- doc/examples/ganeti.cron.in \
- doc/examples/gnt-config-backup.in \
+ $(BUILT_EXAMPLES:%=%.in) \
doc/examples/ganeti.default \
doc/examples/ganeti.default-debug \
doc/examples/hooks/ethers \
- doc/examples/hooks/ipsec.in \
doc/examples/gnt-debug/README \
doc/examples/gnt-debug/delay0.json \
doc/examples/gnt-debug/delay50.json \
+ test/lockperf.py \
test/testutils.py \
test/mocks.py \
$(dist_TESTS) \
qa/qa-sample.json \
$(qa_scripts) \
$(HS_LIB_SRCS) $(HS_BUILT_SRCS_IN) \
- $(HS_PROG_SRCS)
+ $(HS_PROG_SRCS) \
+ htools/lint-hints.hs
man_MANS = \
man/ganeti.7 \
test/data/kvm_0.12.5_help.txt \
test/data/kvm_0.9.1_help.txt \
test/data/sys_drbd_usermode_helper.txt \
+ test/data/ovfdata/compr_disk.vmdk.gz \
+ test/data/ovfdata/config.ini \
+ test/data/ovfdata/corrupted_resources.ovf \
+ test/data/ovfdata/empty.ini \
+ test/data/ovfdata/empty.ovf \
+ test/data/ovfdata/ganeti.mf \
+ test/data/ovfdata/ganeti.ovf \
+ test/data/ovfdata/gzip_disk.ovf \
+ test/data/ovfdata/new_disk.vmdk \
+ test/data/ovfdata/no_disk.ini \
+ test/data/ovfdata/no_disk_in_ref.ovf \
+ test/data/ovfdata/no_os.ini \
+ test/data/ovfdata/no_ovf.ova \
+ test/data/ovfdata/ova.ova \
+ test/data/ovfdata/second_disk.vmdk \
+ test/data/ovfdata/rawdisk.raw \
+ test/data/ovfdata/unsafe_path.ini \
+ test/data/ovfdata/virtualbox.ovf \
+ test/data/ovfdata/wrong_extension.ovd \
+ test/data/ovfdata/wrong_config.ini \
+ test/data/ovfdata/wrong_manifest.mf \
+ test/data/ovfdata/wrong_manifest.ovf \
+ test/data/ovfdata/wrong_ova.ova \
+ test/data/ovfdata/wrong_xml.ovf \
+ test/data/ovfdata/other/rawdisk.raw \
test/import-export_unittest-helper
python_tests = \
test/ganeti.netutils_unittest.py \
test/ganeti.objects_unittest.py \
test/ganeti.opcodes_unittest.py \
+ test/ganeti.ovf_unittest.py \
test/ganeti.qlang_unittest.py \
test/ganeti.query_unittest.py \
test/ganeti.rapi.baserlib_unittest.py \
test/ganeti.workerpool_unittest.py \
test/cfgupgrade_unittest.py \
test/docs_unittest.py \
+ test/pycurl_reset_unittest.py \
test/tempfile_fork_unittest.py
+if HAS_FAKEROOT
+python_tests += test/ganeti.utils.io_unittest-runasroot.py
+endif
haskell_tests = htools/test
# Environment for all tests
PLAIN_TESTS_ENVIRONMENT = \
- PYTHONPATH=. TOP_SRCDIR=$(abs_top_srcdir) PYTHON=$(PYTHON) $(RUN_IN_TEMPDIR)
+ PYTHONPATH=. TOP_SRCDIR=$(abs_top_srcdir) PYTHON=$(PYTHON) \
+ FAKEROOT=$(FAKEROOT_PATH) $(RUN_IN_TEMPDIR)
# Environment for tests run by automake
TESTS_ENVIRONMENT = \
all_python_code = \
$(dist_sbin_SCRIPTS) \
- $(dist_tools_PYTHON) \
+ $(python_scripts) \
$(pkglib_python_scripts) \
$(nodist_pkglib_python_scripts) \
$(python_tests) \
check_python_code = \
$(BUILD_BASH_COMPLETION) \
+ $(CHECK_IMPORTS) \
+ $(CHECK_HEADER) \
$(DOCPP) \
$(all_python_code)
ganeti \
ganeti/http/server.py \
$(dist_sbin_SCRIPTS) \
- $(dist_tools_PYTHON) \
+ $(python_scripts) \
$(pkglib_python_scripts) \
$(BUILD_BASH_COMPLETION) \
+ $(CHECK_IMPORTS) \
+ $(CHECK_HEADER) \
$(DOCPP) \
$(PYTHON_BOOTSTRAP)
+standalone_python_modules = \
+ lib/rapi/client.py \
+ tools/ganeti-listrunner
+
pep8_python_code = \
ganeti \
ganeti/http/server.py \
$(dist_sbin_SCRIPTS) \
- $(dist_tools_PYTHON) \
+ $(python_scripts) \
$(pkglib_python_scripts) \
$(BUILD_BASH_COMPLETION) \
+ $(CHECK_HEADER) \
$(DOCPP) \
$(PYTHON_BOOTSTRAP) \
qa
sed -e "s/%ver%/$$VCSVER/" < $< > $@
htools/Ganeti/Constants.hs: htools/Ganeti/Constants.hs.in \
- lib/constants.py lib/_autoconf.py $(CONVERT_CONSTANTS)
+ lib/constants.py lib/_autoconf.py $(CONVERT_CONSTANTS) \
+ | lib/_vcsversion.py
set -e; \
{ cat $< ; PYTHONPATH=. $(CONVERT_CONSTANTS); } > $@
echo "TOOLSDIR = '$(toolsdir)'"; \
echo "GNT_SCRIPTS = [$(foreach i,$(notdir $(gnt_scripts)),'$(i)',)]"; \
echo "PKGLIBDIR = '$(pkglibdir)'"; \
- echo "DRBD_BARRIERS = $(DRBD_BARRIERS)"; \
+ echo "DRBD_BARRIERS = '$(DRBD_BARRIERS)'"; \
+ echo "DRBD_NO_META_FLUSH = $(DRBD_NO_META_FLUSH)"; \
echo "SYSLOG_USAGE = '$(SYSLOG_USAGE)'"; \
echo "DAEMONS_GROUP = '$(DAEMONS_GROUP)'"; \
echo "ADMIN_GROUP = '$(ADMIN_GROUP)'"; \
echo "NODED_USER = '$(NODED_USER)'"; \
echo "NODED_GROUP = '$(NODED_GROUP)'"; \
echo "DISK_SEPARATOR = '$(DISK_SEPARATOR)'"; \
+ echo "QEMUIMG_PATH = '$(QEMUIMG_PATH)'"; \
if [ "$(HTOOLS)" ]; then \
echo "HTOOLS = True"; \
else \
echo "HTOOLS = False"; \
fi; \
+ echo "ENABLE_CONFD = $(ENABLE_CONFD)"; \
} > $@
lib/_vcsversion.py: Makefile vcs-version | lib/.dir
echo "VCS_VERSION = '$$VCSVER'"; \
} > $@
+lib/_generated_rpc.py: lib/rpc_defs.py $(BUILD_RPC)
+ PYTHONPATH=. $(RUN_IN_TEMPDIR) $(CURDIR)/$(BUILD_RPC) lib/rpc_defs.py > $@
+
$(REPLACE_VARS_SED): Makefile
set -e; \
{ echo 's#@PREFIX@#$(prefix)#g'; \
echo 's#@GNTCONFDGROUP@#$(CONFD_GROUP)#g'; \
echo 's#@GNTMASTERDGROUP@#$(MASTERD_GROUP)#g'; \
echo 's#@GNTDAEMONSGROUP@#$(DAEMONS_GROUP)#g'; \
+ echo 's#@CUSTOM_ENABLE_CONFD@#$(ENABLE_CONFD)#g'; \
} > $@
# Using deferred evaluation
if test -n "$$error"; then exit 1; else exit 0; fi; \
}
-check-local: check-dirs
+.PHONY: check-local
+check-local: check-dirs $(BUILT_SOURCES)
$(CHECK_PYTHON_CODE) $(check_python_code)
+ PYTHONPATH=. $(CHECK_HEADER) $(check_python_code)
$(CHECK_VERSION) $(VERSION) $(top_srcdir)/NEWS
$(CHECK_NEWS) < $(top_srcdir)/NEWS
+ PYTHONPATH=. $(RUN_IN_TEMPDIR) $(CURDIR)/$(CHECK_IMPORTS) . $(standalone_python_modules)
expver=$(VERSION_MAJOR).$(VERSION_MINOR); \
if test "`head -n 1 $(top_srcdir)/README`" != "Ganeti $$expver"; then \
echo "Incorrect version in README, expected $$expver"; \
fi; \
for file in doc/iallocator.rst doc/hooks.rst; do \
if test "`sed -ne '4 p' $(top_srcdir)/$$file`" != \
- "Documents Ganeti version $$expver"; then \
+ "Documents Ganeti version $$expver"; then \
echo "Incorrect version in $$file, expected $$expver"; \
exit 1; \
fi; \
--rcfile ../pylintrc $(patsubst qa/%.py,%,$(qa_scripts))
.PHONY: hlint
-hlint: $(HS_BUILT_SRCS)
+hlint: $(HS_BUILT_SRCS) htools/lint-hints.hs
if tty -s; then C="-c"; else C=""; fi; \
- hlint --report=doc/hs-lint.html $$C htools
+ hlint --report=doc/hs-lint.html --cross $$C \
+ --ignore "Use first" \
+ --ignore "Use comparing" \
+ --ignore "Use on" \
+ --ignore "Use Control.Exception.catch" \
+ --ignore "Reduce duplication" \
+ --hint htools/lint-hints \
+ $(filter-out htools/Ganeti/THH.hs,$(HS_LIB_SRCS))
# a dist hook rule for updating the vcs-version file; this is
# hardcoded due to where it needs to build the file...
====
+Version 2.6.0 beta1
+-------------------
+
+*(unreleased)*
+
+- Deprecated ``admin_up`` field. Instead, ``admin_state`` is introduced,
+ with 3 possible values -- ``up``, ``down`` and ``offline``.
+
+
Version 2.5.0 rc5
-----------------
And as usual, various improvements to the error messages, documentation
and man pages.
+
Version 2.4.1
-------------
server endpoint
-Version 2.2.0 beta 0
---------------------
+Version 2.2.0 beta0
+-------------------
*(Released Thu, 17 Jun 2010)*
error handling path called a wrong function name)
-Version 2.0.0 final
--------------------
+Version 2.0.0
+-------------
*(Released Wed, 27 May 2009)*
- no changes from rc5
-Version 2.0 release candidate 5
--------------------------------
+Version 2.0 rc5
+---------------
*(Released Wed, 20 May 2009)*
- make watcher automatically start the master daemon if down
-Version 2.0 release candidate 4
--------------------------------
+Version 2.0 rc4
+---------------
*(Released Mon, 27 Apr 2009)*
- miscellaneous doc and man pages fixes
-Version 2.0 release candidate 3
--------------------------------
+Version 2.0 rc3
+---------------
*(Released Wed, 8 Apr 2009)*
toolchains
-Version 2.0 release candidate 2
--------------------------------
+Version 2.0 rc2
+---------------
*(Released Fri, 27 Mar 2009)*
- Some documentation fixes and updates
-Version 2.0 release candidate 1
--------------------------------
+Version 2.0 rc1
+---------------
*(Released Mon, 2 Mar 2009)*
- Fix an issue related to $libdir/run/ganeti and cluster creation
-Version 2.0 beta 2
-------------------
+Version 2.0 beta2
+-----------------
*(Released Thu, 19 Feb 2009)*
- Many other bugfixes and small improvements
-Version 2.0 beta 1
-------------------
+Version 2.0 beta1
+-----------------
*(Released Mon, 26 Jan 2009)*
- Change parsing of lvm commands to ignore stderr
-Version 1.2b3
--------------
+Version 1.2 beta3
+-----------------
*(Released Wed, 28 Nov 2007)*
- QA updates
-Version 1.2b2
--------------
+Version 1.2 beta2
+-----------------
*(Released Tue, 13 Nov 2007)*
import os
import re
+import itertools
from cStringIO import StringIO
from ganeti import constants
# making an exception here because this script is only used at build time.
from ganeti import _autoconf
+#: Regular expression describing desired format of option names. Long names can
+#: contain lowercase characters, numbers and dashes only.
+_OPT_NAME_RE = re.compile(r"^-[a-zA-Z0-9]|--[a-z][-a-z0-9]+$")
+
def WritePreamble(sw):
"""Writes the script preamble.
sw.Write("# This script is automatically generated at build time.")
sw.Write("# Do not modify manually.")
- sw.Write("_ganeti_dbglog() {")
+ sw.Write("_gnt_log() {")
sw.IncIndent()
try:
sw.Write("if [[ -n \"$GANETI_COMPL_LOG\" ]]; then")
# Params: <long options with equal sign> <all options>
# Result variable: $optcur
- sw.Write("_ganeti_checkopt() {")
+ sw.Write("_gnt_checkopt() {")
sw.IncIndent()
try:
sw.Write("""if [[ -n "$1" && "$cur" == @($1) ]]; then""")
sw.DecIndent()
sw.Write("fi")
- sw.Write("_ganeti_dbglog optcur=\"'$optcur'\"")
+ sw.Write("_gnt_log optcur=\"'$optcur'\"")
sw.Write("return 1")
finally:
# Params: <compgen options>
# Result variable: $COMPREPLY
- sw.Write("_ganeti_compgen() {")
+ sw.Write("_gnt_compgen() {")
sw.IncIndent()
try:
sw.Write("""COMPREPLY=( $(compgen "$@") )""")
- sw.Write("_ganeti_dbglog COMPREPLY=\"${COMPREPLY[@]}\"")
+ sw.Write("_gnt_log COMPREPLY=\"${COMPREPLY[@]}\"")
finally:
sw.DecIndent()
sw.Write("}")
def WriteCompReply(sw, args, cur="\"$cur\""):
- sw.Write("_ganeti_compgen %s -- %s", args, cur)
+ sw.Write("_gnt_compgen %s -- %s", args, cur)
sw.Write("return")
# pylint. pylint: disable=W0212
opt.all_names = sorted(opt._short_opts + opt._long_opts)
+ invalid = list(itertools.ifilterfalse(_OPT_NAME_RE.match, opt.all_names))
+ if invalid:
+ raise Exception("Option names don't match regular expression '%s': %s" %
+ (_OPT_NAME_RE.pattern, utils.CommaJoin(invalid)))
+
def _FindFirstArgument(self, sw):
ignore = []
skip_one = []
else:
condcmd = "if"
- sw.Write("%s _ganeti_checkopt %s %s; then", condcmd,
+ sw.Write("%s _gnt_checkopt %s %s; then", condcmd,
utils.ShellQuote("|".join(["%s=*" % i for i in longnames])),
utils.ShellQuote("|".join(allnames)))
sw.IncIndent()
sw.DecIndent()
sw.Write("fi")
- sw.Write("_ganeti_dbglog pfx=\"'$pfx'\" curvalue=\"'$curvalue'\""
+ sw.Write("_gnt_log pfx=\"'$pfx'\" curvalue=\"'$curvalue'\""
" node1=\"'$node1'\"")
sw.Write("for i in $(_ganeti_nodes); do")
varlen_arg_idx = None
wrote_arg = False
- # Write some debug comments
- for idx, arg in enumerate(self.args):
- sw.Write("# %s: %r", idx, arg)
-
sw.Write("compgenargs=")
for idx, arg in enumerate(self.args):
' prev="${COMP_WORDS[COMP_CWORD-1]}"'
' i first_arg_idx choices compgenargs arg_idx optcur')
- sw.Write("_ganeti_dbglog cur=\"$cur\" prev=\"$prev\"")
+ sw.Write("_gnt_log cur=\"$cur\" prev=\"$prev\"")
sw.Write("[[ -n \"$GANETI_COMPL_LOG\" ]] &&"
- " _ganeti_dbglog \"$(set | grep ^COMP_)\"")
+ " _gnt_log \"$(set | grep ^COMP_)\"")
sw.Write("COMPREPLY=()")
sw.DecIndent()
sw.Write("fi")
- # We're doing options and arguments to commands
- sw.Write("""case "${COMP_WORDS[1]}" in""")
+ # Group commands by arguments and options
+ grouped_cmds = {}
for cmd, (_, argdef, optdef, _, _) in commands.iteritems():
if not (argdef or optdef):
continue
+ grouped_cmds.setdefault((tuple(argdef), tuple(optdef)), set()).add(cmd)
- # TODO: Group by arguments and options
- sw.Write("%s)", utils.ShellQuote(cmd))
+ # We're doing options and arguments to commands
+ sw.Write("""case "${COMP_WORDS[1]}" in""")
+ for ((argdef, optdef), cmds) in grouped_cmds.items():
+ assert argdef or optdef
+ sw.Write("%s)", "|".join(map(utils.ShellQuote, sorted(cmds))))
sw.IncIndent()
try:
CompletionWriter(1, optdef, argdef).WriteTo(sw)
finally:
sw.DecIndent()
-
sw.Write(";;")
sw.Write("esac")
finally:
--- /dev/null
+#!/usr/bin/python
+#
+
+# Copyright (C) 2011 Google Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA.
+
+
+"""Script to generate RPC code.
+
+"""
+
+# pylint: disable=C0103
+# [C0103] Invalid name
+
+import sys
+import re
+import itertools
+import textwrap
+from cStringIO import StringIO
+
+from ganeti import utils
+from ganeti import compat
+from ganeti import build
+
+
+_SINGLE = "single-node"
+_MULTI = "multi-node"
+
+#: Expected length of an RPC definition
+_RPC_DEF_LEN = 8
+
+
+def _WritePreamble(sw):
+ """Writes a preamble for the RPC wrapper output.
+
+ """
+ sw.Write("# This code is automatically generated at build time.")
+ sw.Write("# Do not modify manually.")
+ sw.Write("")
+ sw.Write("\"\"\"Automatically generated RPC client wrappers.")
+ sw.Write("")
+ sw.Write("\"\"\"")
+ sw.Write("")
+ sw.Write("from ganeti import rpc_defs")
+ sw.Write("")
+
+
+def _WrapCode(line):
+ """Wraps Python code.
+
+ """
+ return textwrap.wrap(line, width=70, expand_tabs=False,
+ fix_sentence_endings=False, break_long_words=False,
+ replace_whitespace=True,
+ subsequent_indent=utils.ShellWriter.INDENT_STR)
+
+
+def _WriteDocstring(sw, name, timeout, kind, args, desc):
+ """Writes a docstring for an RPC wrapper.
+
+ """
+ sw.Write("\"\"\"Wrapper for RPC call '%s'", name)
+ sw.Write("")
+ if desc:
+ sw.Write(desc)
+ sw.Write("")
+
+ note = ["This is a %s call" % kind]
+ if timeout and not callable(timeout):
+ note.append(" with a timeout of %s" % utils.FormatSeconds(timeout))
+ sw.Write("@note: %s", "".join(note))
+
+ if kind == _SINGLE:
+ sw.Write("@type node: string")
+ sw.Write("@param node: Node name")
+ else:
+ sw.Write("@type node_list: list of string")
+ sw.Write("@param node_list: List of node names")
+
+ if args:
+ for (argname, _, argtext) in args:
+ if argtext:
+ docline = "@param %s: %s" % (argname, argtext)
+ for line in _WrapCode(docline):
+ sw.Write(line)
+ sw.Write("")
+ sw.Write("\"\"\"")
+
+
+def _WriteBaseClass(sw, clsname, calls):
+ """Write RPC wrapper class.
+
+ """
+ sw.Write("")
+ sw.Write("class %s(object):", clsname)
+ sw.IncIndent()
+ try:
+ sw.Write("# E1101: Non-existent members")
+ sw.Write("# R0904: Too many public methods")
+ sw.Write("# pylint: disable=E1101,R0904")
+
+ if not calls:
+ sw.Write("pass")
+ return
+
+ sw.Write("_CALLS = rpc_defs.CALLS[%r]", clsname)
+ sw.Write("")
+
+ for v in calls:
+ if len(v) != _RPC_DEF_LEN:
+ raise ValueError("Procedure %s has only %d elements, expected %d" %
+ (v[0], len(v), _RPC_DEF_LEN))
+
+ for (name, kind, _, timeout, args, _, _, desc) in calls:
+ funcargs = ["self"]
+
+ if kind == _SINGLE:
+ funcargs.append("node")
+ elif kind == _MULTI:
+ funcargs.append("node_list")
+ else:
+ raise Exception("Unknown kind '%s'" % kind)
+
+ funcargs.extend(map(compat.fst, args))
+
+ funcargs.append("_def=_CALLS[%r]" % name)
+
+ funcdef = "def call_%s(%s):" % (name, utils.CommaJoin(funcargs))
+ for line in _WrapCode(funcdef):
+ sw.Write(line)
+
+ sw.IncIndent()
+ try:
+ _WriteDocstring(sw, name, timeout, kind, args, desc)
+
+ buf = StringIO()
+ buf.write("return ")
+
+ # In case line gets too long and is wrapped in a bad spot
+ buf.write("( ")
+
+ buf.write("self._Call(_def, ")
+ if kind == _SINGLE:
+ buf.write("[node]")
+ else:
+ buf.write("node_list")
+
+ buf.write(", [%s])" %
+ # Function arguments
+ utils.CommaJoin(map(compat.fst, args)))
+
+ if kind == _SINGLE:
+ buf.write("[node]")
+ buf.write(")")
+
+ for line in _WrapCode(buf.getvalue()):
+ sw.Write(line)
+ finally:
+ sw.DecIndent()
+ sw.Write("")
+ finally:
+ sw.DecIndent()
+
+
+def main():
+ """Main function.
+
+ """
+ buf = StringIO()
+ sw = utils.ShellWriter(buf)
+
+ _WritePreamble(sw)
+
+ for filename in sys.argv[1:]:
+ sw.Write("# Definitions from '%s'", filename)
+
+ module = build.LoadModule(filename)
+
+    # Call types are re-defined in the definitions file to avoid imports. Verify
+    # here to ensure they're equal to local constants.
+ assert module.SINGLE == _SINGLE
+ assert module.MULTI == _MULTI
+
+ dups = utils.FindDuplicates(itertools.chain(*map(lambda value: value.keys(),
+ module.CALLS.values())))
+ if dups:
+ raise Exception("Found duplicate RPC definitions for '%s'" %
+ utils.CommaJoin(sorted(dups)))
+
+ for (clsname, calls) in module.CALLS.items():
+ _WriteBaseClass(sw, clsname, calls.values())
+
+ print buf.getvalue()
+
+
+if __name__ == "__main__":
+ main()
--- /dev/null
+#!/usr/bin/python
+#
+
+# Copyright (C) 2011 Google Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA.
+
+
+"""Script to verify file header.
+
+"""
+
+# pylint: disable=C0103
+# [C0103] Invalid name
+
+import sys
+import re
+import itertools
+
+from ganeti import constants
+from ganeti import utils
+from ganeti import compat
+
+
+#: Assume header is always in the first 8kB of a file
+_READ_SIZE = 8 * 1024
+
+_GPLv2 = [
+ "This program is free software; you can redistribute it and/or modify",
+ "it under the terms of the GNU General Public License as published by",
+ "the Free Software Foundation; either version 2 of the License, or",
+ "(at your option) any later version.",
+ "",
+ "This program is distributed in the hope that it will be useful, but",
+ "WITHOUT ANY WARRANTY; without even the implied warranty of",
+ "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU",
+ "General Public License for more details.",
+ "",
+ "You should have received a copy of the GNU General Public License",
+ "along with this program; if not, write to the Free Software",
+ "Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA",
+ "02110-1301, USA.",
+ ]
+
+
+_SHEBANG = re.compile(r"^#(?:|!(?:/usr/bin/python(?:| -u)|/bin/(?:|ba)sh))$")
+_COPYRIGHT_YEAR = r"20[01][0-9]"
+_COPYRIGHT = re.compile(r"# Copyright \(C\) (%s(?:, %s)*) Google Inc\.$" %
+ (_COPYRIGHT_YEAR, _COPYRIGHT_YEAR))
+_COPYRIGHT_DESC = "Copyright (C) <year>[, <year> ...] Google Inc."
+_AUTOGEN = "# This file is automatically generated, do not edit!"
+
+
+class HeaderError(Exception):
+ pass
+
+
+def _Fail(lineno, msg):
+ raise HeaderError("Line %s: %s" % (lineno, msg))
+
+
+def _CheckHeader(getline_fn):
+ (lineno, line) = getline_fn()
+
+ if line == _AUTOGEN:
+ return
+
+ if not _SHEBANG.match(line):
+ _Fail(lineno, ("Must contain nothing but a hash character (#) or a"
+ " shebang line (e.g. #!/bin/bash)"))
+
+ (lineno, line) = getline_fn()
+
+ if line == _AUTOGEN:
+ return
+
+ if line != "#":
+ _Fail(lineno, "Must contain nothing but hash character (#)")
+
+ (lineno, line) = getline_fn()
+ if line:
+ _Fail(lineno, "Must be empty")
+
+ (lineno, line) = getline_fn()
+ if not _COPYRIGHT.match(line):
+ _Fail(lineno, "Must contain copyright information (%s)" % _COPYRIGHT_DESC)
+
+ (lineno, line) = getline_fn()
+ if line != "#":
+ _Fail(lineno, "Must contain nothing but hash character (#)")
+
+ for licence_line in _GPLv2:
+ (lineno, line) = getline_fn()
+ if line != ("# %s" % licence_line).rstrip():
+ _Fail(lineno, "Does not match expected licence line (%s)" % licence_line)
+
+ (lineno, line) = getline_fn()
+ if line:
+ _Fail(lineno, "Must be empty")
+
+
+def Main():
+ """Main program.
+
+ """
+ fail = False
+
+ for filename in sys.argv[1:]:
+ content = utils.ReadFile(filename, size=_READ_SIZE)
+ lines = zip(itertools.count(1), content.splitlines())
+
+ try:
+ _CheckHeader(compat.partial(lines.pop, 0))
+ except HeaderError, err:
+ report = str(err)
+ print "%s: %s" % (filename, report)
+ fail = True
+
+ if fail:
+ sys.exit(constants.EXIT_FAILURE)
+ else:
+ sys.exit(constants.EXIT_SUCCESS)
+
+
+if __name__ == "__main__":
+ Main()
--- /dev/null
+#!/usr/bin/python
+#
+
+# Copyright (C) 2011 Google Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA.
+
+
+"""Script to check module imports.
+
+"""
+
+# pylint: disable=C0103
+# C0103: Invalid name
+
+import sys
+
+# All modules imported after this line are removed from the global list before
+# importing a module to be checked
+_STANDARD_MODULES = sys.modules.keys()
+
+import os.path
+
+from ganeti import build
+
+
+def main():
+ args = sys.argv[1:]
+
+ # Get references to functions used later on
+ load_module = build.LoadModule
+ abspath = os.path.abspath
+ commonprefix = os.path.commonprefix
+ normpath = os.path.normpath
+
+ script_path = abspath(__file__)
+ srcdir = normpath(abspath(args.pop(0)))
+
+ assert "ganeti" in sys.modules
+
+ for filename in args:
+ # Reset global state
+ for name in sys.modules.keys():
+ if name not in _STANDARD_MODULES:
+ sys.modules.pop(name, None)
+
+ assert "ganeti" not in sys.modules
+
+ # Load module (this might import other modules)
+ module = load_module(filename)
+
+ result = []
+
+ for (name, checkmod) in sorted(sys.modules.items()):
+ if checkmod is None or checkmod == module:
+ continue
+
+ try:
+ checkmodpath = getattr(checkmod, "__file__")
+ except AttributeError:
+ # Built-in module
+ pass
+ else:
+ abscheckmodpath = os.path.abspath(checkmodpath)
+
+ if abscheckmodpath == script_path:
+ # Ignore check script
+ continue
+
+ if commonprefix([abscheckmodpath, srcdir]) == srcdir:
+ result.append(name)
+
+ if result:
+ raise Exception("Module '%s' has illegal imports: %s" %
+ (filename, ", ".join(result)))
+
+
+if __name__ == "__main__":
+ main()
RELEASED_RE = re.compile(r"^\*\(Released (?P<day>[A-Z][a-z]{2}),"
r" (?P<date>.+)\)\*$")
UNRELEASED_RE = re.compile(r"^\*\(unreleased\)\*$")
+VERSION_RE = re.compile(r"^Version \d+(\.\d+)+( (beta|rc)\d+)?$")
def main():
prevline = None
expect_date = False
+ count_empty = 0
for line in fileinput.input():
line = line.rstrip("\n")
+ if VERSION_RE.match(line):
+ if count_empty != 2:
+ raise Exception("Line %s: Missing 2 empty lines before %s" %
+ (fileinput.filelineno(), line))
+
+ if UNRELEASED_RE.match(line) or RELEASED_RE.match(line):
+ if count_empty != 1:
+ raise Exception("Line %s: Missing 1 empty line before %s" %
+ (fileinput.filelineno(), line))
+
+ if line:
+ count_empty = 0
+ else:
+ count_empty += 1
+
if DASHES_RE.match(line):
- if not prevline.startswith("Version "):
- raise Exception("Line %s: Invalid title" % (fileinput.filelineno() - 1))
+ if not VERSION_RE.match(prevline):
+ raise Exception("Line %s: Invalid title" %
+ (fileinput.filelineno() - 1))
+ if len(line) != len(prevline):
+ raise Exception("Line %s: Invalid dashes length" %
+ (fileinput.filelineno()))
expect_date = True
elif expect_date:
#!/bin/bash
#
-# Copyright (C) 2009 Google Inc.
+# Copyright (C) 2009, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
set -e
+# Ensure the checks always use the same locale
+export LC_ALL=C
+
readonly maxlinelen=$(for ((i=0; i<81; ++i)); do echo -n .; done)
if [[ "${#maxlinelen}" != 81 ]]; then
let ++problems
echo "Longest line in $script is longer than 80 characters" >&2
fi
+
+ if grep -n -H -E -i \
+ '#.*\bpylint[[:space:]]*:[[:space:]]*disable-msg\b' "$script"
+ then
+ let ++problems
+ echo "Found old-style pylint disable pragma in $script" >&2
+ fi
done
if [[ "$problems" -gt 0 ]]; then
from ganeti.build import sphinx_ext
-_QUERY_FIELDS_RE = re.compile(r"^@QUERY_FIELDS_(?P<kind>[A-Z]+)@$")
+_DOC_RE = re.compile(r"^@(?P<class>[A-Z_]+)_(?P<kind>[A-Z]+)@$")
+
+_DOC_CLASSES_DATA = {
+ "CONSTANTS": (sphinx_ext.DOCUMENTED_CONSTANTS, sphinx_ext.BuildValuesDoc),
+ "QUERY_FIELDS": (query.ALL_FIELDS, sphinx_ext.BuildQueryFields),
+ }
def main():
for line in fileinput.input():
- m = _QUERY_FIELDS_RE.match(line)
+ m = _DOC_RE.match(line)
if m:
- fields = query.ALL_FIELDS[m.group("kind").lower()]
- for i in sphinx_ext.BuildQueryFields(fields):
+ fields_dict, builder = _DOC_CLASSES_DATA[m.group("class")]
+ fields = fields_dict[m.group("kind").lower()]
+ for i in builder(fields):
print i
else:
print line,
#!/bin/bash
#
-# Copyright (C) 2010 Google Inc.
+# Copyright (C) 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
$COVERAGE erase
for script; do
- $COVERAGE run --branch --append $script
+ if [[ "$script" == *-runasroot.py ]]; then
+ if [[ -z "$FAKEROOT" ]]; then
+ echo "FAKEROOT variable not set and needed for $script" >&2
+ exit 1
+ fi
+ cmdprefix="$FAKEROOT"
+ else
+ cmdprefix=
+ fi
+ $cmdprefix $COVERAGE run --branch --append $script
done
echo "Writing text report to $TEXT_COVERAGE ..." >&2
cp -r autotools daemons scripts lib tools test $tmpdir
mv $tmpdir/lib $tmpdir/ganeti
+ln -T -s $tmpdir/ganeti $tmpdir/lib
mkdir -p $tmpdir/htools
if [ -e htools/test ]; then
cp -p htools/test $tmpdir/htools/
#!/bin/bash
#
-# Copyright (C) 2010 Google Inc.
+# Copyright (C) 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
filename=$1
+execasroot() {
+ if [[ -z "$FAKEROOT" ]]; then
+ echo "FAKEROOT variable not set" >&2
+ exit 1
+ fi
+ exec "$FAKEROOT" "$@"
+}
+
case "$filename" in
+ *-runasroot.py) execasroot $PYTHON "$@" ;;
*.py) exec $PYTHON "$@" ;;
+ *-runasroot) execasroot "$@" ;;
*) exec "$@" ;;
esac
# --with-lvm-stripecount=...
AC_ARG_WITH([lvm-stripecount],
[AS_HELP_STRING([--with-lvm-stripecount=NUM],
- [the number of stripes to use for LVM volumes]
+ [the default number of stripes to use for LVM volumes]
[ (default is 1)]
)],
[lvm_stripecount="$withval"],
# --enable-drbd-barriers
AC_ARG_ENABLE([drbd-barriers],
[AS_HELP_STRING([--enable-drbd-barriers],
- [enable the DRBD barrier functionality (>= 8.0.12) (default: enabled)])],
+ [enable by default the DRBD barriers functionality (>= 8.0.12) (default: enabled)])],
[[if test "$enableval" != no; then
- DRBD_BARRIERS=True
+ DRBD_BARRIERS=n
+ DRBD_NO_META_FLUSH=False
else
- DRBD_BARRIERS=False
+ DRBD_BARRIERS=bfd
+ DRBD_NO_META_FLUSH=True
fi
]],
- [DRBD_BARRIERS=True])
+ [DRBD_BARRIERS=n
+ DRBD_NO_META_FLUSH=False
+ ])
AC_SUBST(DRBD_BARRIERS, $DRBD_BARRIERS)
+AC_SUBST(DRBD_NO_META_FLUSH, $DRBD_NO_META_FLUSH)
# --enable-syslog[=no/yes/only]
AC_ARG_ENABLE([syslog],
[],
[enable_htools_rapi=no])
+# --enable-confd
+ENABLE_CONFD=
+AC_ARG_ENABLE([confd],
+ [AS_HELP_STRING([--enable-confd],
+ [enable the ganeti-confd daemon (default: yes)])],
+ [[case "$enableval" in
+ no)
+ enable_confd=False
+ ;;
+ yes)
+ enable_confd=True
+ ;;
+ *)
+ echo "Invalid value for enable-confd '$enableval'"
+ exit 1
+ ;;
+ esac
+ ]],
+ [enable_confd=True])
+AC_SUBST(ENABLE_CONFD, $enable_confd)
+
# --with-disk-separator=...
AC_ARG_WITH([disk-separator],
[AS_HELP_STRING([--with-disk-separator=STRING],
if test "$enable_htools" != "no"; then
+# Check for qemu-img
+AC_ARG_VAR(QEMUIMG_PATH, [qemu-img path])
+AC_PATH_PROG(QEMUIMG_PATH, [qemu-img], [])
+if test -z "$QEMUIMG_PATH"
+then
+ AC_MSG_WARN([qemu-img not found, using ovfconverter will not be possible])
+fi
+
# Check for ghc
AC_ARG_VAR(GHC, [ghc path])
AC_PATH_PROG(GHC, [ghc], [])
fi # end if enable_htools, define automake conditions
AM_CONDITIONAL([WANT_HTOOLS], [test x$HTOOLS = xyes])
-AM_CONDITIONAL([WANT_HTOOLSTESTS], [test x$GHC_PKG_QUICKCHECK != x])
+AM_CONDITIONAL([WANT_HTOOLSTESTS], [test "x$GHC_PKG_QUICKCHECK" != x])
AM_CONDITIONAL([WANT_HTOOLSAPIDOC], [test x$HTOOLS_APIDOC = xyes])
+# Check for fakeroot
+AC_ARG_VAR(FAKEROOT_PATH, [fakeroot path])
+AC_PATH_PROG(FAKEROOT_PATH, [fakeroot], [])
+if test -z "$FAKEROOT_PATH"; then
+ AC_MSG_WARN(m4_normalize([fakeroot not found, tests that must run as root
+ will not be executed]))
+fi
+AM_CONDITIONAL([HAS_FAKEROOT], [test "x$FAKEROOT_PATH" != x])
+
SOCAT_USE_ESCAPE=
AC_ARG_ENABLE([socat-escape],
[AS_HELP_STRING([--enable-socat-escape],
AC_PYTHON_MODULE(pyparsing, t)
AC_PYTHON_MODULE(pyinotify, t)
AC_PYTHON_MODULE(pycurl, t)
+AC_PYTHON_MODULE(affinity)
# This is optional but then we've limited functionality
AC_PYTHON_MODULE(paramiko)
#!/bin/bash
#
-# Copyright (C) 2009 Google Inc.
+# Copyright (C) 2009, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
ganeti-noded
ganeti-masterd
ganeti-rapi
- ganeti-confd
)
+if [[ "@CUSTOM_ENABLE_CONFD@" == True ]]; then
+ DAEMONS+=( ganeti-confd )
+fi
+
NODED_ARGS=
MASTERD_ARGS=
CONFD_ARGS=
local name="$1"; shift
+ if [[ "$name" == ganeti-confd &&
+ "@CUSTOM_ENABLE_CONFD@" == False ]]; then
+ echo 'ganeti-confd disabled at build time' >&2
+ return 1
+ fi
+
# Convert daemon name to uppercase after removing "ganeti-" prefix
local plain_name=${name#ganeti-}
local ucname=$(tr a-z A-Z <<<$plain_name)
self._data.mtime = time.time()
utils.WriteFile(self._path,
- data=serializer.DumpJson(self._data.ToDict(), indent=True),
+ data=serializer.DumpJson(self._data.ToDict()),
mode=0400)
make_args=
fi
+# Make sure that directories will get correct permissions
+umask 0022
+
# install ganeti as a real tree
make $make_args install DESTDIR="$TXD"
this node's hardware resources; it runs on all nodes which are in a
cluster
- the :command:`ganeti-confd` daemon (Ganeti 2.1+) which runs on all
- nodes, but is only functional on master candidate nodes
+ nodes, but is only functional on master candidate nodes; this daemon
+ can be disabled at configuration time if you don't need its
+ functionality
- the :command:`ganeti-rapi` daemon which runs on the master node and
offers an HTTP-based API for the cluster
- the :command:`ganeti-masterd` daemon which runs on the master node and
gnt-instance startup INSTANCE_NAME
-While the command to stop one is::
+Note that this will not work when an instance is in a permanently
+stopped state ``offline``. In this case, you will first have to
+put it back to online mode by running::
+
+ gnt-instance modify --online INSTANCE_NAME
+
+The command to stop the running instance is::
gnt-instance shutdown INSTANCE_NAME
+If you want to shut the instance down more permanently, so that it
+does not require dynamically allocated resources (memory and vcpus),
+after shutting down an instance, execute the following::
+
+  gnt-instance modify --offline INSTANCE_NAME
+
.. warning:: Do not use the Xen or KVM commands directly to stop
instances. If you run for example ``xm shutdown`` or ``xm destroy``
on an instance Ganeti will automatically restart it (via
Since the process involves copying all data from the working node to the
target node, it will take a while, depending on the instance's disk
-size, node I/O system and network speed. But it is (baring any network
+size, node I/O system and network speed. But it is (barring any network
interruption) completely transparent for the instance.
Re-creating disks for non-redundant instances
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.append(os.path.abspath('.'))
+#sys.path.append(os.path.abspath("."))
# -- General configuration -----------------------------------------------------
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = "1.0"
+
# Add any Sphinx extension module names here, as strings. They can be extensions
-# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.todo', "ganeti.build.sphinx_ext"]
+# coming with Sphinx (named "sphinx.ext.*") or your custom ones.
+extensions = [
+ "sphinx.ext.todo",
+ "sphinx.ext.graphviz",
+ "ganeti.build.sphinx_ext",
+ ]
# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
# The suffix of source filenames.
-source_suffix = '.rst'
+source_suffix = ".rst"
# The encoding of source files.
-source_encoding = 'utf-8'
+source_encoding = "utf-8"
# The master toctree document.
-master_doc = 'index'
+master_doc = "index"
# General information about the project.
-project = u'Ganeti'
-copyright = u'2006, 2007, 2008, 2009, 2010, Google Inc.'
+project = u"Ganeti"
+copyright = u"2006, 2007, 2008, 2009, 2010, 2011, Google Inc."
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
-language = 'en'
+language = "en"
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
-#today = ''
+#today = ""
# Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
+#today_fmt = "%B %d, %Y"
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
-exclude_trees = ['_build', 'examples', 'api']
+exclude_trees = [
+ "_build",
+ "api",
+ "coverage"
+ "examples",
+ ]
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
-# If true, '()' will be appended to :func: etc. cross-reference text.
+# If true, "()" will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
-# The theme to use for HTML and HTML Help pages. Major themes that come with
-# Sphinx are currently 'default' and 'sphinxdoc'.
-html_theme = 'default'
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# If not "", a "Last updated on:" timestamp is inserted at every page bottom,
# using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
+#html_last_updated_fmt = "%b %d, %Y"
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
+#html_use_opensearch = ""
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = ''
+#html_file_suffix = ""
# Output file base name for HTML help builder.
-htmlhelp_basename = 'Ganetidoc'
+htmlhelp_basename = "Ganetidoc"
# -- Options for LaTeX output --------------------------------------------------
-# The paper size ('letter' or 'a4').
-#latex_paper_size = 'letter'
+# The paper size ("letter" or "a4").
+#latex_paper_size = "a4"
-# The font size ('10pt', '11pt' or '12pt').
-#latex_font_size = '10pt'
+# The font size ("10pt", "11pt" or "12pt").
+#latex_font_size = "10pt"
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
- ('index', 'Ganeti.tex', u'Ganeti Documentation',
- u'Google Inc.', 'manual'),
+ ("index", "Ganeti.tex", u"Ganeti Documentation",
+ u"Google Inc.", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# not chapters.
#latex_use_parts = False
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
# Additional stuff for the LaTeX preamble.
-#latex_preamble = ''
+#latex_preamble = ""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
"hmac": "4a4139b2c3c5921f7e439469a0a45ad200aead0f"
}
-"plj0" is a fourcc that details the message content. It stands for plain
+``plj0`` is a fourcc that details the message content. It stands for plain
json 0, and can be changed as we move on to different type of protocols
(for example protocol buffers, or encrypted json). What follows is a
json encoded string, with the following fields:
-- 'msg' contains a JSON-encoded query, its fields are:
+- ``msg`` contains a JSON-encoded query, its fields are:
- - 'protocol', integer, is the confd protocol version (initially just
- constants.CONFD_PROTOCOL_VERSION, with a value of 1)
- - 'type', integer, is the query type. For example "node role by name"
- or "node primary ip by instance ip". Constants will be provided for
- the actual available query types.
- - 'query', string, is the search key. For example an ip, or a node
- name.
- - 'rsalt', string, is the required response salt. The client must use
- it to recognize which answer it's getting.
+ - ``protocol``, integer, is the confd protocol version (initially
+ just ``constants.CONFD_PROTOCOL_VERSION``, with a value of 1)
+ - ``type``, integer, is the query type. For example "node role by
+ name" or "node primary ip by instance ip". Constants will be
+ provided for the actual available query types
+ - ``query`` is a multi-type field (depending on the ``type`` field):
-- 'salt' must be the current unix timestamp, according to the client.
- Servers can refuse messages which have a wrong timing, according to
- their configuration and clock.
-- 'hmac' is an hmac signature of salt+msg, with the cluster hmac key
+ - it can be missing, when the request is fully determined by the
+ ``type`` field
+ - it can contain a string which denotes the search key: for
+ example an IP, or a node name
+ - it can contain a dictionary, in which case the actual details
+ vary further per request type
+
+ - ``rsalt``, string, is the required response salt; the client must
+ use it to recognize which answer it's getting.
+
+- ``salt`` must be the current unix timestamp, according to the
+ client; servers should refuse messages which have a wrong timing,
+ according to their configuration and clock
+- ``hmac`` is an hmac signature of salt+msg, with the cluster hmac key
If an answer comes back (which is optional, since confd works over UDP)
it will be in this format::
Where:
-- 'plj0' the message type magic fourcc, as discussed above
-- 'msg' contains a JSON-encoded answer, its fields are:
-
- - 'protocol', integer, is the confd protocol version (initially just
- constants.CONFD_PROTOCOL_VERSION, with a value of 1)
- - 'status', integer, is the error code. Initially just 0 for 'ok' or
- '1' for 'error' (in which case answer contains an error detail,
- rather than an answer), but in the future it may be expanded to have
- more meanings (eg: 2, the answer is compressed)
- - 'answer', is the actual answer. Its type and meaning is query
- specific. For example for "node primary ip by instance ip" queries
+- ``plj0`` the message type magic fourcc, as discussed above
+- ``msg`` contains a JSON-encoded answer, its fields are:
+
+ - ``protocol``, integer, is the confd protocol version (initially
+ just constants.CONFD_PROTOCOL_VERSION, with a value of 1)
+ - ``status``, integer, is the error code; initially just ``0`` for
+ 'ok' or ``1`` for 'error' (in which case answer contains an error
+ detail, rather than an answer), but in the future it may be
+ expanded to have more meanings (e.g. ``2`` if the answer is
+ compressed)
+ - ``answer``, is the actual answer; its type and meaning is query
+ specific: for example for "node primary ip by instance ip" queries
it will be a string containing an IP address, for "node role by
- name" queries it will be an integer which encodes the role (master,
- candidate, drained, offline) according to constants.
+ name" queries it will be an integer which encodes the role
+ (master, candidate, drained, offline) according to constants
-- 'salt' is the requested salt from the query. A client can use it to
- recognize what query the answer is answering.
-- 'hmac' is an hmac signature of salt+msg, with the cluster hmac key
+- ``salt`` is the requested salt from the query; a client can use it
+ to recognize what query the answer is answering.
+- ``hmac`` is an hmac signature of salt+msg, with the cluster hmac key
Redistribute Config
design-cpu-pinning.rst
design-ovf-support.rst
design-network.rst
+ design-node-state-cache.rst
+ design-resource-model.rst
+ design-virtual-clusters.rst
.. vim: set textwidth=72 :
.. Local Variables:
--- /dev/null
+================
+Node State Cache
+================
+
+.. contents:: :depth: 4
+
+This is a design doc about the optimization of machine info retrieval.
+
+
+Current State
+=============
+
+Currently every RPC call is quite expensive as a TCP handshake has to be
+made as well as SSL negotiation. This especially is visible when getting
+node and instance info over and over again.
+
+This data, however, is quite easy to cache but needs some changes to how
+we retrieve data in the RPC as this is spread over several RPC calls
+and are hard to unify.
+
+
+Proposed changes
+================
+
+To overcome this situation with multiple information retrieval calls we
+introduce one single RPC call to get all the info in an organized manner,
+for easy store in the cache.
+
+As of now we have 3 different information RPC calls:
+
+- ``call_node_info``: To retrieve disk and hyper-visor information
+- ``call_instance_info``: To retrieve hyper-visor information for one
+ instance
+- ``call_all_instance_info``: To retrieve hyper-visor information for
+ all instances
+
+Not to mention that ``call_all_instance_info`` and
+``call_instance_info`` return different information in the dict.
+
+To unify the data and organize them we introduce a new RPC call
+``call_node_snapshot`` doing all of the above in one go. Which
+data we want to know will be specified via a dict of request
+types: CACHE_REQ_HV, CACHE_REQ_DISKINFO, CACHE_REQ_BOOTID
+
+As this cache is representing the state of a given node we use the
+name of a node as the key to retrieve the data from the cache. A
+name-space separation of node and instance data is not possible at the
+current point. This is due to the fact that some of the node hyper-visor
+information like free memory is correlating with instances running.
+
+An example of how the data for a node in the cache looks like::
+
+ {
+ constants.CACHE_REQ_HV: {
+ constants.HT_XEN_PVM: {
+ _NODE_DATA: {
+ "memory_total": 32763,
+ "memory_free": 9159,
+ "memory_dom0": 1024,
+ "cpu_total": 4,
+ "cpu_sockets": 2
+ },
+ _INSTANCES_DATA: {
+ "inst1": {
+ "memory": 4096,
+ "state": "-b----",
+ "time": 102399.3,
+ "vcpus": 1
+ },
+ "inst2": {
+ "memory": 4096,
+ "state": "-b----",
+ "time": 12280.0,
+ "vcpus": 3
+ }
+ }
+ }
+ },
+ constants.CACHE_REQ_DISKINFO: {
+ "xenvg": {
+ "vg_size": 1048576,
+ "vg_free": 491520
+ },
+ }
+ constants.CACHE_REQ_BOOTID: "0dd0983c-913d-4ce6-ad94-0eceb77b69f9"
+ }
+
+This way we get easy to organize information which can simply be arranged in
+the cache.
+
+The 3 RPC calls mentioned above will remain for compatibility reason but
+will be simple wrappers around this RPC call.
+
+
+Cache invalidation
+------------------
+
+The cache is invalidated at every RPC call which is not proven to not
+modify the state of a given node. This is to avoid inconsistency between
+cache and actual node state.
+
+There are some corner cases which invalidate the whole cache at once as
+they usually affect other nodes states too:
+
+ - migrate/failover
+ - import/export
+
+A request will be served from the cache if and only if it can be
+fulfilled entirely from it (i.e. all the CACHE_REQ_* entries are already
+present). Otherwise, we will invalidate the cache and actually do the
+remote call.
+
+In addition, every cache entry will have a TTL of about 10 minutes which
+should be enough to accommodate most use cases.
+
+We also allow an option to the calls to bypass the cache completely and
+do a force remote call. However, this will invalidate the present
+entries and populate the cache with the new retrieved values.
+
+
+Additional cache population
+---------------------------
+
+Besides the commands which call the above RPC calls, a full cache
+population can also be done by a separate new op-code run by
+``ganeti-watcher`` periodically. This op-code will be used instead of
+the old ones.
+
+
+Possible regressions
+====================
+
+As we change from getting "one hyper-visor information" to "get all we
+know about this hyper-visor"-style we have a regression in time of
+execution. The execution time is about 1.8x more in process execution
+time. However, this does not include the latency and negotiation time
+needed for each separate RPC call. Also if we hit the cache all 3 costs
+will be 0. The only time taken is to look up the info in the cache and
+the deserialization of the data. Which takes down the time from today
+~300ms to ~100ms.
+
+.. vim: set textwidth=72 :
+.. Local Variables:
+.. mode: rst
+.. fill-column: 72
+.. End:
Objective
---------
-Extend Ganeti with Out of Band Cluster Node Management Capabilities.
+Extend Ganeti with Out of Band (:term:`OOB`) Cluster Node Management
+Capabilities.
Background
----------
-Ganeti currently has no support for Out of Band management of the nodes in a
-cluster. It relies on the OS running on the nodes and has therefore limited
-possibilities when the OS is not responding. The command ``gnt-node powercycle``
-can be issued to attempt a reboot of a node that crashed but there are no means
-to power a node off and power it back on. Supporting this is very handy in the
-following situations:
-
- * **Emergency Power Off**: During emergencies, time is critical and manual
- tasks just add latency which can be avoided through automation. If a server
- room overheats, halting the OS on the nodes is not enough. The nodes need
- to be powered off cleanly to prevent damage to equipment.
- * **Repairs**: In most cases, repairing a node means that the node has to be
- powered off.
- * **Crashes**: Software bugs may crash a node. Having an OS independent way to
- power-cycle a node helps to recover the node without human intervention.
+Ganeti currently has no support for Out of Band management of the nodes
+in a cluster. It relies on the OS running on the nodes and has therefore
+limited possibilities when the OS is not responding. The command
+``gnt-node powercycle`` can be issued to attempt a reboot of a node that
+crashed but there are no means to power a node off and power it back
+on. Supporting this is very handy in the following situations:
+
+ * **Emergency Power Off**: During emergencies, time is critical and
+ manual tasks just add latency which can be avoided through
+ automation. If a server room overheats, halting the OS on the nodes
+ is not enough. The nodes need to be powered off cleanly to prevent
+ damage to equipment.
+ * **Repairs**: In most cases, repairing a node means that the node has
+ to be powered off.
+ * **Crashes**: Software bugs may crash a node. Having an OS
+ independent way to power-cycle a node helps to recover the node
+ without human intervention.
Overview
--------
-Ganeti will be extended with OOB capabilities through adding a new **Cluster
-Parameter** (``--oob-program``), a new **Node Property** (``--oob-program``), a
-new **Node State (powered)** and support in ``gnt-node`` for invoking an
-**External Helper Command** which executes the actual OOB command (``gnt-node
-<command> nodename ...``). The supported commands are: ``power on``,
-``power off``, ``power cycle``, ``power status`` and ``health``.
+Ganeti will be extended with OOB capabilities through adding a new
+**Cluster Parameter** (``--oob-program``), a new **Node Property**
+(``--oob-program``), a new **Node State (powered)** and support in
+``gnt-node`` for invoking an **External Helper Command** which executes
+the actual OOB command (``gnt-node <command> nodename ...``). The
+supported commands are: ``power on``, ``power off``, ``power cycle``,
+``power status`` and ``health``.
.. note::
- The new **Node State (powered)** is a **State of Record
- (SoR)**, not a **State of World (SoW)**. The maximum execution time of the
- **External Helper Command** will be limited to 60s to prevent the cluster from
- getting locked for an undefined amount of time.
+ The new **Node State (powered)** is a **State of Record**
+ (:term:`SoR`), not a **State of World** (:term:`SoW`). The maximum
+ execution time of the **External Helper Command** will be limited to
+ 60s to prevent the cluster from getting locked for an undefined amount
+ of time.
Detailed Design
---------------
| ``--groups``: To operate on groups instead of nodes
| ``--all``: To operate on the whole cluster
-This is a convenience command to allow easy emergency power off of a whole
-cluster or part of it. It takes care of all steps needed to get the cluster into
-a sane state to turn off the nodes.
+This is a convenience command to allow easy emergency power off of a
+whole cluster or part of it. It takes care of all steps needed to get
+the cluster into a sane state to turn off the nodes.
-With ``--on`` it does the reverse and tries to bring the rest of the cluster back
-to life.
+With ``--on`` it does the reverse and tries to bring the rest of the
+cluster back to life.
.. note::
- The master node is not able to shut itself cleanly down. Therefore, this
- command will not do all the work on single node clusters. On multi node
- clusters the command tries to find another master or if that is not possible
- prepares everything to the point where the user has to shutdown the master
- node itself alone this applies also to the single node cluster configuration.
+ The master node is not able to shut itself cleanly down. Therefore,
+ this command will not do all the work on single node clusters. On
+ multi node clusters the command tries to find another master or if
+ that is not possible prepares everything to the point where the user
+ has to shutdown the master node itself alone this applies also to the
+ single node cluster configuration.
New ``gnt-node`` Property
+++++++++++++++++++++++++
| Options: ``--oob-program``: executable OOB program (absolute path)
.. note::
- If ``--oob-program`` is set to ``!`` then the node has no OOB capabilities.
- Otherwise, we will inherit the node group respectively the cluster wide
- value. I.e. the nodes have to opt out from OOB capabilities.
+ If ``--oob-program`` is set to ``!`` then the node has no OOB
+ capabilities. Otherwise, we will inherit the node group respectively
+ the cluster wide value. I.e. the nodes have to opt out from OOB
+ capabilities.
Addition to ``gnt-cluster verify``
++++++++++++++++++++++++++++++++++
| Option: None
| Additional Checks:
- 1. existence and execution flag of OOB program on all Master Candidates if
- the cluster parameter ``--oob-program`` is set or at least one node has
- the property ``--oob-program`` set. The OOB helper is just invoked on the
- master
- 2. check if node state powered matches actual power state of the machine for
- those nodes where ``--oob-program`` is set
+ 1. existence and execution flag of OOB program on all Master
+ Candidates if the cluster parameter ``--oob-program`` is set or at
+ least one node has the property ``--oob-program`` set. The OOB
+ helper is just invoked on the master
+ 2. check if node state powered matches actual power state of the
+ machine for those nodes where ``--oob-program`` is set
New Node State
++++++++++++++
Ganeti supports the following two boolean states related to the nodes:
**drained**
- The cluster still communicates with drained nodes but excludes them from
- allocation operations
+ The cluster still communicates with drained nodes but excludes them
+ from allocation operations
**offline**
- if offline, the cluster does not communicate with offline nodes; useful for
- nodes that are not reachable in order to avoid delays
+ if offline, the cluster does not communicate with offline nodes;
+ useful for nodes that are not reachable in order to avoid delays
And will extend this list with the following boolean state:
**powered**
- if not powered, the cluster does not communicate with not powered nodes if
- the node property ``--oob-program`` is not set, the state powered is not
- displayed
+ if not powered, the cluster does not communicate with not powered
+ nodes if the node property ``--oob-program`` is not set, the state
+ powered is not displayed
Additionally modify the meaning of the offline state as follows:
**offline**
- if offline, the cluster does not communicate with offline nodes (**with the
- exception of OOB commands for nodes where** ``--oob-program`` **is set**);
- useful for nodes that are not reachable in order to avoid delays
+ if offline, the cluster does not communicate with offline nodes
+ (**with the exception of OOB commands for nodes where**
+ ``--oob-program`` **is set**); useful for nodes that are not reachable
+ in order to avoid delays
The corresponding command extensions are:
| Parameter: [ ``nodename`` ... ]
| Option: None
-Additional Output (SoR, ommited if node property ``--oob-program`` is not set):
+Additional Output (:term:`SoR`, omitted if node property
+``--oob-program`` is not set):
powered: ``[True|False]``
| Program: ``gnt-node``
| Command: ``modify``
| Parameter: nodename
| Option: [ ``--powered=yes|no`` ]
-| Reasoning: sometimes you will need to sync the SoR with the SoW manually
+| Reasoning: sometimes you will need to sync the :term:`SoR` with the :term:`SoW` manually
| Caveat: ``--powered`` can only be modified if ``--oob-program`` is set for
| the node in question
| Options: None
| Caveats:
- * If no nodenames are passed to ``power [on|off|cycle]``, the user will be
- prompted with ``"Do you really want to power [on|off|cycle] the following
- nodes: <display list of OOB capable nodes in the cluster)? (y/n)"``
+ * If no nodenames are passed to ``power [on|off|cycle]``, the user
+ will be prompted with ``"Do you really want to power [on|off|cycle]
+ the following nodes: <display list of OOB capable nodes in the
+ cluster)? (y/n)"``
* For ``power-status``, nodename is optional, if omitted, we list the
- power-status of all OOB capable nodes in the cluster (SoW)
+ power-status of all OOB capable nodes in the cluster (:term:`SoW`)
* User should be warned and needs to confirm with yes if s/he tries to
``power [off|cycle]`` a node with running instances.
Error Handling
^^^^^^^^^^^^^^
-+------------------------------+-----------------------------------------------+
-| Exception | Error Message |
-+==============================+===============================================+
-| OOB program return code != 0 | OOB program execution failed ($ERROR_MSG) |
-+------------------------------+-----------------------------------------------+
-| OOB program execution time | OOB program execution timeout exceeded, OOB |
-| exceeds 60s | program execution aborted |
-+------------------------------+-----------------------------------------------+
++-----------------------------+----------------------------------------------+
+| Exception | Error Message |
++=============================+==============================================+
+| OOB program return code != 0| OOB program execution failed ($ERROR_MSG) |
++-----------------------------+----------------------------------------------+
+| OOB program execution time | OOB program execution timeout exceeded, OOB |
+| exceeds 60s | program execution aborted |
++-----------------------------+----------------------------------------------+
Node State Changes
^^^^^^^^^^^^^^^^^^
-+----------------+-----------------+----------------+--------------------------+
-| State before | Command | State after | Comment |
-| execution | | execution | |
-+================+=================+================+==========================+
-| powered: False | ``power off`` | powered: False | FYI: IPMI will complain |
-| | | | if you try to power off |
-| | | | a machine that is already|
-| | | | powered off |
-+----------------+-----------------+----------------+--------------------------+
-| powered: False | ``power cycle`` | powered: False | FYI: IPMI will complain |
-| | | | if you try to cycle a |
-| | | | machine that is already |
-| | | | powered off |
-+----------------+-----------------+----------------+--------------------------+
-| powered: False | ``power on`` | powered: True | |
-+----------------+-----------------+----------------+--------------------------+
-| powered: True | ``power off`` | powered: False | |
-+----------------+-----------------+----------------+--------------------------+
-| powered: True | ``power cycle`` | powered: True | |
-+----------------+-----------------+----------------+--------------------------+
-| powered: True | ``power on`` | powered: True | FYI: IPMI will complain |
-| | | | if you try to power on |
-| | | | a machine that is already|
-| | | | powered on |
-+----------------+-----------------+----------------+--------------------------+
++----------------+---------------+----------------+--------------------------+
+| State before |Command | State after | Comment |
+| execution | | execution | |
++================+===============+================+==========================+
+| powered: False |``power off`` | powered: False | FYI: IPMI will complain |
+| | | | if you try to power off |
+| | | | a machine that is already|
+| | | | powered off |
++----------------+---------------+----------------+--------------------------+
+| powered: False |``power cycle``| powered: False | FYI: IPMI will complain |
+| | | | if you try to cycle a |
+| | | | machine that is already |
+| | | | powered off |
++----------------+---------------+----------------+--------------------------+
+| powered: False |``power on`` | powered: True | |
++----------------+---------------+----------------+--------------------------+
+| powered: True |``power off`` | powered: False | |
++----------------+---------------+----------------+--------------------------+
+| powered: True |``power cycle``| powered: True | |
++----------------+---------------+----------------+--------------------------+
+| powered: True |``power on`` | powered: True | FYI: IPMI will complain |
+| | | | if you try to power on |
+| | | | a machine that is already|
+| | | | powered on |
++----------------+---------------+----------------+--------------------------+
.. note::
* If the command fails, the Node State remains unchanged.
* We will not prevent the user from trying to power off a node that is
- already powered off since the powered state represents the **SoR** only and
- not the **SoW**. This can however create problems when the cluster
- administrator wants to bring the **SoR** in sync with the **SoW** without
- actually having to mess with the node(s). For this case, we allow direct
- modification of the powered state through the gnt-node modify
- ``--powered=[yes|no]`` command as long as the node has OOB capabilities
- (i.e. ``--oob-program`` is set).
+ already powered off since the powered state represents the
+ :term:`SoR` only and not the :term:`SoW`. This can however create
+ problems when the cluster administrator wants to bring the
+  :term:`SoR` in sync with the :term:`SoW` without actually having to
+ mess with the node(s). For this case, we allow direct modification
+ of the powered state through the gnt-node modify
+ ``--powered=[yes|no]`` command as long as the node has OOB
+ capabilities (i.e. ``--oob-program`` is set).
* All node power state changes will be logged
-Node Power Status Listing (SoW)
-+++++++++++++++++++++++++++++++
+Node Power Status Listing (:term:`SoW`)
++++++++++++++++++++++++++++++++++++++++
| Program: ``gnt-node``
| Command: ``power-status``
| Parameters: [ ``nodename`` ... ]
-Example output (represents **SoW**)::
+Example output (represents :term:`SoW`)::
gnt-node oob power-status
Node Power Status
.. note::
- * We use ``unknown`` in case the Helper Program could not determine the power
- state.
- * If no nodenames are provided, we will list the power state of all nodes
- which are not opted out from OOB management.
- * Only nodes which are not opted out from OOB management will be listed.
- Invoking the command on a node that does not meet this condition will
- result in an error message "Node X does not support OOB commands".
+ * We use ``unknown`` in case the Helper Program could not determine
+ the power state.
+ * If no nodenames are provided, we will list the power state of all
+ nodes which are not opted out from OOB management.
+ * Only nodes which are not opted out from OOB management will be
+ listed. Invoking the command on a node that does not meet this
+ condition will result in an error message "Node X does not support
+ OOB commands".
-Node Power Status Listing (SoR)
-+++++++++++++++++++++++++++++++
+Node Power Status Listing (:term:`SoR`)
++++++++++++++++++++++++++++++++++++++++
| Program: ``gnt-node``
| Command: ``info``
| Parameter: [ ``nodename`` ... ]
| Option: None
-Example output (represents **SoR**)::
+Example output (represents :term:`SoR`)::
gnt-node info node1.example.com
Node name: node1.example.com
- inst7.example.com
.. note::
- Only nodes which are not opted out from OOB management will
- report the powered state.
+ Only nodes which are not opted out from OOB management will report the
+ powered state.
New ``gnt-node`` oob subcommand: ``health``
+++++++++++++++++++++++++++++++++++++++++++
Caveats:
- * If no nodename(s) are provided, we will report the health of all nodes in
- the cluster which have ``--oob-program`` set.
- * Only nodes which are not opted out from OOB management will report their
- health. Invoking the command on a node that does not meet this condition
- will result in an error message "Node does not support OOB commands".
+ * If no nodename(s) are provided, we will report the health of all
+ nodes in the cluster which have ``--oob-program`` set.
+ * Only nodes which are not opted out from OOB management will report
+ their health. Invoking the command on a node that does not meet this
+ condition will result in an error message "Node does not support OOB
+ commands".
For error handling see `Error Handling`_
Return Codes
^^^^^^^^^^^^
-+---------------+--------------------------+
-| Return code | Meaning |
-+===============+==========================+
-| 0 | Command succeeded |
-+---------------+--------------------------+
-| 1 | Command failed |
-+---------------+--------------------------+
-| others | Unsupported/undefined |
-+---------------+--------------------------+
-
-Error messages are passed from the helper program to Ganeti through StdErr
-(return code == 1). On StdOut, the helper program will send data back to
-Ganeti (return code == 0). The format of the data is JSON.
-
-+------------------+-------------------------------+
-| Command | Expected output |
-+==================+===============================+
-| ``power-on`` | None |
-+------------------+-------------------------------+
-| ``power-off`` | None |
-+------------------+-------------------------------+
-| ``power-cycle`` | None |
-+------------------+-------------------------------+
-| ``power-status`` | ``{ "powered": true|false }`` |
-+------------------+-------------------------------+
-| ``health`` | :: |
-| | |
-| | [[item, status], |
-| | [item, status], |
-| | ...] |
-+------------------+-------------------------------+
++-------------+-------------------------+
+| Return code | Meaning |
++=============+=========================+
+| 0 | Command succeeded |
++-------------+-------------------------+
+| 1 | Command failed |
++-------------+-------------------------+
+| others | Unsupported/undefined |
++-------------+-------------------------+
+
+Error messages are passed from the helper program to Ganeti through
+:manpage:`stderr(3)` (return code == 1). On :manpage:`stdout(3)`, the
+helper program will send data back to Ganeti (return code == 0). The
+format of the data is JSON.
+
++-----------------+------------------------------+
+| Command | Expected output |
++=================+==============================+
+| ``power-on`` | None |
++-----------------+------------------------------+
+| ``power-off`` | None |
++-----------------+------------------------------+
+| ``power-cycle`` | None |
++-----------------+------------------------------+
+| ``power-status``| ``{ "powered": true|false }``|
++-----------------+------------------------------+
+| ``health`` | :: |
+| | |
+| | [[item, status], |
+| | [item, status], |
+| | ...] |
++-----------------+------------------------------+
Data Format
^^^^^^^^^^^
For the health output, the fields are:
-+--------+--------------------------------------------------------------------+
-| Field | Meaning |
-+========+====================================================================+
-| item | String identifier of the item we are querying the health of, |
-| | examples: |
-| | |
-| | * Ambient Temp |
-| | * PS Redundancy |
-| | * FAN 1 RPM |
-+--------+--------------------------------------------------------------------+
-| status | String; Can take one of the following four values: |
-| | |
-| | * OK |
-| | * WARNING |
-| | * CRITICAL |
-| | * UNKNOWN |
-+--------+--------------------------------------------------------------------+
++--------+------------------------------------------------------------------+
+| Field | Meaning |
++========+==================================================================+
+| item | String identifier of the item we are querying the health of, |
+| | examples: |
+| | |
+| | * Ambient Temp |
+| | * PS Redundancy |
+| | * FAN 1 RPM |
++--------+------------------------------------------------------------------+
+| status | String; Can take one of the following four values: |
+| | |
+| | * OK |
+| | * WARNING |
+| | * CRITICAL |
+| | * UNKNOWN |
++--------+------------------------------------------------------------------+
.. note::
- * The item output list is defined by the Helper Program. It is up to the
- author of the Helper Program to decide which items should be monitored and
- what each corresponding return status is.
- * Ganeti will currently not take any actions based on the item status. It
- will however create log entries for items with status WARNING or CRITICAL
- for each run of the ``gnt-node oob health nodename`` command. Automatic
- actions (regular monitoring of the item status) is considered a new service
- and will be treated in a separate design document.
+ * The item output list is defined by the Helper Program. It is up to
+ the author of the Helper Program to decide which items should be
+ monitored and what each corresponding return status is.
+ * Ganeti will currently not take any actions based on the item
+ status. It will however create log entries for items with status
+ WARNING or CRITICAL for each run of the ``gnt-node oob health
+ nodename`` command. Automatic actions (regular monitoring of the
+ item status) is considered a new service and will be treated in a
+ separate design document.
Logging
-------
-The ``gnt-node power-[on|off]`` (power state changes) commands will create log
-entries following current Ganeti logging practices. In addition, health items
-with status WARNING or CRITICAL will be logged for each run of ``gnt-node
-health``.
+The ``gnt-node power-[on|off]`` (power state changes) commands will
+create log entries following current Ganeti logging practices. In
+addition, health items with status WARNING or CRITICAL will be logged
+for each run of ``gnt-node health``.
.. vim: set textwidth=72 :
.. Local Variables:
distribution (e.g. by allowing usage of public key infrastructure and
providing tools for management of basic software licensing).
-There are no limitations regarding hard drive images used, as long as
-the description is provided. Any hardware described in a proper
-i.e. CIM - Common Information Model) format is accepted, although
-there is no guarantee that every virtualization software will support
-all types of hardware.
+There are no limitations regarding disk images used, as long as the
+description is provided. Any hardware described in a proper format
+(i.e. CIM - Common Information Model) is accepted, although there is no
+guarantee that every virtualization software will support all types of
+hardware.
-OVF package should contain one file with ``.ovf`` extension, which is an
-XML file specifying the following (per virtual machine):
+OVF package should contain exactly one file with ``.ovf`` extension,
+which is an XML file specifying the following (per virtual machine):
- virtual disks
- network description
Additionally, the package may have some disk image files and other
additional resources (e.g. ISO images).
+In order to provide secure means of distribution for OVF packages, the
+manifest and certificate are provided. Manifest (``.mf`` file) contains
+checksums for all the files in OVF package, whereas certificate
+(``.cert`` file) contains X.509 certificate and a checksum of manifest
+file. Both files are not compulsory, but certificate requires manifest
+to be present.
+
Supported disk formats
----------------------
Although OVF is claimed to support 'any disk format', what we are
-interested in is which of the formats are supported by VM managers
-that currently use OVF.
+interested in is which formats are supported by VM managers that
+currently use OVF.
- VMWare: ``.vmdk`` (which comes in at least 3 different flavours:
``sparse``, ``compressed`` and ``streamOptimized``)
- Red Hat Enterprise Virtualization: ``.raw`` (raw disk format),
``.cow`` (qemu's ``QCOW2``)
- other: AbiCloud, OpenNode Cloud, SUSE Studio, Morfeo Claudia,
- OpenStack
+ OpenStack: mostly ``.vmdk``
-In our implementation of the OVF we plan to allow a choice between
-raw, cow and vmdk disk formats for both import and export. The
-justification is the following:
+In our implementation of the OVF we allow a choice between raw, cow and
+vmdk disk formats for both import and export. Other formats convertible
+using ``qemu-img`` are allowed in import mode, but not tested.
+The justification is the following:
- Raw format is supported as it is the main format of disk images used
in Ganeti, thus it is effortless to provide support for this format
-- Cow is used in Qemu, [TODO: ..why do we support it, again? That is,
- if we do?]
+- Cow is used in Qemu
- Vmdk is most commonly supported in virtualization software, it also
has the advantage of producing relatively small disk images, which
is extremely important advantage when moving instances.
-The conversion between RAW and the other formats will be done using
-qemu-img, which transforms, among other, raw disk images to monolithic
-sparse vmdk images.
-
Import and export - the closer look
===================================
<gnt:VersionId/>
<gnt:AutoBalance/>
<gnt:Tags></gnt:Tags>
- <gnt:OSParameters></gnt:OSParameters>
+      <gnt:DiskTemplate></gnt:DiskTemplate>
+ <gnt:OperatingSystem>
+ <gnt:Name/>
+ <gnt:Parameters></gnt:Parameters>
+ </gnt:OperatingSystem>
<gnt:Hypervisor>
- <gnt:HypervisorParameters>
- </gnt:HypervisorParameters>
+ <gnt:Name/>
+ <gnt:Parameters></gnt:Parameters>
</gnt:Hypervisor>
+ <gnt:Network>
+ <gnt:Mode/>
+ <gnt:MACAddress/>
+ <gnt:Link/>
+ <gnt:IPAddress/>
+ </gnt:Network>
</gnt:GanetiSection>
</Envelope>
where will the data be in OVF format::
[instance]
- disk0_dump = filename => References
- disk0_ivname = name => ignored
- disk0_size = size_in_mb => DiskSection
- disk_count = number => ignored
- disk_template = disk_type => References
- hypervisor = hyp-name => gnt:HypervisorSection
+ disk0_dump = filename => File in References
+ disk0_ivname = name => generated automatically
+ disk0_size = size_in_mb => calculated after disk conversion
+ disk_count = number => generated automatically
+ disk_template = disk_type => gnt:DiskTemplate
+ hypervisor = hyp-name => gnt:Name in gnt:Hypervisor
name = inst-name => Name in VirtualSystem
- nic0_ip = ip => Item in VirtualHardwareSection
- nic0_link = link => Item in VirtualHardwareSection
- nic0_mac = mac => Item in VirtualHardwareSection
- nic0_mode = mode => Network in NetworkSection
- nic_count = number => ignored
+ nic0_ip = ip => gnt:IPAddress in gnt:Network
+ nic0_link = link => gnt:Link in gnt:Network
+ nic0_mac = mac => gnt:MACAddress in gnt:Network or
+ Item in VirtualHardwareSection
+ nic0_mode = mode => gnt:Mode in gnt:Network
+ nic_count = number => generated automatically
tags => gnt:Tags
[backend]
vcpus = number => Item in VirtualHardwareSection
[export]
- compression => DiskSection
- os => OperatingSystemSection
+ compression => ignored
+ os => gnt:Name in gnt:OperatingSystem
source => ignored
timestamp => ignored
- version => gnt:VersionId
+ version => gnt:VersionId or
+ constants.EXPORT_VERSION
- [os] => gnt:OSParameters
+ [os] => gnt:Parameters in gnt:OperatingSystem
- [hypervisor] => gnt:HypervisorParameters
+ [hypervisor] => gnt:Parameters in gnt:Hypervisor
In case of multiple networks/disks used by an instance, they will
all be saved in appropriate sections as specified above for the first
import may be missing. Most often it will happen that such OVF package
will lack the ``gnt:GanetiSection``.
-If this happens, the tool will simply ask for all the necessary
-information or otherwise you can specify all the missing parameters in
-the command line. For the latter, please refer to [TODO: reference to
-command line options]
+If this happens, you can specify all the missing parameters in
+the command line. Please refer to `Command Line`_ section.
+
+In the :doc:`ovfconverter` we provide examples of
+options when converting from VirtualBox, VMWare and OpenSuseStudio.
Export to other virtualization software
---------------------------------------
cause to skip the non-standard information.
2. Manually remove the gnt:GanetiSection from the ``.ovf`` file. You
-will also have to recompute sha1 sum (``sha1sum`` command) and update
-your ``.mf`` file with new value.
+will also have to recompute sha1 sum (``sha1sum`` command) of the .ovf
+file and update your ``.mf`` file with new value.
.. note::
Manual change option is only recommended when you have exported your
The limitations regarding import of the OVF instances generated
outside Ganeti will be (in general) the same, as limitations for
Ganeti itself. The desired behavior in case of encountering
-unsupported element will be to ignore this element's tag and inform
-the user on console output, if possible - without interruption of the
-import process.
+unsupported element will be to ignore this element's tag without
+interruption of the import process.
Package
-------
Disks
-----
-As mentioned, Ganeti will allow exporting only ``raw``, ``cow`` and
-``vmdk`` formats. As for import, we will support all that
-``qemu-img`` can convert to raw format. At this point this means
-``raw``, ``cow``, ``qcow``, ``qcow2``, ``vmdk`` and ``cloop``. We do
-not plan for now to support ``vdi`` or ``vhd``.
+As mentioned, Ganeti will allow export in ``raw``, ``cow`` and ``vmdk``
+formats. This means, for example, that the appropriate ``ovf:format``
+tag will be provided.
+As for import, we will support all formats that ``qemu-img`` can convert
+to ``raw``. At this point this means ``raw``, ``cow``, ``qcow``,
+``qcow2``, ``vmdk`` and ``cloop``. We do not plan for now to support
+``vdi`` or ``vhd`` unless they become part of qemu-img supported formats.
-We plan to support compression both for import and export - in tar.gz
+We plan to support compression both for import and export - in gzip
format. There is also a possibility to provide virtual disk in chunks
-of equal size.
+of equal size. The latter will not be implemented in the first version,
+but we do plan to support it eventually.
+
-When no ``ovf:format`` tag is provided during import, we assume that
-the disk is to be created on import and proceed accordingly.
+The ``ovf:format`` tag is not used in our case when importing. Instead
+we use ``qemu-img info``, which provides enough information for our
+purposes and is better standardized.
+
+Please note, that due to security reasons we require the disk image to
+be in the same directory as the ``.ovf`` description file for both
+import and export.
+
+In order to completely ignore disk-related information in resulting
+config file, please use ``--disk-template=diskless`` option.
Network
-------
-There are no known limitations regarding network support.
+Ganeti provides support for routed and bridged mode for the networks.
+Since the standard OVF format does not contain any information regarding
+used network type, we add our own source of such information in
+``gnt:GanetiSection``. In case this additional information is not
+present, we perform a simple check - if network name specified in
+``NetworkSection`` contains words ``bridged`` or ``routed``, we consider
+this to be the network type. Otherwise option ``auto`` is chosen, in
+which case the cluster's default value for that field will be used when
+importing.
+This provides a safe fallback in case of NAT networks usage, which are
+commonly used e.g. in VirtualBox.
Hardware
--------
-TODO
+The supported hardware is limited to virtual CPUs, RAM memory, disks and
+networks. In particular, no USB support is currently provided, as Ganeti
+does not support USB devices.
Operating Systems
-----------------
-TODO
+Support for different operating systems depends solely on their
+accessibility for Ganeti instances. List of installed OSes can be
+checked using ``gnt-os list`` command.
+
+References
+----------
+
+Files listed in ``ovf:References`` section cannot be hyperlinks.
Other
-----
+The instance name (``gnt:VirtualSystem\gnt:Name`` or command line's
+``--name`` option) has to be resolvable in order for successful import
+using ``gnt-backup import``.
+
+
+_`Command Line`
+===============
+
+The basic usage of the ovf tool is one of the following::
+
+ ovfconverter import filename
+ ovfconverter export --format=<format> filename
+
+This will result in a conversion based solely on the content of provided
+file. In case some information required to make the conversion is
+missing, an error will occur.
+
+If output directory should be different than the standard Ganeti export
+directory (usually ``/srv/ganeti/export``), option ``--output-dir``
+can be used.
+
+If name of resulting entity should be different than the one read from
+the file, use ``--name`` option.
+
+Import options
+--------------
+
+Import options that ``ovfconverter`` supports include options for
+backend, disks, hypervisor, networks and operating system. If an option
+is given, it overrides the values provided in the OVF file.
+
+Backend
+^^^^^^^
+``--backend=option=value`` can be used to set auto balance, number of
+vcpus and amount of RAM memory.
+
+Please note that when you do not provide full set of options, the
+omitted ones will be set to cluster defaults (``auto``).
+
+Disks
+^^^^^
+``--disk-template=diskless`` causes the converter to ignore all other
+disk options - both from the .ovf file and the command line. Other disk
+template options include ``plain``, ``drbd``, ``file``, ``sharedfile``
+and ``blockdev``.
+
+``--disk=number:size=value`` causes to create disks instead of
+converting them from OVF package; numbers should start with ``0`` and be
+consecutive.
+
+Hypervisor
+^^^^^^^^^^
+``-H hypervisor_name`` and ``-H hypervisor_name:option=value``
+provide options for hypervisor.
+
+Network
+^^^^^^^
+``--no-nics`` option causes converter to ignore any network information
+provided.
+
+``--network=number:option=value`` sets network information according to
+provided data, ignoring the OVF package configuration.
+
+Operating System
+^^^^^^^^^^^^^^^^
+``--os-type=type`` sets os type accordingly, this option is **required**
+when importing from OVF instance not created from Ganeti config file.
+
+``--os-parameters`` provides options for chosen operating system.
+
+Tags
+^^^^
+``--tags=tag1,tag2,tag3`` is a means of providing tags specific for the
+instance.
+
+
+After the conversion is completed, you may use ``gnt-backup import`` to
+import the instance into Ganeti.
+
+Example::
+
+ ovfconverter import file.ovf --disk-template=diskless \
+ --os-type=lenny-image \
+ --backend=vcpus=1,memory=512,auto_balance \
+ -H:xen-pvm \
+ --net=0:mode=bridged,link=xen-br0 \
+ --name=xen.i1
+ [...]
+ gnt-backup import xen.i1
+ [...]
+ gnt-instance list
+
+Export options
+--------------
+Export options include choice of disk formats to convert the disk image
+(``--format``) and compression of the disk into gzip format
+(``--compress``). The user also has the choice of skipping the
+Ganeti-specific part of the OVF document (``--external``).
+
+By default, exported OVF package will not be contained in the OVA
+package, but this may be changed by adding ``--ova`` option.
+
+Please note that in order to create an OVF package, it is first
+required that you export your VM using ``gnt-backup export``.
+
+Example::
+
+ gnt-backup export -n node1.xen xen.i1
+ [...]
+ ovfconverter export --format=vmdk --ova --external \
+ --output-dir=~/xen.i1 \
+ /srv/ganeti/export/xen.i1.node1.xen/config.ini
Implementation details
======================
-TODO
+Disk conversion
+---------------
+
+Disk conversion for both import and export is done using external tool
+called ``qemu-img``. The same tool is used to determine the type of
+disk, as well as its virtual size.
+
+
+Import
+------
+
+Import functionality is implemented using two classes - OVFReader and
+OVFImporter.
+
+OVFReader class is used to read the contents of the ``.ovf`` file. Every
+action that requires ``.ovf`` file access is done through that class.
+It also performs validation of manifest, if one is present.
+
+The result of reading some part of file is typically a dictionary or a
+string, containing options which correspond to the ones in
+``config.ini`` file. Only in case of disks, the resulting value is
+different - it is then a list of disk names. The reason for that is the
+need for conversion.
+
+OVFImporter class performs all the command-line-like tasks, such as
+unpacking OVA package, removing temporary directory, converting disk
+file to raw format or saving the configuration file on disk.
+It also contains a set of functions that read the options provided in
+the command line.
+
+
+Typical workflow for the import is very simple:
+
+- read the ``.ovf`` file into memory
+- verify manifest
+- parse each element of the configuration file: name, disk template,
+ hypervisor, operating system, backend parameters, network and disks
+
+ - check if option for the element can be read from command line
+ options
+
+ - if yes: parse options from command line
+
+ - otherwise: read the appropriate portion of ``.ovf`` file
+
+- save gathered information in ``config.ini`` file
+
+Export
+------
+
+Similar to import, export functionality also uses two classes -
+OVFWriter and OVFExporter.
+
+OVFWriter class produces XML output based on the information given. Its
+sole role is to separate the creation of ``.ovf`` file content.
+
+OVFExporter class gathers information from ``config.ini`` file or
+command line and performs necessary operations like disk conversion, disk
+compression, manifest creation and OVA package creation.
+
+Typical workflow for the export is even simpler than for the import:
+
+- read the ``config.ini`` file into memory
+- gather information about certain parts of the instance, convert and
+ compress disks if desired
+- save each of these elements as a fragment of XML tree
+- save the XML tree as ``.ovf`` file
+- create manifest file and fill it with appropriate checksums
+- if ``--ova`` option was chosen, pack the results into ``.ova`` tarfile
+
+
+Work in progress
+----------------
+
+- conversion to/from raw disk should be quicker
+- add graphic card memory to export information (12 MB of memory)
+- space requirements for conversion + compression + ova are currently
+ enormous
+- add support for disks in chunks
+- add support for certificates
+- investigate why VMWare's ovftool does not work with ovfconverter's
+  compression and ova packaging -- maybe noteworthy: if the OVA archive
+  does not have a disk (i.e. the OVA package contains only the .ovf and
+  .mf files), then the ovftool works
+ then the ovftool works
+- investigate why new versions of VirtualBox have problems with OVF
+ created by ovfconverter (everything works fine with 3.16 version, but
+ not with 4.0)
+
.. vim: set textwidth=72 :
.. Local Variables:
--- /dev/null
+========================
+ Resource model changes
+========================
+
+
+Introduction
+============
+
+In order to manage virtual machines across the cluster, Ganeti needs to
+understand the resources present on the nodes, the hardware and software
+limitations of the nodes, and how much can be allocated safely on each
+node. Some of these decisions are delegated to IAllocator plugins, for
+easier site-level customisation.
+
+Similarly, the HTools suite has an internal model that simulates the
+hardware resource changes in response to Ganeti operations, in order to
+provide both an iallocator plugin and cluster balancing
+functionality.
+
+While currently the HTools model is much more advanced than Ganeti's,
+neither one is flexible enough and both are heavily geared toward a
+specific Xen model; they fail to work well with (e.g.) KVM or LXC, or
+with Xen when :term:`tmem` is enabled. Furthermore, the set of metrics
+contained in the models is limited to historic requirements and fails to
+account for (e.g.) heterogeneity in the I/O performance of the nodes.
+
+Current situation
+=================
+
+Ganeti
+------
+
+At this moment, Ganeti itself doesn't do any static modelling of the
+cluster resources. It only does some runtime checks:
+
+- when creating instances, for the (current) free disk space
+- when starting instances, for the (current) free memory
+- during cluster verify, for enough N+1 memory on the secondaries, based
+ on the (current) free memory
+
+Basically this model is a pure :term:`SoW` one, and it works well when
+there are other instances/LVs on the nodes, as it allows Ganeti to deal
+with ‘orphan’ resource usage, but on the other hand it has many issues,
+described below.
+
+HTools
+------
+
+Since HTools does a pure in-memory modelling of the cluster changes as
+it executes the balancing or allocation steps, it had to introduce a
+static (:term:`SoR`) cluster model.
+
+The model is constructed based on the received node properties from
+Ganeti (hence it basically is constructed on what Ganeti can export).
+
+Disk
+~~~~
+
+For disk it consists of just the total (``tdsk``) and the free disk
+space (``fdsk``); we don't directly track the used disk space. On top of
+this, we compute and warn if the sum of disk sizes used by instances does
+not match with ``tdsk - fdsk``, but otherwise we do not track this
+separately.
+
+Memory
+~~~~~~
+
+For memory, the model is more complex and tracks some variables that
+Ganeti itself doesn't compute. We start from the total (``tmem``), free
+(``fmem``) and node memory (``nmem``) as supplied by Ganeti, and
+additionally we track:
+
+instance memory (``imem``)
+ the total memory used by primary instances on the node, computed
+ as the sum of instance memory
+
+reserved memory (``rmem``)
+ the memory reserved by peer nodes for N+1 redundancy; this memory is
+ tracked per peer-node, and the maximum value out of the peer memory
+ lists is the node's ``rmem``; when not using DRBD, this will be
+ equal to zero
+
+unaccounted memory (``xmem``)
+  memory that cannot be accounted for via the Ganeti model; this is
+ computed at startup as::
+
+ tmem - imem - nmem - fmem
+
+ and is presumed to remain constant irrespective of any instance
+ moves
+
+available memory (``amem``)
+ this is simply ``fmem - rmem``, so unless we use DRBD, this will be
+ equal to ``fmem``
+
+``tmem``, ``nmem`` and ``xmem`` are presumed constant during the
+instance moves, whereas the ``fmem``, ``imem``, ``rmem`` and ``amem``
+values are updated according to the executed moves.
+
+CPU
+~~~
+
+The CPU model is different than the disk/memory models, since it's the
+only one where:
+
+#. we do oversubscribe physical CPUs
+#. and there is no natural limit for the number of VCPUs we can allocate
+
+We therefore track the total number of VCPUs used on the node and the
+number of physical CPUs, and we cap the vcpu-to-cpu ratio in order to
+make this somewhat more similar to the other resources which are
+limited.
+
+Dynamic load
+~~~~~~~~~~~~
+
+There is also a model that deals with *dynamic load* values in
+htools. As far as we know, it is not currently actually used with real
+load values, but it is active by default with unitary values for all
+instances; it currently tracks these metrics:
+
+- disk load
+- memory load
+- cpu load
+- network load
+
+Even though we do not assign real values to these load values, the fact
+that we at least sum them means that the algorithm tries to equalise
+these loads, and especially the network load, which is otherwise not
+tracked at all. The practical result (due to a combination of these four
+metrics) is that the number of secondaries will be balanced.
+
+Limitations
+-----------
+
+
+There are unfortunately many limitations to the current model.
+
+Memory
+~~~~~~
+
+The memory model doesn't work well in case of KVM. For Xen, the memory
+for the node (i.e. ``dom0``) can be static or dynamic; we don't support
+the latter case, but for the former case, the static value is configured
+in Xen/kernel command line, and can be queried from Xen
+itself. Therefore, Ganeti can query the hypervisor for the memory used
+for the node; the same model was adopted for the chroot/KVM/LXC
+hypervisors, but in these cases there's no natural value for the memory
+used by the base OS/kernel, and we currently try to compute a value for
+the node memory based on current consumption. This, being variable,
+breaks the assumptions in both Ganeti and HTools.
+
+This problem also shows for the free memory: if the free memory on the
+node is not constant (Xen with :term:`tmem` auto-ballooning enabled), or
+if the node and instance memory are pooled together (Linux-based
+hypervisors like KVM and LXC), the current value of the free memory is
+meaningless and cannot be used for instance checks.
+
+A separate issue related to the free memory tracking is that since we
+don't track memory use but rather memory availability, an instance that
+is temporarily down changes Ganeti's understanding of the memory status of
+the node. This can lead to problems such as:
+
+.. digraph:: "free-mem-issue"
+
+ node [shape=box];
+ inst1 [label="instance1"];
+ inst2 [label="instance2"];
+
+ node [shape=note];
+ nodeA [label="fmem=0"];
+ nodeB [label="fmem=1"];
+ nodeC [label="fmem=0"];
+
+ node [shape=ellipse, style=filled, fillcolor=green]
+
+ {rank=same; inst1 inst2}
+
+ stop [label="crash!", fillcolor=orange];
+ migrate [label="migrate/ok"];
+ start [style=filled, fillcolor=red, label="start/fail"];
+ inst1 -> stop -> start;
+ stop -> migrate -> start [style=invis, weight=0];
+ inst2 -> migrate;
+
+ {rank=same; inst1 inst2 nodeA}
+ {rank=same; stop nodeB}
+ {rank=same; migrate nodeC}
+
+ nodeA -> nodeB -> nodeC [style=invis, weight=1];
+
+The behaviour here is wrong; the migration of *instance2* to the node in
+question will succeed or fail depending on whether *instance1* is
+running or not. And for *instance1*, it can lead to cases where, if it
+crashes, it cannot restart anymore.
+
+Finally, not a problem but rather a missing important feature is support
+for memory over-subscription: both Xen and KVM support memory
+ballooning, even automatic memory ballooning, for a while now. The
+entire memory model is based on a fixed memory size for instances, and
+if memory ballooning is enabled, it will “break” the HTools
+algorithm. Even the fact that KVM instances do not use all memory from
+the start creates problems (although not as severe, since it will grow and
+stabilise in the end).
+
+Disks
+~~~~~
+
+Because we only track disk space currently, this means if we have a
+cluster of ``N`` otherwise identical nodes but half of them have 10
+drives of size ``X`` and the other half 2 drives of size ``5X``, HTools
+will consider them exactly the same. However, in the case of mechanical
+drives at least, the I/O performance will differ significantly based on
+spindle count, and a “fair” load distribution should take this into
+account (a similar comment can be made about processor/memory/network
+speed).
+
+Another problem related to the spindle count is the LVM allocation
+algorithm. Currently, the algorithm always creates (or tries to create)
+striped volumes, with the stripe count being hard-coded to the
+``./configure`` parameter ``--with-lvm-stripecount``. This creates
+problems like:
+
+- when installing from a distribution package, all clusters will be
+ either limited or overloaded due to this fixed value
+- it is not possible to mix heterogeneous nodes (even in different node
+ groups) and have optimal settings for all nodes
+- the striping value applies both to LVM/DRBD data volumes (which are on
+ the order of gigabytes to hundreds of gigabytes) and to DRBD metadata
+  volumes (whose size is always fixed at 128MB); when striping such
+ small volumes over many PVs, their size will increase needlessly (and
+ this can confuse HTools' disk computation algorithm)
+
+Moreover, the allocation currently allocates based on a ‘most free
+space’ algorithm. This balances the free space usage on disks, but on
+the other hand it tends to mix rather badly the data and metadata
+volumes of different instances. For example, it cannot do the following:
+
+- keep DRBD data and metadata volumes on the same drives, in order to
+ reduce exposure to drive failure in a many-drives system
+- keep DRBD data and metadata volumes on different drives, to reduce
+ performance impact of metadata writes
+
+Additionally, while Ganeti supports setting the volume separately for
+data and metadata volumes at instance creation, there are no defaults
+for this setting.
+
+Similar to the above stripe count problem (which is about not good
+enough customisation of Ganeti's behaviour), we have limited
+pass-through customisation of the various options of our storage
+backends; while LVM has a system-wide configuration file that can be
+used to tweak some of its behaviours, for DRBD we don't use the
+:command:`drbdadm` tool, and instead we call :command:`drbdsetup`
+directly, with a fixed/restricted set of options; so for example one
+cannot tweak the buffer sizes.
+
+Another current problem is that the support for shared storage in HTools
+is still limited, but this problem is outside of this design document.
+
+Locking
+~~~~~~~
+
+A further problem generated by the “current free” model is that during a
+long operation which affects resource usage (e.g. disk replaces,
+instance creations) we have to keep the respective objects locked
+(sometimes even in exclusive mode), since we don't want any concurrent
+modifications to the *free* values.
+
+A classic example of the locking problem is the following:
+
+.. digraph:: "iallocator-lock-issues"
+
+ rankdir=TB;
+
+ start [style=invis];
+ node [shape=box,width=2];
+ job1 [label="add instance\niallocator run\nchoose A,B"];
+ job1e [label="finish add"];
+ job2 [label="add instance\niallocator run\nwait locks"];
+ job2s [label="acquire locks\nchoose C,D"];
+ job2e [label="finish add"];
+
+ job1 -> job1e;
+ job2 -> job2s -> job2e;
+ edge [style=invis,weight=0];
+ start -> {job1; job2}
+ job1 -> job2;
+ job2 -> job1e;
+ job1e -> job2s [style=dotted,label="release locks"];
+
+In the above example, the second IAllocator run will wait for locks for
+nodes ``A`` and ``B``, even though in the end the second instance will
+be placed on another set of nodes (``C`` and ``D``). This wait shouldn't
+be needed, since right after the first IAllocator run has finished,
+:command:`hail` knows the status of the cluster after the allocation,
+and it could answer the question for the second run too; however, Ganeti
+doesn't have such visibility into the cluster state and thus it is
+forced to wait with the second job.
+
+Similar examples can be made about replace disks (another long-running
+opcode).
+
+.. _label-policies:
+
+Policies
+~~~~~~~~
+
+For most of the resources, we have metrics defined by policy: e.g. the
+over-subscription ratio for CPUs, the amount of space to reserve,
+etc. Furthermore, although there are no such definitions in Ganeti such
+as minimum/maximum instance size, a real deployment will need to have
+them, especially in a fully-automated workflow where end-users can
+request instances via an automated interface (that talks to the cluster
+via RAPI, LUXI or command line). However, such an automated interface
+will need to also take into account cluster capacity, and if the
+:command:`hspace` tool is used for the capacity computation, it needs to
+be told the maximum instance size; however, it has a built-in minimum
+instance size which is not customisable.
+
+It is clear that this situation leads to duplicate definition of
+resource policies which makes it hard to easily change per-cluster (or
+globally) the respective policies, and furthermore it creates
+inconsistencies if such policies are not enforced at the source (i.e. in
+Ganeti).
+
+Balancing algorithm
+~~~~~~~~~~~~~~~~~~~
+
+The balancing algorithm, as documented in the HTools ``README`` file,
+tries to minimise the cluster score; this score is based on a set of
+metrics that describe both exceptional conditions and how spread the
+instances are across the nodes. In order to achieve this goal, it moves
+the instances around, with a series of moves of various types:
+
+- disk replaces (for DRBD-based instances)
+- instance failover/migrations (for all types)
+
+However, the algorithm only looks at the cluster score, and not at the
+*“cost”* of the moves. In other words, the following can and will happen
+on a cluster:
+
+.. digraph:: "balancing-cost-issues"
+
+ rankdir=LR;
+ ranksep=1;
+
+ start [label="score α", shape=hexagon];
+
+ node [shape=box, width=2];
+ replace1 [label="replace_disks 500G\nscore α-3ε\ncost 3"];
+ replace2a [label="replace_disks 20G\nscore α-2ε\ncost 2"];
+ migrate1 [label="migrate\nscore α-ε\ncost 1"];
+
+ choose [shape=ellipse,label="choose min(score)=α-3ε\ncost 3"];
+
+ start -> {replace1; replace2a; migrate1} -> choose;
+
+Even though a migration is much, much cheaper than a disk replace (in
+terms of network and disk traffic on the cluster), if the disk replace
+results in a score infinitesimally smaller, then it will be
+chosen. Similarly, between two disk replaces, one moving e.g. ``500GiB``
+and one moving ``20GiB``, the first one will be chosen if it results in
+a score smaller than the second one. Furthermore, even if the resulting
+scores are equal, the first computed solution will be kept, whichever it
+is.
+
+Fixing this algorithmic problem is doable, but currently Ganeti doesn't
+export enough information about nodes to make an informed decision; in
+the above example, if the ``500GiB`` move is between nodes having fast
+I/O (both disks and network), it makes sense to execute it over a disk
+replace of ``100GiB`` between nodes with slow I/O, so simply relating to
+the properties of the move itself is not enough; we need more node
+information for cost computation.
+
+Allocation algorithm
+~~~~~~~~~~~~~~~~~~~~
+
+.. note:: This design document will not address this limitation, but it
+ is worth mentioning as it directly related to the resource model.
+
+The current allocation/capacity algorithm works as follows (per
+node-group)::
+
+ repeat:
+ allocate instance without failing N+1
+
+This simple algorithm, and its use of ``N+1`` criterion, has a built-in
+limit of 1 machine failure in case of DRBD. This means the algorithm
+guarantees that, if using DRBD storage, there are enough resources to
+(re)start all affected instances in case of one machine failure. This
+relates mostly to memory; there is no account for CPU over-subscription
+(i.e. in case of failure, make sure we can failover while still not
+going over CPU limits), or for any other resource.
+
+In case of shared storage, there's not even the memory guarantee, as the
+N+1 protection doesn't work for shared storage.
+
+If a given cluster administrator wants to survive up to two machine
+failures, or wants to ensure CPU limits too for DRBD, there is no
+possibility to configure this in HTools (neither in :command:`hail` nor
+in :command:`hspace`). Current workarounds employ, for example, deducting a
+certain number of instances from the size computed by :command:`hspace`,
+but this is a very crude method, and requires that instance creations
+are limited before Ganeti (otherwise :command:`hail` would allocate
+until the cluster is full).
+
+Proposed architecture
+=====================
+
+
+There are two main changes proposed:
+
+- changing the resource model from a pure :term:`SoW` to a hybrid
+ :term:`SoR`/:term:`SoW` one, where the :term:`SoR` component is
+ heavily emphasised
+- extending the resource model to cover additional properties,
+ completing the “holes” in the current coverage
+
+The second change is rather straightforward, but will add more
+complexity in the modelling of the cluster. The first change, however,
+represents a significant shift from the current model, which Ganeti had
+from its beginnings.
+
+Lock-improved resource model
+----------------------------
+
+Hybrid SoR/SoW model
+~~~~~~~~~~~~~~~~~~~~
+
+The resources of a node can be characterised in two broad classes:
+
+- mostly static resources
+- dynamically changing resources
+
+In the first category, we have things such as total core count, total
+memory size, total disk size, number of network interfaces etc. In the
+second category we have things such as free disk space, free memory, CPU
+load, etc. Note that nowadays we don't have (anymore) fully-static
+resources: features like CPU and memory hot-plug, online disk replace,
+etc. mean that theoretically all resources can change (there are some
+practical limitations, of course).
+
+Even though the rate of change of the two resource types is wildly
+different, right now Ganeti handles both the same. Given that the
+interval of change of the semi-static ones is much bigger than most
+Ganeti operations, even more than lengthy sequences of Ganeti jobs, it
+makes sense to treat them separately.
+
+The proposal is then to move the following resources into the
+configuration and treat the configuration as the authoritative source
+for them (a :term:`SoR` model):
+
+- CPU resources:
+ - total core count
+ - node core usage (*new*)
+- memory resources:
+ - total memory size
+ - node memory size
+ - hypervisor overhead (*new*)
+- disk resources:
+ - total disk size
+ - disk overhead (*new*)
+
+Since these resources can nevertheless change at run-time, we will need
+functionality to update the recorded values.
+
+Pre-computing dynamic resource values
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Remember that the resource model used by HTools models the clusters as
+obeying the following equations:
+
+ disk\ :sub:`free` = disk\ :sub:`total` - ∑ disk\ :sub:`instances`
+
+ mem\ :sub:`free` = mem\ :sub:`total` - ∑ mem\ :sub:`instances` - mem\
+ :sub:`node` - mem\ :sub:`overhead`
+
+As this model worked fine for HTools, we can consider it valid and adopt
+it in Ganeti. Furthermore, note that all values in the right-hand side
+come now from the configuration:
+
+- the per-instance usage values were already stored in the configuration
+- the other values will be moved to the configuration per the previous
+ section
+
+This means that we can now compute the free values without having to
+actually live-query the nodes, which brings a significant advantage.
+
+There are a couple of caveats to this model though. First, as the
+run-time state of the instance is no longer taken into consideration, it
+means that we have to introduce a new *offline* state for an instance
+(similar to the node one). In this state, the instance's runtime
+resources (memory and VCPUs) are no longer reserved for it, and can be
+reused by other instances. Static resources like disk and MAC addresses
+are still reserved though. Transitioning into and out of this reserved
+state will be more involved than simply stopping/starting the instance
+(e.g. de-offlining can fail due to missing resources). This complexity
+is compensated by the increased consistency of what guarantees we have
+in the stopped state (we always guarantee resource reservation), and the
+potential for management tools to restrict which users can transition
+into/out of this state separate from which users can stop/start the
+instance.
+
+Separating per-node resource locks
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Many of the current node locks in Ganeti exist in order to guarantee
+correct resource state computation, whereas others are designed to
+guarantee reasonable run-time performance of nodes (e.g. by not
+overloading the I/O subsystem). This is an unfortunate coupling, since
+it means for example that the following two operations conflict in
+practice even though they are orthogonal:
+
+- replacing an instance's disk on a node
+- computing node disk/memory free for an IAllocator run
+
+This conflict increases significantly the lock contention on a big/busy
+cluster and is at odds with the goal of increasing the cluster size.
+
+The proposal is therefore to add a new level of locking that is only
+used to prevent concurrent modification to the resource states (either
+node properties or instance properties) and not for long-term
+operations:
+
+- instance creation needs to acquire and keep this lock until adding the
+ instance to the configuration
+- instance modification needs to acquire and keep this lock until
+ updating the instance
+- node property changes will need to acquire this lock for the
+ modification
+
+The new lock level will sit before the instance level (right after BGL)
+and could either be single-valued (like the “Big Ganeti Lock”), in which
+case we won't be able to modify two nodes at the same time, or per-node,
+in which case the list of locks at this level needs to be synchronised
+with the node lock level. To be determined.
+
+Lock contention reduction
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Based on the above, the locking contention will be reduced as follows:
+IAllocator calls will no longer need the ``LEVEL_NODE: ALL_SET`` lock,
+only the resource lock (in exclusive mode). Hence allocating/computing
+evacuation targets will no longer conflict for longer than the time to
+compute the allocation solution.
+
+The remaining long-running locks will be the DRBD replace-disks ones
+(exclusive mode). These can also be removed, or changed into shared
+locks, but that is a separate design change.
+
+.. admonition:: FIXME
+
+ Need to rework instance replace disks. I don't think we need exclusive
+ locks for replacing disks: it is safe to stop/start the instance while
+ it's doing a replace disks. Only modify would need exclusive, and only
+ for transitioning into/out of offline state.
+
+Instance memory model
+---------------------
+
+In order to support ballooning, the instance memory model needs to be
+changed from a “memory size” one to a “min/max memory size”. This
+interacts with the new static resource model, however, and thus we need
+to declare a-priori the expected oversubscription ratio on the cluster.
+
+The new minimum memory size parameter will be similar to the current
+memory size; the cluster will guarantee that in all circumstances, all
+instances will have available their minimum memory size. The maximum
+memory size will permit burst usage of more memory by instances, with
+the restriction that the sum of maximum memory usage will not be more
+than the free memory times the oversubscription factor:
+
+ ∑ memory\ :sub:`min` ≤ memory\ :sub:`available`
+
+ ∑ memory\ :sub:`max` ≤ memory\ :sub:`free` * oversubscription_ratio
+
+The hypervisor will have the possibility of adjusting the instance's
+memory size dynamically between these two boundaries.
+
+Note that the minimum memory is related to the available memory on the
+node, whereas the maximum memory is related to the free memory. On
+DRBD-enabled clusters, this will have the advantage of using the
+reserved memory for N+1 failover for burst usage, instead of having it
+completely idle.
+
+.. admonition:: FIXME
+
+ Need to document how Ganeti forces minimum size at runtime, overriding
+ the hypervisor, in cases of failover/lack of resources.
+
+New parameters
+--------------
+
+Unfortunately the design will add a significant number of new
+parameters, and change the meaning of some of the current ones.
+
+Instance size limits
+~~~~~~~~~~~~~~~~~~~~
+
+As described in :ref:`label-policies`, we currently lack a clear
+definition of the supported instance sizes (minimum, maximum and
+standard). As such, we will add the following structure to the cluster
+parameters:
+
+- ``min_ispec``, ``max_ispec``: minimum and maximum acceptable instance
+ specs
+- ``std_ispec``: standard instance size, which will be used for capacity
+ computations and for default parameters on the instance creation
+ request
+
+Ganeti will by default reject non-standard instance sizes (lower than
+``min_ispec`` or greater than ``max_ispec``), but as usual a
+``--ignore-ipolicy`` option on the command line or in the RAPI request
+will override these constraints. The ``std_ispec`` structure will be used
+to fill in missing instance specifications on create.
+
+Each of the ispec structures will be a dictionary, since the contents
+can change over time. Initially, we will define the following variables
+in these structures:
+
++---------------+----------------------------------+--------------+
+|Name |Description |Type |
++===============+==================================+==============+
+|mem_size |Allowed memory size |int |
++---------------+----------------------------------+--------------+
+|cpu_count |Allowed vCPU count |int |
++---------------+----------------------------------+--------------+
+|disk_count |Allowed disk count |int |
++---------------+----------------------------------+--------------+
+|disk_size |Allowed disk size |int |
++---------------+----------------------------------+--------------+
+|nic_count |Alowed NIC count |int |
++---------------+----------------------------------+--------------+
+
+Inheritance
++++++++++++
+
+In a single-group cluster, the above structure is sufficient. However,
+on a multi-group cluster, it could be that the hardware specifications
+differ across node groups, and thus the following problem appears: how
+can Ganeti present unified specifications over RAPI?
+
+Since the set of instance specs is only partially ordered (as opposed to
+the sets of values of individual variable in the spec, which are totally
+ordered), it follows that we can't present unified specs. As such, the
+proposed approach is to allow the ``min_ispec`` and ``max_ispec`` to be
+customised per node-group (and export them as a list of specifications),
+and a single ``std_ispec`` at cluster level (exported as a single value).
+
+
+Allocation parameters
+~~~~~~~~~~~~~~~~~~~~~
+
+Beside the limits of min/max instance sizes, there are other parameters
+related to capacity and allocation limits. These are mostly related to
+the problems of over-allocation.
+
++-----------------+----------+---------------------------+----------+------+
+| Name |Level(s) |Description |Current |Type |
+| | | |value | |
++=================+==========+===========================+==========+======+
+|vcpu_ratio |cluster, |Maximum ratio of virtual to|64 (only |float |
+| |node group|physical CPUs |in htools)| |
++-----------------+----------+---------------------------+----------+------+
+|spindle_ratio |cluster, |Maximum ratio of instances |none |float |
+| |node group|to spindles; when the I/O | | |
+| | |model doesn't map directly | | |
+| | |to spindles, another | | |
+| | |measure of I/O should be | | |
+| | |used instead | | |
++-----------------+----------+---------------------------+----------+------+
+|max_node_failures|cluster, |Cap allocation/capacity so |1 |int |
+| |node group|that the cluster can |(hardcoded| |
+| | |survive this many node |in htools)| |
+| | |failures | | |
++-----------------+----------+---------------------------+----------+------+
+
+Since these are used mostly internally (in htools), they will be
+exported as-is from Ganeti, without explicit handling of node-groups
+grouping.
+
+Regarding ``spindle_ratio``, in this context spindles do not necessarily
+have to mean actual mechanical hard drives; it's rather a measure of
+I/O performance for internal storage.
+
+Disk parameters
+~~~~~~~~~~~~~~~
+
+The proposed model for the new disk parameters is a simple free-form one
+based on dictionaries, indexed per disk template and parameter name.
+Only the disk template parameters are visible to the user, and those are
+internally translated to logical disk level parameters.
+
+This is a simplification, because each parameter is applied to a whole
+nested structure and there is no way of fine-tuning each level's
+parameters, but it is good enough for the current parameter set. This
+model could need to be expanded, e.g., if support for three-node stacked
+DRBD setups is added to Ganeti.
+
+At JSON level, since the object key has to be a string, the keys can be
+encoded via a separator (e.g. slash), or by having two dict levels.
+
+When needed, the unit of measurement is expressed inside square
+brackets.
+
++--------+--------------+-------------------------+---------------------+------+
+|Disk |Name |Description |Current status |Type |
+|template| | | | |
++========+==============+=========================+=====================+======+
+|plain |stripes |How many stripes to use |Configured at |int |
+| | |for newly created (plain)|./configure time, not| |
+| | |logical voumes |overridable at | |
+| | | |runtime | |
++--------+--------------+-------------------------+---------------------+------+
+|drbd |data-stripes |How many stripes to use |Same as for |int |
+| | |for data volumes |plain/stripes | |
++--------+--------------+-------------------------+---------------------+------+
+|drbd |metavg |Default volume group for |Same as the main |string|
+| | |the metadata LVs |volume group, | |
+| | | |overridable via | |
+| | | |'metavg' key | |
++--------+--------------+-------------------------+---------------------+------+
+|drbd |meta-stripes |How many stripes to use |Same as for lvm |int |
+| | |for meta volumes |'stripes', suboptimal| |
+| | | |as the meta LVs are | |
+| | | |small | |
++--------+--------------+-------------------------+---------------------+------+
+|drbd |disk-barriers |What kind of barriers to |Either all enabled or|string|
+| | |*disable* for disks; |all disabled, per | |
+| | |either "n" or a string |./configure time | |
+| | |containing a subset of |option | |
+| | |"bfd" | | |
++--------+--------------+-------------------------+---------------------+------+
+|drbd |meta-barriers |Whether to disable or not|Handled together with|bool |
+| | |the barriers for the meta|disk-barriers | |
+| | |volume | | |
++--------+--------------+-------------------------+---------------------+------+
+|drbd |resync-rate |The (static) resync rate |Hardcoded in |int |
+| | |for drbd, when using the |constants.py, not | |
+| | |static syncer, in KiB/s |changeable via Ganeti| |
++--------+--------------+-------------------------+---------------------+------+
+|drbd |dynamic-resync|Whether to use the |Not supported. |bool |
+| | |dynamic resync speed | | |
+| | |controller or not. If | | |
+| | |enabled, c-plan-ahead | | |
+| | |must be non-zero and all | | |
+| | |the c-* parameters will | | |
+| | |be used by DRBD. | | |
+| | |Otherwise, the value of | | |
+| | |resync-rate will be used | | |
+| | |as a static resync speed.| | |
++--------+--------------+-------------------------+---------------------+------+
+|drbd |c-plan-ahead |Agility factor of the |Not supported. |int |
+| | |dynamic resync speed | | |
+| | |controller. (the higher, | | |
+| | |the slower the algorithm | | |
+| | |will adapt the resync | | |
+| | |speed). A value of 0 | | |
+| | |(that is the default) | | |
+| | |disables the controller | | |
+| | |[ds] | | |
++--------+--------------+-------------------------+---------------------+------+
+|drbd |c-fill-target |Maximum amount of |Not supported. |int |
+| | |in-flight resync data | | |
+| | |for the dynamic resync | | |
+| | |speed controller | | |
+| | |[sectors] | | |
++--------+--------------+-------------------------+---------------------+------+
+|drbd |c-delay-target|Maximum estimated peer |Not supported. |int |
+| | |response latency for the | | |
+| | |dynamic resync speed | | |
+| | |controller [ds] | | |
++--------+--------------+-------------------------+---------------------+------+
+|drbd |c-max-rate |Upper bound on resync |Not supported. |int |
+| | |speed for the dynamic | | |
+| | |resync speed controller | | |
+| | |[KiB/s] | | |
++--------+--------------+-------------------------+---------------------+------+
+|drbd |c-min-rate |Minimum resync speed for |Not supported. |int |
+| | |the dynamic resync speed | | |
+| | |controller [KiB/s] | | |
++--------+--------------+-------------------------+---------------------+------+
+|drbd |disk-custom |Free-form string that |Not supported |string|
+| | |will be appended to the | | |
+| | |drbdsetup disk command | | |
+| | |line, for custom options | | |
+| | |not supported by Ganeti | | |
+| | |itself | | |
++--------+--------------+-------------------------+---------------------+------+
+|drbd |net-custom |Free-form string for |Not supported |string|
+| | |custom net setup options | | |
++--------+--------------+-------------------------+---------------------+------+
+
+Currently Ganeti supports only DRBD 8.0.x, 8.2.x, 8.3.x. It will refuse
+to work with DRBD 8.4 since the :command:`drbdsetup` syntax has changed
+significantly.
+
+The barriers-related parameters have been introduced in different DRBD
+versions; please make sure that your version supports all the barrier
+parameters that you pass to Ganeti. Any version later than 8.3.0
+implements all of them.
+
+The minimum DRBD version for using the dynamic resync speed controller
+is 8.3.9, since previous versions implement different parameters.
+
+A more detailed discussion of the dynamic resync speed controller
+parameters is outside the scope of the present document. Please refer to
+the ``drbdsetup`` man page
+(`8.3 <http://www.drbd.org/users-guide-8.3/re-drbdsetup.html>`_ and
+`8.4 <http://www.drbd.org/users-guide/re-drbdsetup.html>`_). An
+interesting discussion about them can also be found in a
+`drbd-user mailing list post
+<http://lists.linbit.com/pipermail/drbd-user/2011-August/016739.html>`_.
+
+All the above parameters are at cluster and node group level; as in
+other parts of the code, the intention is that all nodes in a node group
+should be equal. It will later be decided which node group to give
+precedence to in case of instances split over node groups.
+
+.. admonition:: FIXME
+
+ Add details about when each parameter change takes effect (device
+ creation vs. activation)
+
+Node parameters
+~~~~~~~~~~~~~~~
+
+For the new memory model, we'll add the following parameters, in a
+dictionary indexed by the hypervisor name (node attribute
+``hv_state``). The rationale is that, even though multi-hypervisor
+clusters are rare, they make sense sometimes, and thus we need to
+support multiple node states (one per hypervisor).
+
+Since usually only one of the multiple hypervisors is the 'main' one
+(and the others used sparingly), capacity computation will still only
+use the first hypervisor, and not all of them. Thus we avoid possible
+inconsistencies.
+
++----------+-----------------------------------+---------------+-------+
+|Name |Description |Current state |Type |
+| | | | |
++==========+===================================+===============+=======+
+|mem_total |Total node memory, as discovered by|Queried at |int |
+| |this hypervisor |runtime | |
++----------+-----------------------------------+---------------+-------+
+|mem_node |Memory used by, or reserved for, |Queried at |int |
+| |the node itself; not that some |runtime | |
+| |hypervisors can report this in an | | |
+| |authoritative way, other not | | |
++----------+-----------------------------------+---------------+-------+
+|mem_hv |Memory used either by the |Not used, |int |
+| |hypervisor itself or lost due to |htools computes| |
+| |instance allocation rounding; |it internally | |
+| |usually this cannot be precisely | | |
+| |computed, but only roughly | | |
+| |estimated | | |
++----------+-----------------------------------+---------------+-------+
+|cpu_total |Total node cpu (core) count; |Queried at |int |
+| |usually this can be discovered |runtime | |
+| |automatically | | |
+| | | | |
+| | | | |
+| | | | |
++----------+-----------------------------------+---------------+-------+
+|cpu_node |Number of cores reserved for the |Not used at all|int |
+| |node itself; this can either be | | |
+| |discovered or set manually. Only | | |
+| |used for estimating how many VCPUs | | |
+| |are left for instances | | |
+| | | | |
++----------+-----------------------------------+---------------+-------+
+
+Of the above parameters, only the ``_total`` ones are straightforward. The
+others have sometimes strange semantics:
+
+- Xen can report ``mem_node``, if configured statically (as we
+ recommend); but Linux-based hypervisors (KVM, chroot, LXC) do not, and
+ this needs to be configured statically for these values
+- ``mem_hv``, representing unaccounted for memory, is not directly
+ computable; on Xen, it can be seen that on a N GB machine, with 1 GB
+ for dom0 and N-2 GB for instances, there's just a few MB left, instead
+  of a full 1 GB of RAM; however, the exact value varies with the total
+ memory size (at least)
+- ``cpu_node`` only makes sense on Xen (currently), in the case when we
+ restrict dom0; for Linux-based hypervisors, the node itself cannot be
+ easily restricted, so it should be set as an estimate of how "heavy"
+ the node loads will be
+
+Since these two values cannot be auto-computed from the node, we need to
+be able to declare a default at cluster level (debatable how useful they
+are at node group level); the proposal is to do this via a cluster-level
+``hv_state`` dict (per hypervisor).
+
+Beside the per-hypervisor attributes, we also have disk attributes,
+which are queried directly on the node (without hypervisor
+involvement). They are stored in a separate attribute (``disk_state``),
+which is indexed per storage type and name; currently this will be just
+``LD_LV`` and the volume name as key.
+
++-------------+-------------------------+--------------------+--------+
+|Name |Description |Current state |Type |
+| | | | |
++=============+=========================+====================+========+
+|disk_total |Total disk size |Queried at runtime |int |
+| | | | |
++-------------+-------------------------+--------------------+--------+
+|disk_reserved|Reserved disk size; this |None used in Ganeti;|int |
+| |is a lower limit on the |htools has a | |
+| |free space, if such a |parameter for this | |
+| |limit is desired | | |
++-------------+-------------------------+--------------------+--------+
+|disk_overhead|Disk that is expected to |None used in Ganeti;|int |
+| |be used by other volumes |htools detects this | |
+| |(set via |at runtime | |
+| |``reserved_lvs``); | | |
+| |usually should be zero | | |
++-------------+-------------------------+--------------------+--------+
+
+
+Instance parameters
+~~~~~~~~~~~~~~~~~~~
+
+New instance parameters, needed especially for supporting the new memory
+model:
+
++--------------+----------------------------------+-----------------+------+
+|Name |Description |Current status |Type |
+| | | | |
++==============+==================================+=================+======+
+|offline |Whether the instance is in |Not supported |bool |
+| |“permanent” offline mode; this is | | |
+| |stronger than the "admin_down” | | |
+| |state, and is similar to the node | | |
+| |offline attribute | | |
++--------------+----------------------------------+-----------------+------+
+|be/max_memory |The maximum memory the instance is|Not existent, but|int |
+| |allowed |virtually | |
+| | |identical to | |
+| | |memory | |
++--------------+----------------------------------+-----------------+------+
+
+HTools changes
+--------------
+
+All the new parameters (node, instance, cluster, not so much disk) will
+need to be taken into account by HTools, both in balancing and in
+capacity computation.
+
+Since the Ganeti's cluster model is much enhanced, Ganeti can also
+export its own reserved/overhead variables, and as such HTools can make
+less “guesses” as to the difference in values.
+
+.. admonition:: FIXME
+
+ Need to detail more the htools changes; the model is clear to me, but
+ need to write it down.
+
+.. vim: set textwidth=72 :
+.. Local Variables:
+.. mode: rst
+.. fill-column: 72
+.. End:
--- /dev/null
+==========================
+ Virtual clusters support
+==========================
+
+
+Introduction
+============
+
+Currently there are two ways to test the Ganeti (including HTools) code
+base:
+
+- unittests, which run using mocks as normal user and test small bits of
+ the code
+- QA/burnin/live-test, which require actual hardware (either physical or
+ virtual) and will build an actual cluster, with one machine to one
+ node correspondence
+
+The difference in time between these two is significant:
+
+- the unittests run in about 1-2 minutes
+- a so-called ‘quick’ QA (without burnin) runs in about an hour, and a
+ full QA could be double that time
+
+On one hand, the unittests have a clear advantage: quick to run, not
+requiring many machines, but on the other hand QA is actually able to
+run end-to-end tests (including HTools, for example).
+
+Ideally, we would have an intermediate step between these two extremes:
+be able to test most, if not all, of Ganeti's functionality but without
+requiring actual hardware, full machine ownership or root access.
+
+
+Current situation
+=================
+
+Ganeti
+------
+
+It is possible, given a manually built ``config.data`` and
+``_autoconf.py``, to run the masterd under the current user as a
+single-node cluster master. However, the node daemon and related
+functionality (cluster initialisation, master failover, etc.) are not
+directly runnable in this model.
+
+Also, masterd only works as a master of a single node cluster, due to
+our current “hostname” method of identifying nodes, which results in a
+limit of maximum one node daemon per machine, unless we use multiple
+name and IP aliases.
+
+HTools
+------
+
+In HTools the situation is better, since it doesn't have to deal with
+actual machine management: all tools can use a custom LUXI path, and can
+even load RAPI data from the filesystem (so the RAPI backend can be
+tested), and both the ‘text’ backend for hbal/hspace and the input files
+for hail are text-based, loaded from the file-system.
+
+Proposed changes
+================
+
+The end-goal is to have full support for “virtual clusters”, i.e. be
+able to run a “big” cluster (hundreds of virtual nodes and towards
+thousands of virtual instances) on a reasonably powerful, but single
+machine, under a single user account and without any special
+privileges.
+
+This would have significant advantages:
+
+- being able to test end-to-end certain changes, without requiring a
+ complicated setup
+- better able to estimate Ganeti's behaviour and performance as the
+ cluster size grows; this is something that we haven't been able to
+ test reliably yet, and as such we still have not yet diagnosed
+ scaling problems
+- easier integration with external tools (and even with HTools)
+
+``masterd``
+-----------
+
+As described above, ``masterd`` already works reasonably well in a
+virtual setup, as it won't execute external programs and it shouldn't
+directly read files from the local filesystem (or at least not
+virtualisation-related, as the master node can be a non-vm_capable
+node).
+
+``noded``
+---------
+
+The node daemon executes many privileged operations, but they can be
+split in a few general categories:
+
++---------------+-----------------------+------------------------------------+
+|Category |Description |Solution |
++===============+=======================+====================================+
+|disk operations|Disk creation and |Use only diskless or file-based |
+| |removal |instances |
++---------------+-----------------------+------------------------------------+
+|disk query |Node disk total/free, |Not supported currently, could use |
+| |used in node listing |file-based |
+| |and htools | |
++---------------+-----------------------+------------------------------------+
+|hypervisor |Instance start, stop |Use the *fake* hypervisor |
+|operations |and query | |
++---------------+-----------------------+------------------------------------+
+|instance |Bridge existence query |Unprivileged operation, can be used |
+|networking | |with an existing bridge at system |
+| | |level or use NIC-less instances |
++---------------+-----------------------+------------------------------------+
+|instance OS |OS add, OS rename, |Only used with non diskless |
+|operations |export and import |instances; could work with custom OS|
+| | |scripts (that just ``dd`` without |
+| | |mounting filesystems |
++---------------+-----------------------+------------------------------------+
+|node networking|IP address management |Not supported; Ganeti will need to |
+| |(master ip), IP query, |work without a master IP. For the IP|
+| |etc. |query operations, the test machine |
+| | |would need externally-configured IPs|
++---------------+-----------------------+------------------------------------+
+|node setup |ssh, /etc/hosts, so on |Can already be disabled from the |
+| | |cluster config |
++---------------+-----------------------+------------------------------------+
+|master failover|start/stop the master |Doable (as long as we use a single |
+| |daemon |user), might get tricky w.r.t. paths|
+| | |to executables |
++---------------+-----------------------+------------------------------------+
+|file upload |Uploading of system |The only issue could be with system |
+| |files, job queue files |files, which are not owned by the |
+| |and ganeti config |current user; internal ganeti files |
+| | |should be working fine |
++---------------+-----------------------+------------------------------------+
+|node oob |Out-of-band commands |Since these are user-defined, we can|
+| | |mock them easily |
++---------------+-----------------------+------------------------------------+
+|node OS |List the existing OSes |No special privileges needed, so |
+|discovery |and their properties |works fine as-is |
++---------------+-----------------------+------------------------------------+
+|hooks |Running hooks for given|No special privileges needed |
+| |operations | |
++---------------+-----------------------+------------------------------------+
+|iallocator |Calling an iallocator |No special privileges needed |
+| |script | |
++---------------+-----------------------+------------------------------------+
+|export/import |Exporting and importing|When exporting/importing file-based |
+| |instances |instances, this should work, as the |
+| | |listening ports are dynamically |
+| | |chosen |
++---------------+-----------------------+------------------------------------+
+|hypervisor |The validation of |As long as the hypervisors don't |
+|validation |hypervisor parameters |call to privileged commands, it |
+| | |should work |
++---------------+-----------------------+------------------------------------+
+|node powercycle|The ability to power |Privileged, so not supported, but |
+| |cycle a node remotely |anyway not very interesting for |
+| | |testing |
++---------------+-----------------------+------------------------------------+
+
+It seems that much of the functionality works as is, or could work with
+small adjustments, even in a non-privileged setup. The bigger problem is
+the actual use of multiple node daemons per machine.
+
+Multiple ``noded`` per machine
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Currently Ganeti identifies nodes simply by their hostname. Since
+changing this method would imply significant changes to tracking the
+nodes, the proposal is to simply have as many IPs per the (single)
+machine that is used for tests as nodes, and have each IP correspond to
+a different name, and thus no changes are needed to the core RPC
+library. Unfortunately this has the downside of requiring root rights
+for setting up the extra IPs and hostnames.
+
+An alternative option is to implement per-node IP/port support in Ganeti
+(especially in the RPC layer), which would eliminate the root rights. We
+expect that this will get implemented as a second step of this design.
+
+The only remaining problem is with sharing the ``localstatedir``
+structure (lib, run, log) amongst the daemons, for which we propose to
+add a command line parameter which can override this path (via injection
+into ``_autoconf.py``). The rationale for this is two-fold:
+
+- having two or more node daemons writing to the same directory might
+ introduce artificial scenarios not existent in real life; currently
+ noded either owns the entire ``/var/lib/ganeti`` directory or shares
+ it with masterd, but never with another noded
+- having separate directories allows cluster verify to check correctly
+ consistency of file upload operations; otherwise, as long as one node
+ daemon wrote a file successfully, the results from all others are
+ “lost”
+
+
+``rapi``
+--------
+
+The RAPI daemon is not privileged and furthermore we only need one per
+cluster, so it presents no issues.
+
+``confd``
+---------
+
+``confd`` has somewhat the same issues as the node daemon regarding
+multiple daemons per machine, but the per-address binding still works.
+
+``ganeti-watcher``
+------------------
+
+Since the startup of daemons will be customised with per-IP binds, the
+watcher either has to be modified to not activate the daemons, or the
+start-stop tool has to take this into account. Due to watcher's use of
+the hostname, it's recommended that the master node is set to the
+machine hostname (also a requirement for the master daemon).
+
+CLI scripts
+-----------
+
+As long as the master node is set to the machine hostname, these should
+work fine.
+
+Cluster initialisation
+----------------------
+
+It could be possible that the cluster initialisation procedure is a bit
+more involved (this was not tried yet). In any case, we can build a
+``config.data`` file manually, without having to actually run
+``gnt-cluster init``.
+
+Needed tools
+============
+
+With the above investigation results in mind, the only thing we need
+are:
+
+- a tool to setup per-virtual node tree structure of ``localstatedir``
+ and setup correctly the extra IP/hostnames
+- changes to the startup daemon tools to launch correctly the daemons
+ per virtual node
+- changes to ``noded`` to override the ``localstatedir`` path
+- documentation for running such a virtual cluster
+- and eventual small fixes to the node daemon backend functionality, to
+ better separate privileged and non-privileged code
+
+.. vim: set textwidth=72 :
+.. Local Variables:
+.. mode: rst
+.. fill-column: 72
+.. End:
- `HsColour <http://hackage.haskell.org/package/hscolour>`_, again
used for documentation (it's source-code pretty-printing)
- `hlint <http://community.haskell.org/~ndm/hlint/>`_, a source code
- linter (equivalent to pylint for Python)
+ linter (equivalent to pylint for Python), recommended version 1.8 or
+ above (tested with 1.8.15)
- the `QuickCheck <http://hackage.haskell.org/package/QuickCheck>`_
library, version 2.x
- ``hpc``, which comes with the compiler, so you should already have
./autogen.sh && \
./configure --prefix=/usr/local --sysconfdir=/etc --localstatedir=/var
+Haskell development notes
+-------------------------
+
+There are a few things which can help writing or debugging the Haskell
+code.
+
+You can run the Haskell linter :command:`hlint` via::
+
+ make hlint
+
+This is not enabled by default (as the htools component is
+optional). The above command will generate both output on the terminal
+and, if any warnings are found, also an HTML report at
+``doc/hs-lint.html``.
+
+When writing or debugging TemplateHaskell code, it's useful to see
+what the splices are converted to. This can be done via::
+
+ make HEXTRA="-ddump-splices"
+
+Due to the way TemplateHaskell works, it's not straightforward to
+build profiling code. The recommended way is::
+
+ make clean
+ make htools/htools HEXTRA="-osuf .o"
+ rm htools/htools
+ make htools/htools HEXTRA="-osuf .prof_o -prof -auto-all"
+
+This will build the binary twice, per the TemplateHaskell
+documentation, the second one with profiling enabled.
+
Packaging notes
===============
.. if you add new entries, keep the alphabetical sorting!
.. glossary::
+ :sorted:
- BE Parameter
- BE stands for Backend. BE parameters are hypervisor-independent
+ ballooning
+ A term describing runtime, dynamic changes to an instance's memory,
+ without having to reboot the instance. Depending on the hypervisor
+ and configuration, the changes need to be initiated manually, or
+ they can be automatically initiated by the hypervisor based on the
+ node and instances memory usage.
+
+ BE parameter
+ BE stands for *backend*. BE parameters are hypervisor-independent
instance parameters such as the amount of RAM/virtual CPUs it has
been allocated.
+ DRBD
+ A block device driver that can be used to build RAID1 across the
+ network or even shared storage, while using only locally-attached
+ storage.
+
+ HV parameter
+ HV stands for *hypervisor*. HV parameters are the ones that describe
+ the virtualization-specific aspects of the instance; for example,
+ what kernel to use to boot the instance (if any), or what emulation
+ model to use for the emulated hard drives.
+
HVM
- Hardware virtualization mode, where the virtual machine is
- oblivious to the fact that's being virtualized and all the
- hardware is emulated.
+ Hardware virtualization mode, where the virtual machine is oblivious
+ to the fact that's being virtualized and all the hardware is
+ emulated.
LogicalUnit
- The code associated with an OpCode, e.g. the code that implements
- the startup of an instance.
+ The code associated with an :term:`OpCode`, e.g. the code that
+ implements the startup of an instance.
LUXI
- Local UniX Interface. The IPC method over unix sockets used between
- the cli tools and the master daemon.
+ Local UniX Interface. The IPC method over :manpage:`unix(7)`
+ sockets used between the CLI tools/RAPI daemon and the master
+ daemon.
+
+ OOB
+ *Out of Band*. This term describes methods of accessing a machine
+ (or parts of a machine) not via the usual network connection. For
+ example, accessing a remote server via a physical serial console or
+    via a virtual one (e.g. IPMI) counts as out of band access.
OpCode
A data structure encapsulating a basic cluster operation; for
example, start instance, add instance, etc.
PVM
- Para-virtualization mode, where the virtual machine knows it's being
- virtualized and as such there is no need for hardware emulation.
+ (Xen) Para-virtualization mode, where the virtual machine knows it's
+ being virtualized and as such there is no need for hardware
+ emulation or virtualization.
+
+ SoR
+ *State of Record*. Refers to values/properties that come from an
+ authoritative configuration source. For example, the maximum VCPU
+ over-subscription ratio is a *SoR* value, but the current
+    over-subscription ratio (based on how many instances live on the
+ node) is a :term:`SoW` value.
+
+ SoW
+ *State of the World*. Refers to values that describe directly the
+ world, as opposed to values that come from the
+ configuration. Contrast with :term:`SoR`.
+
+ tmem
+ Xen Transcendent Memory
+ (http://en.wikipedia.org/wiki/Transcendent_memory). It is a
+ mechanism used by Xen to provide memory over-subscription.
watcher
- ``ganeti-watcher`` is a tool that should be run regularly from cron
- and takes care of restarting failed instances, restarting secondary
- DRBD devices, etc. For more details, see the man page
+ :command:`ganeti-watcher` is a tool that should be run regularly
+ from cron and takes care of restarting failed instances, restarting
+ secondary DRBD devices, etc. For more details, see the man page
:manpage:`ganeti-watcher(8)`.
+
.. vim: set textwidth=72 :
+.. Local Variables:
+.. mode: rst
+.. fill-column: 72
+.. End:
Called when the master IP is activated.
:directory: master-ip-turnup
-:env. vars: MASTER_NETDEV, MASTER_IP
+:env. vars: MASTER_NETDEV, MASTER_IP, MASTER_NETMASK, CLUSTER_IP_VERSION
:pre-execution: master node
:post-execution: master node
Called when the master IP is deactivated.
:directory: master-ip-turndown
-:env. vars: MASTER_NETDEV, MASTER_IP
+:env. vars: MASTER_NETDEV, MASTER_IP, MASTER_NETMASK, CLUSTER_IP_VERSION
:pre-execution: master node
:post-execution: master node
This is the list of variables which are specific to one or more
operations.
+CLUSTER_IP_VERSION
+ IP version of the master IP (4 or 6)
+
INSTANCE_NAME
The name of the instance which is the target of the operation.
MASTER_IP
The master IP
+MASTER_NETMASK
+ Netmask of the master IP
+
INSTANCE_TAGS
A space-delimited list of the instance's tags.
cluster, indexed by instance name; the contents are similar to the
instance definitions for the allocate mode, with the addition of:
- admin_up
+ admin_state
if this instance is set to run (but not the actual status of the
instance)
iallocator.rst
rapi.rst
move-instance.rst
+ ovfconverter.rst
devnotes.rst
news.rst
glossary.rst
--- /dev/null
+=============
+OVF converter
+=============
+
+Using ``ovfconverter`` from the ``tools`` directory, one can easily
+convert a previously exported Ganeti instance into an OVF package,
+supported by VMWare, VirtualBox and some other virtualization software.
+It is also possible to use an instance exported from such a tool and
+convert it to a Ganeti config file, used by the ``gnt-backup import``
+command.
+
+For the internal design of the converter and more detailed description,
+including listing of available command line options, please refer to
+:doc:`design-ovf-support`
+
+As the amount of Ganeti-specific details, that need to be provided in
+order to import an external instance, is rather large, we will present
+here some examples of importing instances from different sources.
+It is also worth noting that there are some limitations regarding
+support for different hardware.
+
+Limitations on import
+=====================
+
+Network
+-------
+Available modes for the network include ``bridged`` and ``routed``.
+There is no ``NIC`` mode, which is typically used e.g. by VirtualBox.
+For most use cases this should not have any effect, since if
+``NetworkSection`` contains any networks which are not discovered as
+``bridged`` or ``routed``, the network mode is assigned automatically,
+using Ganeti's cluster defaults.
+
+Backend
+-------
+The only values that are taken into account regarding Virtual Hardware
+(described in ``VirtualHardwareSection`` of the ``.ovf`` file) are:
+
+- number of virtual CPUs
+- RAM memory
+- hard disks
+- networks
+
+Neither USB nor CD-ROM drive are used in Ganeti. We decided to simply
+ignore unused elements of this section, so their presence won't raise
+any warnings.
+
+Operating System
+----------------
+The list of operating systems available on a cluster is viewable using
+``gnt-os list`` command. When importing from external source, providing
+OS type in a command line (``--os-type=...``) is **required**. This is
+because even if the type is given in OVF description, it is not detailed
+enough for Ganeti to know which os-specific scripts to use.
+Please note that an instance containing disks may only be imported using
+OS script that supports raw disk images.
+
+References
+----------
+Files listed in ``ovf:References`` section cannot be hyperlinks.
+
+
+Limitations on export
+=====================
+
+Disk content
+------------
+Most Ganeti instances do not contain grub. This results in some
+problems when importing to virtualization software that does expect it.
+Examples of such software include VirtualBox and VMWare.
+
+To avoid trouble, please install grub inside the instance before
+exporting it.
+
+
+Import to VirtualBox
+--------------------
+``format`` option should be set to ``vmdk`` in order for instance to be
+importable by VirtualBox.
+
+Tests using existing versions of VirtualBox (3.16) suggest that
+VirtualBox does not support disk compression or OVA packaging. In future
+versions this might change.
+
+
+Import to VMWare
+----------------
+Importing Ganeti instance to VMWare was tested using ``ovftool``.
+
+``format`` option should be set to ``vmdk`` in order for instance to be
+importable by VMWare.
+
+Presence of Ganeti section does seem to cause some problems and
+therefore it is recommended to use ``--external`` option on export.
+
+Import of compressed disks generated by ovfconverter was impossible in
+current version of ``ovftool`` (2.1.0). This seems to be related to old
+``vmdk`` version. Since the conversion to ``vmdk`` format is done using
+``qemu-img``, it is possible and in fact expected, that future versions
+of the latter tool will resolve this problem.
+
+
+Import examples
+===============
+
+Ganeti's OVF
+------------
+If you are importing instance created using ``ovfconverter export`` --
+you most probably will not have to provide any additional information.
+In that case, the following is all you need (unless you wish to change
+some configuration options)::
+
+ ovfconverter import ganeti.ovf
+ [...]
+ gnt-instance import -n <node> <instance name>
+
+
+Virtualbox, VMWare and other external sources
+---------------------------------------------
+In case of importing from external source, you will most likely have to
+provide the following details:
+
+- ``os-type`` can be any operating system listed on ``gnt-os list``
+- ``name`` that has to be resolvable, as it will be used as instance
+ name (even if your external instance has a name, it most probably is
+ not resolvable to an IP address)
+
+These are not the only options, but the recommended ones. For the
+complete list of available options please refer to the command line
+description in :doc:`design-ovf-support`.
+
+Minimalistic but complete example of importing Virtualbox's OVF
+instance may look like::
+
+ ovfconverter virtualbox.ovf --os-type=lenny-image \
+ --name=xen.test.i1 --disk-template=diskless
+ [...]
+ gnt-instance import -n node1.xen xen.test.i1
+
+
+
+Export example
+==============
+
+Exporting an instance into ``.ovf`` format is pretty straightforward and
+requires little - if any - explanation. The only compulsory detail is
+the required disk format, provided using the ``--format`` option.
+
+Export to another Ganeti instance
+---------------------------------
+If for some reason it is convenient for you to use ``ovfconverter`` to
+move instance between clusters (e.g. because of the disk compression),
+the complete example of export may look like this::
+
+ gnt-backup export -n node1.xen xen.test.i1
+ [...]
+ ovfconverter export --format=vmdk --ova \
+ /srv/ganeti/export/xen.i1.node1.xen/config.ini
+ [...]
+
+The result is then in
+``/srv/ganeti/export/xen.i1.node1.xen/xen.test.i1.ova``
+
+Export to Virtualbox/VMWare/other external tool
+-----------------------------------------------
+Typically, when exporting to external tool we do not want
+Ganeti-specific configuration to be saved. In that case, simply use the
+``--external`` option::
+
+ gnt-backup export -n node1.xen xen.test.i1
+ [...]
+ ovfconverter export --external --output-dir ~/ganeti-instance/ \
+ /srv/ganeti/export/xen.i1.node1.xen/config.ini
+
+
+Known issues
+============
+
+Conversion errors
+-----------------
+If you are encountering trouble when converting the disk, please ensure
+that you have newest ``qemu-img`` version.
+
+OVA and compression
+-------------------
+The compressed disks and OVA packaging do not work correctly in either
+VirtualBox (old version) or VMWare.
+
+VirtualBox (3.16 OSE) does not seem to support those two, so there is
+very little we can do about this.
+
+As for VMWare, the reason behind it not accepting compressed or packed
+instances created by ovfconverter seems to be related to the old vmdk
+version.
+
+Problems on newest VirtualBox
+-----------------------------
+In Oracle VM Virtualbox 4.0+ there seems to be a problem when importing
+any OVF instance created by ovfconverter. Reasons are again unknown,
+this will be investigated.
+
+Disk space
+----------
+The disk space requirements for both import and export are at the moment
+very large - we require free space up to about 3-4 times the size of
+disks. This will most likely be changed in future versions.
+
+
+.. vim: set textwidth=72 :
+.. Local Variables:
+.. mode: rst
+.. fill-column: 72
+.. End:
``/``
+++++
-The root resource.
-
-It supports the following commands: ``GET``.
-
-``GET``
-~~~~~~~
-
-Shows the list of mapped resources.
-
-Returns: a dictionary with 'name' and 'uri' keys for each of them.
+The root resource. Has no function, but for legacy reasons the ``GET``
+method is supported.
``/2``
++++++
-The ``/2`` resource, the root of the version 2 API.
-
-It supports the following commands: ``GET``.
-
-``GET``
-~~~~~~~
-
-Show the list of mapped resources.
-
-Returns: a dictionary with ``name`` and ``uri`` keys for each of them.
+Has no function, but for legacy reasons the ``GET`` method is supported.
``/2/info``
+++++++++++
Takes no parameters.
+``/2/instances/[instance_name]/recreate-disks``
++++++++++++++++++++++++++++++++++++++++++++++++++
+
+Recreate disks of an instance. Supports the following commands:
+``POST``.
+
+``POST``
+~~~~~~~~
+
+Returns a job ID.
+
+Body parameters:
+
+.. opcode_params:: OP_INSTANCE_RECREATE_DISKS
+ :exclude: instance_name
+
+
``/2/instances/[instance_name]/disk/[disk_index]/grow``
+++++++++++++++++++++++++++++++++++++++++++++++++++++++
Returned fields: :pyeval:`utils.CommaJoin(sorted(rlib2.N_FIELDS))`
+``/2/nodes/[node_name]/powercycle``
++++++++++++++++++++++++++++++++++++
+
+Powercycles a node. Supports the following commands: ``POST``.
+
+``POST``
+~~~~~~~~
+
+Returns a job ID.
+
+
``/2/nodes/[node_name]/evacuate``
+++++++++++++++++++++++++++++++++
debootstrap
node1#
-Running a burnin
-----------------
+Running a burn-in
+-----------------
Now that the cluster is created, it is time to check that the hardware
works correctly, that the hypervisor can actually create instances,
…
node1#
-You can see in the above what operations the burnin does. Ideally, the
-burnin log would proceed successfully through all the steps and end
+You can see in the above what operations the burn-in does. Ideally, the
+burn-in log would proceed successfully through all the steps and end
cleanly, without throwing errors.
Instance operations
Mon Oct 26 05:27:39 2009 - INFO: Readding a node, the offline/drained flags were reset
Mon Oct 26 05:27:39 2009 - INFO: Node will be a master candidate
-And is now working again::
+And it is now working again::
node1# gnt-node list
Node DTotal DFree MTotal MNode MFree Pinst Sinst
node2 1.3T 1.3T 32.0G 1.0G 30.4G 1 3
node3 1.3T 1.3T 32.0G 1.0G 30.4G 0 0
-.. note:: If you have the Ganeti has been built with the htools
+.. note:: If Ganeti has been built with the htools
component enabled, you can shuffle the instances around to have a
better use of the nodes.
-}
module Ganeti.HTools.CLI
- ( Options(..)
- , OptType
- , parseOpts
- , shTemplate
- , defaultLuxiSocket
- , maybePrintNodes
- , maybePrintInsts
- , maybeShowWarnings
- -- * The options
- , oDataFile
- , oDiskMoves
- , oDiskTemplate
- , oDynuFile
- , oEvacMode
- , oExInst
- , oExTags
- , oExecJobs
- , oGroup
- , oIDisk
- , oIMem
- , oIVcpus
- , oInstMoves
- , oLuxiSocket
- , oMachineReadable
- , oMaxCpu
- , oMaxSolLength
- , oMinDisk
- , oMinGain
- , oMinGainLim
- , oMinScore
- , oNoHeaders
- , oNodeSim
- , oOfflineNode
- , oOneline
- , oOutputDir
- , oPrintCommands
- , oPrintInsts
- , oPrintNodes
- , oQuiet
- , oRapiMaster
- , oReplay
- , oSaveCluster
- , oSelInst
- , oShowHelp
- , oShowVer
- , oTieredSpec
- , oVerbose
- ) where
+ ( Options(..)
+ , OptType
+ , parseOpts
+ , shTemplate
+ , defaultLuxiSocket
+ , maybePrintNodes
+ , maybePrintInsts
+ , maybeShowWarnings
+ , setNodeStatus
+ -- * The options
+ , oDataFile
+ , oDiskMoves
+ , oDiskTemplate
+ , oDynuFile
+ , oEvacMode
+ , oExInst
+ , oExTags
+ , oExecJobs
+ , oGroup
+ , oIDisk
+ , oIMem
+ , oIVcpus
+ , oInstMoves
+ , oLuxiSocket
+ , oMachineReadable
+ , oMaxCpu
+ , oMaxSolLength
+ , oMinDisk
+ , oMinGain
+ , oMinGainLim
+ , oMinScore
+ , oNoHeaders
+ , oNodeSim
+ , oOfflineNode
+ , oOutputDir
+ , oPrintCommands
+ , oPrintInsts
+ , oPrintNodes
+ , oQuiet
+ , oRapiMaster
+ , oReplay
+ , oSaveCluster
+ , oSelInst
+ , oShowHelp
+ , oShowVer
+ , oTieredSpec
+ , oVerbose
+ ) where
import Control.Monad
import Data.Maybe (fromMaybe)
import System.Console.GetOpt
import System.IO
import System.Info
-import System
-import Text.Printf (printf)
+import System.Exit
+import Text.Printf (printf, hPrintf)
import qualified Ganeti.HTools.Version as Version(version)
+import qualified Ganeti.HTools.Container as Container
+import qualified Ganeti.HTools.Node as Node
import qualified Ganeti.Constants as C
import Ganeti.HTools.Types
import Ganeti.HTools.Utils
+import Ganeti.HTools.Loader
-- * Constants
-- | Command line options structure.
data Options = Options
- { optDataFile :: Maybe FilePath -- ^ Path to the cluster data file
- , optDiskMoves :: Bool -- ^ Allow disk moves
- , optInstMoves :: Bool -- ^ Allow instance moves
- , optDiskTemplate :: DiskTemplate -- ^ The requested disk template
- , optDynuFile :: Maybe FilePath -- ^ Optional file with dynamic use data
- , optEvacMode :: Bool -- ^ Enable evacuation mode
- , optExInst :: [String] -- ^ Instances to be excluded
- , optExTags :: Maybe [String] -- ^ Tags to use for exclusion
- , optExecJobs :: Bool -- ^ Execute the commands via Luxi
- , optGroup :: Maybe GroupID -- ^ The UUID of the group to process
- , optSelInst :: [String] -- ^ Instances to be excluded
- , optISpec :: RSpec -- ^ Requested instance specs
- , optLuxi :: Maybe FilePath -- ^ Collect data from Luxi
- , optMachineReadable :: Bool -- ^ Output machine-readable format
- , optMaster :: String -- ^ Collect data from RAPI
- , optMaxLength :: Int -- ^ Stop after this many steps
- , optMcpu :: Double -- ^ Max cpu ratio for nodes
- , optMdsk :: Double -- ^ Max disk usage ratio for nodes
- , optMinGain :: Score -- ^ Min gain we aim for in a step
- , optMinGainLim :: Score -- ^ Limit below which we apply mingain
- , optMinScore :: Score -- ^ The minimum score we aim for
- , optNoHeaders :: Bool -- ^ Do not show a header line
- , optNodeSim :: [String] -- ^ Cluster simulation mode
- , optOffline :: [String] -- ^ Names of offline nodes
- , optOneline :: Bool -- ^ Switch output to a single line
- , optOutPath :: FilePath -- ^ Path to the output directory
- , optSaveCluster :: Maybe FilePath -- ^ Save cluster state to this file
- , optShowCmds :: Maybe FilePath -- ^ Whether to show the command list
- , optShowHelp :: Bool -- ^ Just show the help
- , optShowInsts :: Bool -- ^ Whether to show the instance map
- , optShowNodes :: Maybe [String] -- ^ Whether to show node status
- , optShowVer :: Bool -- ^ Just show the program version
- , optTieredSpec :: Maybe RSpec -- ^ Requested specs for tiered mode
- , optReplay :: Maybe String -- ^ Unittests: RNG state
- , optVerbose :: Int -- ^ Verbosity level
- } deriving Show
+ { optDataFile :: Maybe FilePath -- ^ Path to the cluster data file
+ , optDiskMoves :: Bool -- ^ Allow disk moves
+ , optInstMoves :: Bool -- ^ Allow instance moves
+ , optDiskTemplate :: DiskTemplate -- ^ The requested disk template
+ , optDynuFile :: Maybe FilePath -- ^ Optional file with dynamic use data
+ , optEvacMode :: Bool -- ^ Enable evacuation mode
+ , optExInst :: [String] -- ^ Instances to be excluded
+ , optExTags :: Maybe [String] -- ^ Tags to use for exclusion
+ , optExecJobs :: Bool -- ^ Execute the commands via Luxi
+ , optGroup :: Maybe GroupID -- ^ The UUID of the group to process
+ , optSelInst :: [String] -- ^ Instances to be excluded
+ , optISpec :: RSpec -- ^ Requested instance specs
+ , optLuxi :: Maybe FilePath -- ^ Collect data from Luxi
+ , optMachineReadable :: Bool -- ^ Output machine-readable format
+ , optMaster :: String -- ^ Collect data from RAPI
+ , optMaxLength :: Int -- ^ Stop after this many steps
+ , optMcpu :: Double -- ^ Max cpu ratio for nodes
+ , optMdsk :: Double -- ^ Max disk usage ratio for nodes
+ , optMinGain :: Score -- ^ Min gain we aim for in a step
+ , optMinGainLim :: Score -- ^ Limit below which we apply mingain
+ , optMinScore :: Score -- ^ The minimum score we aim for
+ , optNoHeaders :: Bool -- ^ Do not show a header line
+ , optNodeSim :: [String] -- ^ Cluster simulation mode
+ , optOffline :: [String] -- ^ Names of offline nodes
+ , optOutPath :: FilePath -- ^ Path to the output directory
+ , optSaveCluster :: Maybe FilePath -- ^ Save cluster state to this file
+ , optShowCmds :: Maybe FilePath -- ^ Whether to show the command list
+ , optShowHelp :: Bool -- ^ Just show the help
+ , optShowInsts :: Bool -- ^ Whether to show the instance map
+ , optShowNodes :: Maybe [String] -- ^ Whether to show node status
+ , optShowVer :: Bool -- ^ Just show the program version
+ , optTieredSpec :: Maybe RSpec -- ^ Requested specs for tiered mode
+ , optReplay :: Maybe String -- ^ Unittests: RNG state
+ , optVerbose :: Int -- ^ Verbosity level
+ } deriving Show
-- | Default values for the command line options.
defaultOptions :: Options
defaultOptions = Options
- { optDataFile = Nothing
- , optDiskMoves = True
- , optInstMoves = True
- , optDiskTemplate = DTDrbd8
- , optDynuFile = Nothing
- , optEvacMode = False
- , optExInst = []
- , optExTags = Nothing
- , optExecJobs = False
- , optGroup = Nothing
- , optSelInst = []
- , optISpec = RSpec 1 4096 102400
- , optLuxi = Nothing
- , optMachineReadable = False
- , optMaster = ""
- , optMaxLength = -1
- , optMcpu = defVcpuRatio
- , optMdsk = defReservedDiskRatio
- , optMinGain = 1e-2
- , optMinGainLim = 1e-1
- , optMinScore = 1e-9
- , optNoHeaders = False
- , optNodeSim = []
- , optOffline = []
- , optOneline = False
- , optOutPath = "."
- , optSaveCluster = Nothing
- , optShowCmds = Nothing
- , optShowHelp = False
- , optShowInsts = False
- , optShowNodes = Nothing
- , optShowVer = False
- , optTieredSpec = Nothing
- , optReplay = Nothing
- , optVerbose = 1
- }
+ { optDataFile = Nothing
+ , optDiskMoves = True
+ , optInstMoves = True
+ , optDiskTemplate = DTDrbd8
+ , optDynuFile = Nothing
+ , optEvacMode = False
+ , optExInst = []
+ , optExTags = Nothing
+ , optExecJobs = False
+ , optGroup = Nothing
+ , optSelInst = []
+ , optISpec = RSpec 1 4096 102400
+ , optLuxi = Nothing
+ , optMachineReadable = False
+ , optMaster = ""
+ , optMaxLength = -1
+ , optMcpu = defVcpuRatio
+ , optMdsk = defReservedDiskRatio
+ , optMinGain = 1e-2
+ , optMinGainLim = 1e-1
+ , optMinScore = 1e-9
+ , optNoHeaders = False
+ , optNodeSim = []
+ , optOffline = []
+ , optOutPath = "."
+ , optSaveCluster = Nothing
+ , optShowCmds = Nothing
+ , optShowHelp = False
+ , optShowInsts = False
+ , optShowNodes = Nothing
+ , optShowVer = False
+ , optTieredSpec = Nothing
+ , optReplay = Nothing
+ , optVerbose = 1
+ }
-- | Abbreviation for the option type.
type OptType = OptDescr (Options -> Result Options)
oDiskTemplate :: OptType
oDiskTemplate = Option "" ["disk-template"]
(ReqArg (\ t opts -> do
- dt <- dtFromString t
+ dt <- diskTemplateFromRaw t
return $ opts { optDiskTemplate = dt }) "TEMPLATE")
"select the desired disk template"
oMachineReadable :: OptType
oMachineReadable = Option "" ["machine-readable"]
- (OptArg (\ f opts -> do
+ (OptArg (\ f opts -> do
flag <- parseYesNo True f
return $ opts { optMachineReadable = flag }) "CHOICE")
"enable machine readable output (pass either 'yes' or 'no' to\
oMaxSolLength :: OptType
oMaxSolLength = Option "l" ["max-length"]
(ReqArg (\ i opts -> Ok opts { optMaxLength = read i }) "N")
- "cap the solution at this many moves (useful for very\
- \ unbalanced clusters)"
+ "cap the solution at this many balancing or allocation \
+ \ rounds (useful for very unbalanced clusters or empty \
+ \ clusters)"
oMinDisk :: OptType
oMinDisk = Option "" ["min-disk"]
(ReqArg (\ n o -> Ok o { optOffline = n:optOffline o }) "NODE")
"set node as offline"
-oOneline :: OptType
-oOneline = Option "o" ["oneline"]
- (NoArg (\ opts -> Ok opts { optOneline = True }))
- "print the ganeti command list for reaching the solution"
-
oOutputDir :: OptType
oOutputDir = Option "d" ["output-dir"]
(ReqArg (\ d opts -> Ok opts { optOutPath = d }) "PATH")
oPrintNodes :: OptType
oPrintNodes = Option "p" ["print-nodes"]
(OptArg ((\ f opts ->
- let (prefix, realf) = case f of
- '+':rest -> (["+"], rest)
- _ -> ([], f)
- splitted = prefix ++ sepSplit ',' realf
- in Ok opts { optShowNodes = Just splitted }) .
+ let (prefix, realf) = case f of
+ '+':rest -> (["+"], rest)
+ _ -> ([], f)
+ splitted = prefix ++ sepSplit ',' realf
+ in Ok opts { optShowNodes = Just splitted }) .
fromMaybe []) "FIELDS")
"print the final node list"
oTieredSpec :: OptType
oTieredSpec = Option "" ["tiered-alloc"]
(ReqArg (\ inp opts -> do
- let sp = sepSplit ',' inp
- prs <- mapM (\(fn, val) -> fn val) $
- zip [ annotateResult "tiered specs memory" .
- parseUnit
- , annotateResult "tiered specs disk" .
- parseUnit
- , tryRead "tiered specs cpus"
- ] sp
- tspec <-
- case prs of
- [dsk, ram, cpu] -> return $ RSpec cpu ram dsk
- _ -> Bad $ "Invalid specification: " ++ inp ++
- ", expected disk,ram,cpu"
- return $ opts { optTieredSpec = Just tspec } )
+ let sp = sepSplit ',' inp
+ prs <- mapM (\(fn, val) -> fn val) $
+ zip [ annotateResult "tiered specs memory" .
+ parseUnit
+ , annotateResult "tiered specs disk" .
+ parseUnit
+ , tryRead "tiered specs cpus"
+ ] sp
+ tspec <-
+ case prs of
+ [dsk, ram, cpu] -> return $ RSpec cpu ram dsk
+ _ -> Bad $ "Invalid specification: " ++ inp ++
+ ", expected disk,ram,cpu"
+ return $ opts { optTieredSpec = Just tspec } )
"TSPEC")
"enable tiered specs allocation, given as 'disk,ram,cpu'"
-- | Usage info.
usageHelp :: String -> [OptType] -> String
usageHelp progname =
- usageInfo (printf "%s %s\nUsage: %s [OPTION...]"
- progname Version.version progname)
+ usageInfo (printf "%s %s\nUsage: %s [OPTION...]"
+ progname Version.version progname)
-- | Command line parser, using the 'Options' structure.
parseOpts :: [String] -- ^ The command line arguments
-> IO (Options, [String]) -- ^ The resulting options and leftover
-- arguments
parseOpts argv progname options =
- case getOpt Permute options argv of
- (o, n, []) ->
- do
- let (pr, args) = (foldM (flip id) defaultOptions o, n)
- po <- (case pr of
- Bad msg -> do
- hPutStrLn stderr "Error while parsing command\
- \line arguments:"
- hPutStrLn stderr msg
- exitWith $ ExitFailure 1
- Ok val -> return val)
- when (optShowHelp po) $ do
- putStr $ usageHelp progname options
- exitWith ExitSuccess
- when (optShowVer po) $ do
- printf "%s %s\ncompiled with %s %s\nrunning on %s %s\n"
- progname Version.version
- compilerName (Data.Version.showVersion compilerVersion)
- os arch :: IO ()
- exitWith ExitSuccess
- return (po, args)
- (_, _, errs) -> do
- hPutStrLn stderr $ "Command line error: " ++ concat errs
- hPutStrLn stderr $ usageHelp progname options
- exitWith $ ExitFailure 2
+ case getOpt Permute options argv of
+ (o, n, []) ->
+ do
+ let (pr, args) = (foldM (flip id) defaultOptions o, n)
+ po <- case pr of
+ Bad msg -> do
+ hPutStrLn stderr "Error while parsing command\
+ \line arguments:"
+ hPutStrLn stderr msg
+ exitWith $ ExitFailure 1
+ Ok val -> return val
+ when (optShowHelp po) $ do
+ putStr $ usageHelp progname options
+ exitWith ExitSuccess
+ when (optShowVer po) $ do
+ printf "%s %s\ncompiled with %s %s\nrunning on %s %s\n"
+ progname Version.version
+ compilerName (Data.Version.showVersion compilerVersion)
+ os arch :: IO ()
+ exitWith ExitSuccess
+ return (po, args)
+ (_, _, errs) -> do
+ hPutStrLn stderr $ "Command line error: " ++ concat errs
+ hPutStrLn stderr $ usageHelp progname options
+ exitWith $ ExitFailure 2
-- | A shell script template for autogenerated scripts.
shTemplate :: String
shTemplate =
- printf "#!/bin/sh\n\n\
- \# Auto-generated script for executing cluster rebalancing\n\n\
- \# To stop, touch the file /tmp/stop-htools\n\n\
- \set -e\n\n\
- \check() {\n\
- \ if [ -f /tmp/stop-htools ]; then\n\
- \ echo 'Stop requested, exiting'\n\
- \ exit 0\n\
- \ fi\n\
- \}\n\n"
+ printf "#!/bin/sh\n\n\
+ \# Auto-generated script for executing cluster rebalancing\n\n\
+ \# To stop, touch the file /tmp/stop-htools\n\n\
+ \set -e\n\n\
+ \check() {\n\
+ \ if [ -f /tmp/stop-htools ]; then\n\
+ \ echo 'Stop requested, exiting'\n\
+ \ exit 0\n\
+ \ fi\n\
+ \}\n\n"
-- | Optionally print the node list.
maybePrintNodes :: Maybe [String] -- ^ The field list
unless (null fix_msgs) $ do
hPutStrLn stderr "Warning: cluster has inconsistent data:"
hPutStrLn stderr . unlines . map (printf " - %s") $ fix_msgs
+
+-- | Set node properties based on command line options.
+setNodeStatus :: Options -> Node.List -> IO Node.List
+setNodeStatus opts fixed_nl = do
+ let offline_passed = optOffline opts
+ all_nodes = Container.elems fixed_nl
+ offline_lkp = map (lookupName (map Node.name all_nodes)) offline_passed
+ offline_wrong = filter (not . goodLookupResult) offline_lkp
+ offline_names = map lrContent offline_lkp
+ offline_indices = map Node.idx $
+ filter (\n -> Node.name n `elem` offline_names)
+ all_nodes
+ m_cpu = optMcpu opts
+ m_dsk = optMdsk opts
+
+ unless (null offline_wrong) $ do
+ hPrintf stderr "Error: Wrong node name(s) set as offline: %s\n"
+ (commaJoin (map lrContent offline_wrong)) :: IO ()
+ exitWith $ ExitFailure 1
+
+ let nm = Container.map (\n -> if Node.idx n `elem` offline_indices
+ then Node.setOffline n True
+ else n) fixed_nl
+ nlf = Container.map (flip Node.setMdsk m_dsk . flip Node.setMcpu m_cpu)
+ nm
+ return nlf
-}
module Ganeti.HTools.Cluster
- (
- -- * Types
- AllocSolution(..)
- , EvacSolution(..)
- , Table(..)
- , CStats(..)
- , AllocStats
- -- * Generic functions
- , totalResources
- , computeAllocationDelta
- -- * First phase functions
- , computeBadItems
- -- * Second phase functions
- , printSolutionLine
- , formatCmds
- , involvedNodes
- , splitJobs
- -- * Display functions
- , printNodes
- , printInsts
- -- * Balacing functions
- , checkMove
- , doNextBalance
- , tryBalance
- , compCV
- , compCVNodes
- , compDetailedCV
- , printStats
- , iMoveToJob
- -- * IAllocator functions
- , genAllocNodes
- , tryAlloc
- , tryMGAlloc
- , tryReloc
- , tryNodeEvac
- , tryChangeGroup
- , collapseFailures
- -- * Allocation functions
- , iterateAlloc
- , tieredAlloc
- -- * Node group functions
- , instanceGroup
- , findSplitInstances
- , splitCluster
- ) where
+ (
+ -- * Types
+ AllocSolution(..)
+ , EvacSolution(..)
+ , Table(..)
+ , CStats(..)
+ , AllocStats
+ , AllocResult
+ , AllocMethod
+ -- * Generic functions
+ , totalResources
+ , computeAllocationDelta
+ -- * First phase functions
+ , computeBadItems
+ -- * Second phase functions
+ , printSolutionLine
+ , formatCmds
+ , involvedNodes
+ , splitJobs
+ -- * Display functions
+ , printNodes
+ , printInsts
+ -- * Balancing functions
+ , checkMove
+ , doNextBalance
+ , tryBalance
+ , compCV
+ , compCVNodes
+ , compDetailedCV
+ , printStats
+ , iMoveToJob
+ -- * IAllocator functions
+ , genAllocNodes
+ , tryAlloc
+ , tryMGAlloc
+ , tryReloc
+ , tryNodeEvac
+ , tryChangeGroup
+ , collapseFailures
+ -- * Allocation functions
+ , iterateAlloc
+ , tieredAlloc
+ -- * Node group functions
+ , instanceGroup
+ , findSplitInstances
+ , splitCluster
+ ) where
import qualified Data.IntSet as IntSet
import Data.List
-import Data.Maybe (fromJust)
+import Data.Maybe (fromJust, isNothing)
import Data.Ord (comparing)
import Text.Printf (printf)
-import Control.Monad
import qualified Ganeti.HTools.Container as Container
import qualified Ganeti.HTools.Instance as Instance
-- | Allocation\/relocation solution.
data AllocSolution = AllocSolution
- { asFailures :: [FailMode] -- ^ Failure counts
- , asAllocs :: Int -- ^ Good allocation count
- , asSolutions :: [Node.AllocElement] -- ^ The actual result, length
- -- of the list depends on the
- -- allocation/relocation mode
- , asLog :: [String] -- ^ A list of informational messages
+ { asFailures :: [FailMode] -- ^ Failure counts
+ , asAllocs :: Int -- ^ Good allocation count
+ , asSolution :: Maybe Node.AllocElement -- ^ The actual allocation result
+ , asLog :: [String] -- ^ Informational messages
}
-- | Node evacuation/group change iallocator result type. This result
-- type consists of actual opcodes (a restricted subset) that are
-- transmitted back to Ganeti.
data EvacSolution = EvacSolution
- { esMoved :: [(Idx, Gdx, [Ndx])] -- ^ Instances moved successfully
- , esFailed :: [(Idx, String)] -- ^ Instances which were not
- -- relocated
- , esOpCodes :: [[OpCodes.OpCode]] -- ^ List of jobs
- }
+ { esMoved :: [(Idx, Gdx, [Ndx])] -- ^ Instances moved successfully
+ , esFailed :: [(Idx, String)] -- ^ Instances which were not
+ -- relocated
+ , esOpCodes :: [[OpCodes.OpCode]] -- ^ List of jobs
+ }
-- | Allocation results, as used in 'iterateAlloc' and 'tieredAlloc'.
type AllocResult = (FailStats, Node.List, Instance.List,
-- | A type denoting the valid allocation mode/pairs.
--
--- For a one-node allocation, this will be a @Left ['Node.Node']@,
--- whereas for a two-node allocation, this will be a @Right
--- [('Node.Node', 'Node.Node')]@.
-type AllocNodes = Either [Ndx] [(Ndx, Ndx)]
+-- For a one-node allocation, this will be a @Left ['Ndx']@, whereas
+-- for a two-node allocation, this will be a @Right [('Ndx',
+-- ['Ndx'])]@. In the latter case, the list is basically an
+-- association list, grouped by primary node and holding the potential
+-- secondary nodes in the sub-list.
+type AllocNodes = Either [Ndx] [(Ndx, [Ndx])]
-- | The empty solution we start with when computing allocations.
emptyAllocSolution :: AllocSolution
emptyAllocSolution = AllocSolution { asFailures = [], asAllocs = 0
- , asSolutions = [], asLog = [] }
+ , asSolution = Nothing, asLog = [] }
-- | The empty evac solution.
emptyEvacSolution :: EvacSolution
-- | Currently used, possibly to allocate, unallocable.
type AllocStats = (RSpec, RSpec, RSpec)
+-- | A simple type for allocation functions.
+type AllocMethod = Node.List -- ^ Node list
+ -> Instance.List -- ^ Instance list
+ -> Maybe Int -- ^ Optional allocation limit
+ -> Instance.Instance -- ^ Instance spec for allocation
+ -> AllocNodes -- ^ Which nodes we should allocate on
+ -> [Instance.Instance] -- ^ Allocated instances
+ -> [CStats] -- ^ Running cluster stats
+ -> Result AllocResult -- ^ Allocation result
+
-- * Utility functions
-- | Verifies the N+1 status and return the affected nodes.
in
(bad_nodes, bad_instances)
+-- | Extracts the node pairs for an instance. This can fail if the
+-- instance is single-homed. FIXME: this needs to be improved,
+-- together with the general enhancement for handling non-DRBD moves.
+instanceNodes :: Node.List -> Instance.Instance ->
+ (Ndx, Ndx, Node.Node, Node.Node)
+instanceNodes nl inst =
+ let old_pdx = Instance.pNode inst
+ old_sdx = Instance.sNode inst
+ old_p = Container.find old_pdx nl
+ old_s = Container.find old_sdx nl
+ in (old_pdx, old_sdx, old_p, old_s)
+
-- | Zero-initializer for the CStats type.
emptyCStats :: CStats
emptyCStats = CStats 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-- | Update stats with data from a new node.
updateCStats :: CStats -> Node.Node -> CStats
updateCStats cs node =
- let CStats { csFmem = x_fmem, csFdsk = x_fdsk,
- csAmem = x_amem, csAcpu = x_acpu, csAdsk = x_adsk,
- csMmem = x_mmem, csMdsk = x_mdsk, csMcpu = x_mcpu,
- csImem = x_imem, csIdsk = x_idsk, csIcpu = x_icpu,
- csTmem = x_tmem, csTdsk = x_tdsk, csTcpu = x_tcpu,
- csVcpu = x_vcpu,
- csXmem = x_xmem, csNmem = x_nmem, csNinst = x_ninst
- }
- = cs
- inc_amem = Node.fMem node - Node.rMem node
- inc_amem' = if inc_amem > 0 then inc_amem else 0
- inc_adsk = Node.availDisk node
- inc_imem = truncate (Node.tMem node) - Node.nMem node
- - Node.xMem node - Node.fMem node
- inc_icpu = Node.uCpu node
- inc_idsk = truncate (Node.tDsk node) - Node.fDsk node
- inc_vcpu = Node.hiCpu node
- inc_acpu = Node.availCpu node
-
- in cs { csFmem = x_fmem + fromIntegral (Node.fMem node)
- , csFdsk = x_fdsk + fromIntegral (Node.fDsk node)
- , csAmem = x_amem + fromIntegral inc_amem'
- , csAdsk = x_adsk + fromIntegral inc_adsk
- , csAcpu = x_acpu + fromIntegral inc_acpu
- , csMmem = max x_mmem (fromIntegral inc_amem')
- , csMdsk = max x_mdsk (fromIntegral inc_adsk)
- , csMcpu = max x_mcpu (fromIntegral inc_acpu)
- , csImem = x_imem + fromIntegral inc_imem
- , csIdsk = x_idsk + fromIntegral inc_idsk
- , csIcpu = x_icpu + fromIntegral inc_icpu
- , csTmem = x_tmem + Node.tMem node
- , csTdsk = x_tdsk + Node.tDsk node
- , csTcpu = x_tcpu + Node.tCpu node
- , csVcpu = x_vcpu + fromIntegral inc_vcpu
- , csXmem = x_xmem + fromIntegral (Node.xMem node)
- , csNmem = x_nmem + fromIntegral (Node.nMem node)
- , csNinst = x_ninst + length (Node.pList node)
- }
+ let CStats { csFmem = x_fmem, csFdsk = x_fdsk,
+ csAmem = x_amem, csAcpu = x_acpu, csAdsk = x_adsk,
+ csMmem = x_mmem, csMdsk = x_mdsk, csMcpu = x_mcpu,
+ csImem = x_imem, csIdsk = x_idsk, csIcpu = x_icpu,
+ csTmem = x_tmem, csTdsk = x_tdsk, csTcpu = x_tcpu,
+ csVcpu = x_vcpu,
+ csXmem = x_xmem, csNmem = x_nmem, csNinst = x_ninst
+ }
+ = cs
+ inc_amem = Node.fMem node - Node.rMem node
+ inc_amem' = if inc_amem > 0 then inc_amem else 0
+ inc_adsk = Node.availDisk node
+ inc_imem = truncate (Node.tMem node) - Node.nMem node
+ - Node.xMem node - Node.fMem node
+ inc_icpu = Node.uCpu node
+ inc_idsk = truncate (Node.tDsk node) - Node.fDsk node
+ inc_vcpu = Node.hiCpu node
+ inc_acpu = Node.availCpu node
+ in cs { csFmem = x_fmem + fromIntegral (Node.fMem node)
+ , csFdsk = x_fdsk + fromIntegral (Node.fDsk node)
+ , csAmem = x_amem + fromIntegral inc_amem'
+ , csAdsk = x_adsk + fromIntegral inc_adsk
+ , csAcpu = x_acpu + fromIntegral inc_acpu
+ , csMmem = max x_mmem (fromIntegral inc_amem')
+ , csMdsk = max x_mdsk (fromIntegral inc_adsk)
+ , csMcpu = max x_mcpu (fromIntegral inc_acpu)
+ , csImem = x_imem + fromIntegral inc_imem
+ , csIdsk = x_idsk + fromIntegral inc_idsk
+ , csIcpu = x_icpu + fromIntegral inc_icpu
+ , csTmem = x_tmem + Node.tMem node
+ , csTdsk = x_tdsk + Node.tDsk node
+ , csTcpu = x_tcpu + Node.tCpu node
+ , csVcpu = x_vcpu + fromIntegral inc_vcpu
+ , csXmem = x_xmem + fromIntegral (Node.xMem node)
+ , csNmem = x_nmem + fromIntegral (Node.nMem node)
+ , csNinst = x_ninst + length (Node.pList node)
+ }
-- | Compute the total free disk and memory in the cluster.
totalResources :: Node.List -> CStats
totalResources nl =
- let cs = foldl' updateCStats emptyCStats . Container.elems $ nl
- in cs { csScore = compCV nl }
+ let cs = foldl' updateCStats emptyCStats . Container.elems $ nl
+ in cs { csScore = compCV nl }
-- | Compute the delta between two cluster state.
--
-- was left unallocated.
computeAllocationDelta :: CStats -> CStats -> AllocStats
computeAllocationDelta cini cfin =
- let CStats {csImem = i_imem, csIdsk = i_idsk, csIcpu = i_icpu} = cini
- CStats {csImem = f_imem, csIdsk = f_idsk, csIcpu = f_icpu,
- csTmem = t_mem, csTdsk = t_dsk, csVcpu = v_cpu } = cfin
- rini = RSpec (fromIntegral i_icpu) (fromIntegral i_imem)
- (fromIntegral i_idsk)
- rfin = RSpec (fromIntegral (f_icpu - i_icpu))
- (fromIntegral (f_imem - i_imem))
- (fromIntegral (f_idsk - i_idsk))
- un_cpu = fromIntegral (v_cpu - f_icpu)::Int
- runa = RSpec un_cpu (truncate t_mem - fromIntegral f_imem)
- (truncate t_dsk - fromIntegral f_idsk)
- in (rini, rfin, runa)
+ let CStats {csImem = i_imem, csIdsk = i_idsk, csIcpu = i_icpu} = cini
+ CStats {csImem = f_imem, csIdsk = f_idsk, csIcpu = f_icpu,
+ csTmem = t_mem, csTdsk = t_dsk, csVcpu = v_cpu } = cfin
+ rini = RSpec (fromIntegral i_icpu) (fromIntegral i_imem)
+ (fromIntegral i_idsk)
+ rfin = RSpec (fromIntegral (f_icpu - i_icpu))
+ (fromIntegral (f_imem - i_imem))
+ (fromIntegral (f_idsk - i_idsk))
+ un_cpu = fromIntegral (v_cpu - f_icpu)::Int
+ runa = RSpec un_cpu (truncate t_mem - fromIntegral f_imem)
+ (truncate t_dsk - fromIntegral f_idsk)
+ in (rini, rfin, runa)
-- | The names and weights of the individual elements in the CV list.
detailedCVInfo :: [(Double, String)]
-- | Compute the mem and disk covariance.
compDetailedCV :: [Node.Node] -> [Double]
compDetailedCV all_nodes =
- let
- (offline, nodes) = partition Node.offline all_nodes
- mem_l = map Node.pMem nodes
- dsk_l = map Node.pDsk nodes
- -- metric: memory covariance
- mem_cv = stdDev mem_l
- -- metric: disk covariance
- dsk_cv = stdDev dsk_l
- -- metric: count of instances living on N1 failing nodes
- n1_score = fromIntegral . sum . map (\n -> length (Node.sList n) +
- length (Node.pList n)) .
- filter Node.failN1 $ nodes :: Double
- res_l = map Node.pRem nodes
- -- metric: reserved memory covariance
- res_cv = stdDev res_l
- -- offline instances metrics
- offline_ipri = sum . map (length . Node.pList) $ offline
- offline_isec = sum . map (length . Node.sList) $ offline
- -- metric: count of instances on offline nodes
- off_score = fromIntegral (offline_ipri + offline_isec)::Double
- -- metric: count of primary instances on offline nodes (this
- -- helps with evacuation/failover of primary instances on
- -- 2-node clusters with one node offline)
- off_pri_score = fromIntegral offline_ipri::Double
- cpu_l = map Node.pCpu nodes
- -- metric: covariance of vcpu/pcpu ratio
- cpu_cv = stdDev cpu_l
- -- metrics: covariance of cpu, memory, disk and network load
- (c_load, m_load, d_load, n_load) = unzip4 $
- map (\n ->
- let DynUtil c1 m1 d1 n1 = Node.utilLoad n
- DynUtil c2 m2 d2 n2 = Node.utilPool n
- in (c1/c2, m1/m2, d1/d2, n1/n2)
- ) nodes
- -- metric: conflicting instance count
- pri_tags_inst = sum $ map Node.conflictingPrimaries nodes
- pri_tags_score = fromIntegral pri_tags_inst::Double
- in [ mem_cv, dsk_cv, n1_score, res_cv, off_score, off_pri_score, cpu_cv
- , stdDev c_load, stdDev m_load , stdDev d_load, stdDev n_load
- , pri_tags_score ]
+ let (offline, nodes) = partition Node.offline all_nodes
+ mem_l = map Node.pMem nodes
+ dsk_l = map Node.pDsk nodes
+ -- metric: memory covariance
+ mem_cv = stdDev mem_l
+ -- metric: disk covariance
+ dsk_cv = stdDev dsk_l
+ -- metric: count of instances living on N1 failing nodes
+ n1_score = fromIntegral . sum . map (\n -> length (Node.sList n) +
+ length (Node.pList n)) .
+ filter Node.failN1 $ nodes :: Double
+ res_l = map Node.pRem nodes
+ -- metric: reserved memory covariance
+ res_cv = stdDev res_l
+ -- offline instances metrics
+ offline_ipri = sum . map (length . Node.pList) $ offline
+ offline_isec = sum . map (length . Node.sList) $ offline
+ -- metric: count of instances on offline nodes
+ off_score = fromIntegral (offline_ipri + offline_isec)::Double
+ -- metric: count of primary instances on offline nodes (this
+ -- helps with evacuation/failover of primary instances on
+ -- 2-node clusters with one node offline)
+ off_pri_score = fromIntegral offline_ipri::Double
+ cpu_l = map Node.pCpu nodes
+ -- metric: covariance of vcpu/pcpu ratio
+ cpu_cv = stdDev cpu_l
+ -- metrics: covariance of cpu, memory, disk and network load
+ (c_load, m_load, d_load, n_load) =
+ unzip4 $ map (\n ->
+ let DynUtil c1 m1 d1 n1 = Node.utilLoad n
+ DynUtil c2 m2 d2 n2 = Node.utilPool n
+ in (c1/c2, m1/m2, d1/d2, n1/n2)) nodes
+ -- metric: conflicting instance count
+ pri_tags_inst = sum $ map Node.conflictingPrimaries nodes
+ pri_tags_score = fromIntegral pri_tags_inst::Double
+ in [ mem_cv, dsk_cv, n1_score, res_cv, off_score, off_pri_score, cpu_cv
+ , stdDev c_load, stdDev m_load , stdDev d_load, stdDev n_load
+ , pri_tags_score ]
-- | Compute the /total/ variance.
compCVNodes :: [Node.Node] -> Double
-- | Compute best table. Note that the ordering of the arguments is important.
compareTables :: Table -> Table -> Table
compareTables a@(Table _ _ a_cv _) b@(Table _ _ b_cv _ ) =
- if a_cv > b_cv then b else a
+ if a_cv > b_cv then b else a
-- | Applies an instance move to a given node list and instance.
applyMove :: Node.List -> Instance.Instance
-> IMove -> OpResult (Node.List, Instance.Instance, Ndx, Ndx)
-- Failover (f)
applyMove nl inst Failover =
- let old_pdx = Instance.pNode inst
- old_sdx = Instance.sNode inst
- old_p = Container.find old_pdx nl
- old_s = Container.find old_sdx nl
- int_p = Node.removePri old_p inst
- int_s = Node.removeSec old_s inst
- force_p = Node.offline old_p
- new_nl = do -- Maybe monad
- new_p <- Node.addPriEx force_p int_s inst
- new_s <- Node.addSec int_p inst old_sdx
- let new_inst = Instance.setBoth inst old_sdx old_pdx
- return (Container.addTwo old_pdx new_s old_sdx new_p nl,
- new_inst, old_sdx, old_pdx)
- in new_nl
+ let (old_pdx, old_sdx, old_p, old_s) = instanceNodes nl inst
+ int_p = Node.removePri old_p inst
+ int_s = Node.removeSec old_s inst
+ new_nl = do -- Maybe monad
+ new_p <- Node.addPriEx (Node.offline old_p) int_s inst
+ new_s <- Node.addSec int_p inst old_sdx
+ let new_inst = Instance.setBoth inst old_sdx old_pdx
+ return (Container.addTwo old_pdx new_s old_sdx new_p nl,
+ new_inst, old_sdx, old_pdx)
+ in new_nl
-- Replace the primary (f:, r:np, f)
applyMove nl inst (ReplacePrimary new_pdx) =
- let old_pdx = Instance.pNode inst
- old_sdx = Instance.sNode inst
- old_p = Container.find old_pdx nl
- old_s = Container.find old_sdx nl
- tgt_n = Container.find new_pdx nl
- int_p = Node.removePri old_p inst
- int_s = Node.removeSec old_s inst
- force_p = Node.offline old_p
- new_nl = do -- Maybe monad
- -- check that the current secondary can host the instance
- -- during the migration
- tmp_s <- Node.addPriEx force_p int_s inst
- let tmp_s' = Node.removePri tmp_s inst
- new_p <- Node.addPriEx force_p tgt_n inst
- new_s <- Node.addSecEx force_p tmp_s' inst new_pdx
- let new_inst = Instance.setPri inst new_pdx
- return (Container.add new_pdx new_p $
- Container.addTwo old_pdx int_p old_sdx new_s nl,
- new_inst, new_pdx, old_sdx)
- in new_nl
+ let (old_pdx, old_sdx, old_p, old_s) = instanceNodes nl inst
+ tgt_n = Container.find new_pdx nl
+ int_p = Node.removePri old_p inst
+ int_s = Node.removeSec old_s inst
+ force_p = Node.offline old_p
+ new_nl = do -- Maybe monad
+ -- check that the current secondary can host the instance
+ -- during the migration
+ tmp_s <- Node.addPriEx force_p int_s inst
+ let tmp_s' = Node.removePri tmp_s inst
+ new_p <- Node.addPriEx force_p tgt_n inst
+ new_s <- Node.addSecEx force_p tmp_s' inst new_pdx
+ let new_inst = Instance.setPri inst new_pdx
+ return (Container.add new_pdx new_p $
+ Container.addTwo old_pdx int_p old_sdx new_s nl,
+ new_inst, new_pdx, old_sdx)
+ in new_nl
-- Replace the secondary (r:ns)
applyMove nl inst (ReplaceSecondary new_sdx) =
- let old_pdx = Instance.pNode inst
- old_sdx = Instance.sNode inst
- old_s = Container.find old_sdx nl
- tgt_n = Container.find new_sdx nl
- int_s = Node.removeSec old_s inst
- force_s = Node.offline old_s
- new_inst = Instance.setSec inst new_sdx
- new_nl = Node.addSecEx force_s tgt_n inst old_pdx >>=
- \new_s -> return (Container.addTwo new_sdx
- new_s old_sdx int_s nl,
- new_inst, old_pdx, new_sdx)
- in new_nl
+ let old_pdx = Instance.pNode inst
+ old_sdx = Instance.sNode inst
+ old_s = Container.find old_sdx nl
+ tgt_n = Container.find new_sdx nl
+ int_s = Node.removeSec old_s inst
+ force_s = Node.offline old_s
+ new_inst = Instance.setSec inst new_sdx
+ new_nl = Node.addSecEx force_s tgt_n inst old_pdx >>=
+ \new_s -> return (Container.addTwo new_sdx
+ new_s old_sdx int_s nl,
+ new_inst, old_pdx, new_sdx)
+ in new_nl
-- Replace the secondary and failover (r:np, f)
applyMove nl inst (ReplaceAndFailover new_pdx) =
- let old_pdx = Instance.pNode inst
- old_sdx = Instance.sNode inst
- old_p = Container.find old_pdx nl
- old_s = Container.find old_sdx nl
- tgt_n = Container.find new_pdx nl
- int_p = Node.removePri old_p inst
- int_s = Node.removeSec old_s inst
- force_s = Node.offline old_s
- new_nl = do -- Maybe monad
- new_p <- Node.addPri tgt_n inst
- new_s <- Node.addSecEx force_s int_p inst new_pdx
- let new_inst = Instance.setBoth inst new_pdx old_pdx
- return (Container.add new_pdx new_p $
- Container.addTwo old_pdx new_s old_sdx int_s nl,
- new_inst, new_pdx, old_pdx)
- in new_nl
+ let (old_pdx, old_sdx, old_p, old_s) = instanceNodes nl inst
+ tgt_n = Container.find new_pdx nl
+ int_p = Node.removePri old_p inst
+ int_s = Node.removeSec old_s inst
+ force_s = Node.offline old_s
+ new_nl = do -- Maybe monad
+ new_p <- Node.addPri tgt_n inst
+ new_s <- Node.addSecEx force_s int_p inst new_pdx
+ let new_inst = Instance.setBoth inst new_pdx old_pdx
+ return (Container.add new_pdx new_p $
+ Container.addTwo old_pdx new_s old_sdx int_s nl,
+ new_inst, new_pdx, old_pdx)
+ in new_nl
-- Failver and replace the secondary (f, r:ns)
applyMove nl inst (FailoverAndReplace new_sdx) =
- let old_pdx = Instance.pNode inst
- old_sdx = Instance.sNode inst
- old_p = Container.find old_pdx nl
- old_s = Container.find old_sdx nl
- tgt_n = Container.find new_sdx nl
- int_p = Node.removePri old_p inst
- int_s = Node.removeSec old_s inst
- force_p = Node.offline old_p
- new_nl = do -- Maybe monad
- new_p <- Node.addPriEx force_p int_s inst
- new_s <- Node.addSecEx force_p tgt_n inst old_sdx
- let new_inst = Instance.setBoth inst old_sdx new_sdx
- return (Container.add new_sdx new_s $
- Container.addTwo old_sdx new_p old_pdx int_p nl,
- new_inst, old_sdx, new_sdx)
- in new_nl
+ let (old_pdx, old_sdx, old_p, old_s) = instanceNodes nl inst
+ tgt_n = Container.find new_sdx nl
+ int_p = Node.removePri old_p inst
+ int_s = Node.removeSec old_s inst
+ force_p = Node.offline old_p
+ new_nl = do -- Maybe monad
+ new_p <- Node.addPriEx force_p int_s inst
+ new_s <- Node.addSecEx force_p tgt_n inst old_sdx
+ let new_inst = Instance.setBoth inst old_sdx new_sdx
+ return (Container.add new_sdx new_s $
+ Container.addTwo old_sdx new_p old_pdx int_p nl,
+ new_inst, old_sdx, new_sdx)
+ in new_nl
-- | Tries to allocate an instance on one given node.
allocateOnSingle :: Node.List -> Instance.Instance -> Ndx
-> OpResult Node.AllocElement
allocateOnSingle nl inst new_pdx =
- let p = Container.find new_pdx nl
- new_inst = Instance.setBoth inst new_pdx Node.noSecondary
- in Node.addPri p inst >>= \new_p -> do
- let new_nl = Container.add new_pdx new_p nl
- new_score = compCV nl
- return (new_nl, new_inst, [new_p], new_score)
+ let p = Container.find new_pdx nl
+ new_inst = Instance.setBoth inst new_pdx Node.noSecondary
+ in Node.addPri p inst >>= \new_p -> do
+ let new_nl = Container.add new_pdx new_p nl
+ new_score = compCV nl
+ return (new_nl, new_inst, [new_p], new_score)
-- | Tries to allocate an instance on a given pair of nodes.
allocateOnPair :: Node.List -> Instance.Instance -> Ndx -> Ndx
-> OpResult Node.AllocElement
allocateOnPair nl inst new_pdx new_sdx =
- let tgt_p = Container.find new_pdx nl
- tgt_s = Container.find new_sdx nl
- in do
- new_p <- Node.addPri tgt_p inst
- new_s <- Node.addSec tgt_s inst new_pdx
- let new_inst = Instance.setBoth inst new_pdx new_sdx
- new_nl = Container.addTwo new_pdx new_p new_sdx new_s nl
- return (new_nl, new_inst, [new_p, new_s], compCV new_nl)
+ let tgt_p = Container.find new_pdx nl
+ tgt_s = Container.find new_sdx nl
+ in do
+ new_p <- Node.addPri tgt_p inst
+ new_s <- Node.addSec tgt_s inst new_pdx
+ let new_inst = Instance.setBoth inst new_pdx new_sdx
+ new_nl = Container.addTwo new_pdx new_p new_sdx new_s nl
+ return (new_nl, new_inst, [new_p, new_s], compCV new_nl)
-- | Tries to perform an instance move and returns the best table
-- between the original one and the new one.
-> IMove -- ^ The move to apply
-> Table -- ^ The final best table
checkSingleStep ini_tbl target cur_tbl move =
- let
- Table ini_nl ini_il _ ini_plc = ini_tbl
- tmp_resu = applyMove ini_nl target move
- in
- case tmp_resu of
- OpFail _ -> cur_tbl
- OpGood (upd_nl, new_inst, pri_idx, sec_idx) ->
- let tgt_idx = Instance.idx target
- upd_cvar = compCV upd_nl
- upd_il = Container.add tgt_idx new_inst ini_il
- upd_plc = (tgt_idx, pri_idx, sec_idx, move, upd_cvar):ini_plc
- upd_tbl = Table upd_nl upd_il upd_cvar upd_plc
- in
- compareTables cur_tbl upd_tbl
+ let Table ini_nl ini_il _ ini_plc = ini_tbl
+ tmp_resu = applyMove ini_nl target move
+ in case tmp_resu of
+ OpFail _ -> cur_tbl
+ OpGood (upd_nl, new_inst, pri_idx, sec_idx) ->
+ let tgt_idx = Instance.idx target
+ upd_cvar = compCV upd_nl
+ upd_il = Container.add tgt_idx new_inst ini_il
+ upd_plc = (tgt_idx, pri_idx, sec_idx, move, upd_cvar):ini_plc
+ upd_tbl = Table upd_nl upd_il upd_cvar upd_plc
+ in compareTables cur_tbl upd_tbl
-- | Given the status of the current secondary as a valid new node and
-- the current candidate target node, generate the possible moves for
-> [IMove] -- ^ List of valid result moves
possibleMoves _ False tdx =
- [ReplaceSecondary tdx]
+ [ReplaceSecondary tdx]
possibleMoves True True tdx =
- [ReplaceSecondary tdx,
- ReplaceAndFailover tdx,
- ReplacePrimary tdx,
- FailoverAndReplace tdx]
+ [ ReplaceSecondary tdx
+ , ReplaceAndFailover tdx
+ , ReplacePrimary tdx
+ , FailoverAndReplace tdx
+ ]
possibleMoves False True tdx =
- [ReplaceSecondary tdx,
- ReplaceAndFailover tdx]
+ [ ReplaceSecondary tdx
+ , ReplaceAndFailover tdx
+ ]
-- | Compute the best move for a given instance.
checkInstanceMove :: [Ndx] -- ^ Allowed target node indices
-> Instance.Instance -- ^ Instance to move
-> Table -- ^ Best new table for this instance
checkInstanceMove nodes_idx disk_moves inst_moves ini_tbl target =
- let
- opdx = Instance.pNode target
- osdx = Instance.sNode target
- nodes = filter (\idx -> idx /= opdx && idx /= osdx) nodes_idx
- use_secondary = elem osdx nodes_idx && inst_moves
- aft_failover = if use_secondary -- if allowed to failover
+ let opdx = Instance.pNode target
+ osdx = Instance.sNode target
+ bad_nodes = [opdx, osdx]
+ nodes = filter (`notElem` bad_nodes) nodes_idx
+ use_secondary = elem osdx nodes_idx && inst_moves
+ aft_failover = if use_secondary -- if allowed to failover
then checkSingleStep ini_tbl target ini_tbl Failover
else ini_tbl
- all_moves = if disk_moves
+ all_moves = if disk_moves
then concatMap
- (possibleMoves use_secondary inst_moves) nodes
+ (possibleMoves use_secondary inst_moves) nodes
else []
in
-- iterate over the possible nodes for this instance
-> [Instance.Instance] -- ^ List of instances still to move
-> Table -- ^ The new solution
checkMove nodes_idx disk_moves inst_moves ini_tbl victims =
- let Table _ _ _ ini_plc = ini_tbl
- -- we're using rwhnf from the Control.Parallel.Strategies
- -- package; we don't need to use rnf as that would force too
- -- much evaluation in single-threaded cases, and in
- -- multi-threaded case the weak head normal form is enough to
- -- spark the evaluation
- tables = parMap rwhnf (checkInstanceMove nodes_idx disk_moves
- inst_moves ini_tbl)
- victims
- -- iterate over all instances, computing the best move
- best_tbl = foldl' compareTables ini_tbl tables
- Table _ _ _ best_plc = best_tbl
- in if length best_plc == length ini_plc
+ let Table _ _ _ ini_plc = ini_tbl
+ -- we're using rwhnf from the Control.Parallel.Strategies
+ -- package; we don't need to use rnf as that would force too
+ -- much evaluation in single-threaded cases, and in
+ -- multi-threaded case the weak head normal form is enough to
+ -- spark the evaluation
+ tables = parMap rwhnf (checkInstanceMove nodes_idx disk_moves
+ inst_moves ini_tbl)
+ victims
+ -- iterate over all instances, computing the best move
+ best_tbl = foldl' compareTables ini_tbl tables
+ Table _ _ _ best_plc = best_tbl
+ in if length best_plc == length ini_plc
then ini_tbl -- no advancement
else best_tbl
-> Score -- ^ Score at which to stop
-> Bool -- ^ The resulting table and commands
doNextBalance ini_tbl max_rounds min_score =
- let Table _ _ ini_cv ini_plc = ini_tbl
- ini_plc_len = length ini_plc
- in (max_rounds < 0 || ini_plc_len < max_rounds) && ini_cv > min_score
+ let Table _ _ ini_cv ini_plc = ini_tbl
+ ini_plc_len = length ini_plc
+ in (max_rounds < 0 || ini_plc_len < max_rounds) && ini_cv > min_score
-- | Run a balance move.
tryBalance :: Table -- ^ The starting table
map (\k -> (k, foldl' (\a e -> if e == k then a + 1 else a) 0 flst))
[minBound..maxBound]
+-- | Compares two Maybe AllocElement and chooses the best score.
+bestAllocElement :: Maybe Node.AllocElement
+ -> Maybe Node.AllocElement
+ -> Maybe Node.AllocElement
+bestAllocElement a Nothing = a
+bestAllocElement Nothing b = b
+bestAllocElement a@(Just (_, _, _, ascore)) b@(Just (_, _, _, bscore)) =
+ if ascore < bscore then a else b
+
-- | Update current Allocation solution and failure stats with new
-- elements.
concatAllocs :: AllocSolution -> OpResult Node.AllocElement -> AllocSolution
concatAllocs as (OpFail reason) = as { asFailures = reason : asFailures as }
-concatAllocs as (OpGood ns@(_, _, _, nscore)) =
- let -- Choose the old or new solution, based on the cluster score
- cntok = asAllocs as
- osols = asSolutions as
- nsols = case osols of
- [] -> [ns]
- (_, _, _, oscore):[] ->
- if oscore < nscore
- then osols
- else [ns]
- -- FIXME: here we simply concat to lists with more
- -- than one element; we should instead abort, since
- -- this is not a valid usage of this function
- xs -> ns:xs
- nsuc = cntok + 1
+concatAllocs as (OpGood ns) =
+ let -- Choose the old or new solution, based on the cluster score
+ cntok = asAllocs as
+ osols = asSolution as
+ nsols = bestAllocElement osols (Just ns)
+ nsuc = cntok + 1
-- Note: we force evaluation of nsols here in order to keep the
-- memory profile low - we know that we will need nsols for sure
-- in the next cycle, so we force evaluation of nsols, since the
-- foldl' in the caller will only evaluate the tuple, but not the
-- elements of the tuple
- in nsols `seq` nsuc `seq` as { asAllocs = nsuc, asSolutions = nsols }
+ in nsols `seq` nsuc `seq` as { asAllocs = nsuc, asSolution = nsols }
+
+-- | Sums two 'AllocSolution' structures.
+sumAllocs :: AllocSolution -> AllocSolution -> AllocSolution
+sumAllocs (AllocSolution aFails aAllocs aSols aLog)
+ (AllocSolution bFails bAllocs bSols bLog) =
+ -- note: we add b first, since usually it will be smaller; when
+ -- fold'ing, a will grow and grow whereas b is the per-group
+ -- result, hence smaller
+ let nFails = bFails ++ aFails
+ nAllocs = aAllocs + bAllocs
+ nSols = bestAllocElement aSols bSols
+ nLog = bLog ++ aLog
+ in AllocSolution nFails nAllocs nSols nLog
-- | Given a solution, generates a reasonable description for it.
describeSolution :: AllocSolution -> String
describeSolution as =
let fcnt = asFailures as
- sols = asSolutions as
+ sols = asSolution as
freasons =
intercalate ", " . map (\(a, b) -> printf "%s: %d" (show a) b) .
filter ((> 0) . snd) . collapseFailures $ fcnt
- in if null sols
- then "No valid allocation solutions, failure reasons: " ++
- (if null fcnt
- then "unknown reasons"
- else freasons)
- else let (_, _, nodes, cv) = head sols
- in printf ("score: %.8f, successes %d, failures %d (%s)" ++
- " for node(s) %s") cv (asAllocs as) (length fcnt) freasons
- (intercalate "/" . map Node.name $ nodes)
+ in case sols of
+ Nothing -> "No valid allocation solutions, failure reasons: " ++
+ (if null fcnt then "unknown reasons" else freasons)
+ Just (_, _, nodes, cv) ->
+ printf ("score: %.8f, successes %d, failures %d (%s)" ++
+ " for node(s) %s") cv (asAllocs as) (length fcnt) freasons
+ (intercalate "/" . map Node.name $ nodes)
-- | Annotates a solution with the appropriate string.
annotateSolution :: AllocSolution -> AllocSolution
-- for proper jobset execution, we should reverse all lists.
reverseEvacSolution :: EvacSolution -> EvacSolution
reverseEvacSolution (EvacSolution f m o) =
- EvacSolution (reverse f) (reverse m) (reverse o)
+ EvacSolution (reverse f) (reverse m) (reverse o)
-- | Generate the valid node allocation singles or pairs for a new instance.
genAllocNodes :: Group.List -- ^ Group list
-- unallocable nodes
-> Result AllocNodes -- ^ The (monadic) result
genAllocNodes gl nl count drop_unalloc =
- let filter_fn = if drop_unalloc
+ let filter_fn = if drop_unalloc
then filter (Group.isAllocable .
flip Container.find gl . Node.group)
else id
- all_nodes = filter_fn $ getOnline nl
- all_pairs = liftM2 (,) all_nodes all_nodes
- ok_pairs = filter (\(x, y) -> Node.idx x /= Node.idx y &&
- Node.group x == Node.group y) all_pairs
- in case count of
- 1 -> Ok (Left (map Node.idx all_nodes))
- 2 -> Ok (Right (map (\(p, s) -> (Node.idx p, Node.idx s)) ok_pairs))
- _ -> Bad "Unsupported number of nodes, only one or two supported"
+ all_nodes = filter_fn $ getOnline nl
+ all_pairs = [(Node.idx p,
+ [Node.idx s | s <- all_nodes,
+ Node.idx p /= Node.idx s,
+ Node.group p == Node.group s]) |
+ p <- all_nodes]
+ in case count of
+ 1 -> Ok (Left (map Node.idx all_nodes))
+ 2 -> Ok (Right (filter (not . null . snd) all_pairs))
+ _ -> Bad "Unsupported number of nodes, only one or two supported"
-- | Try to allocate an instance on the cluster.
tryAlloc :: (Monad m) =>
-> Instance.Instance -- ^ The instance to allocate
-> AllocNodes -- ^ The allocation targets
-> m AllocSolution -- ^ Possible solution list
+tryAlloc _ _ _ (Right []) = fail "Not enough online nodes"
tryAlloc nl _ inst (Right ok_pairs) =
- let sols = foldl' (\cstate (p, s) ->
- concatAllocs cstate $ allocateOnPair nl inst p s
- ) emptyAllocSolution ok_pairs
-
- in if null ok_pairs -- means we have just one node
- then fail "Not enough online nodes"
- else return $ annotateSolution sols
-
+ let psols = parMap rwhnf (\(p, ss) ->
+ foldl' (\cstate ->
+ concatAllocs cstate .
+ allocateOnPair nl inst p)
+ emptyAllocSolution ss) ok_pairs
+ sols = foldl' sumAllocs emptyAllocSolution psols
+ in return $ annotateSolution sols
+
+tryAlloc _ _ _ (Left []) = fail "No online nodes"
tryAlloc nl _ inst (Left all_nodes) =
- let sols = foldl' (\cstate ->
- concatAllocs cstate . allocateOnSingle nl inst
- ) emptyAllocSolution all_nodes
- in if null all_nodes
- then fail "No online nodes"
- else return $ annotateSolution sols
+ let sols = foldl' (\cstate ->
+ concatAllocs cstate . allocateOnSingle nl inst
+ ) emptyAllocSolution all_nodes
+ in return $ annotateSolution sols
-- | Given a group/result, describe it as a nice (list of) messages.
solutionDescription :: Group.List -> (Gdx, Result AllocSolution) -> [String]
Bad message -> [printf "Group %s: error %s" gname message]
where grp = Container.find groupId gl
gname = Group.name grp
- pol = apolToString (Group.allocPolicy grp)
+ pol = allocPolicyToRaw (Group.allocPolicy grp)
-- | From a list of possibly bad and possibly empty solutions, filter
-- only the groups with a valid result. Note that the result will be
-> [(Gdx, Result AllocSolution)]
-> [(Gdx, AllocSolution)]
filterMGResults gl = foldl' fn []
- where unallocable = not . Group.isAllocable . flip Container.find gl
- fn accu (gdx, rasol) =
- case rasol of
- Bad _ -> accu
- Ok sol | null (asSolutions sol) -> accu
- | unallocable gdx -> accu
- | otherwise -> (gdx, sol):accu
+ where unallocable = not . Group.isAllocable . flip Container.find gl
+ fn accu (gdx, rasol) =
+ case rasol of
+ Bad _ -> accu
+ Ok sol | isNothing (asSolution sol) -> accu
+ | unallocable gdx -> accu
+ | otherwise -> (gdx, sol):accu
-- | Sort multigroup results based on policy and score.
sortMGResults :: Group.List
-> [(Gdx, AllocSolution)]
-> [(Gdx, AllocSolution)]
sortMGResults gl sols =
- let extractScore (_, _, _, x) = x
- solScore (gdx, sol) = (Group.allocPolicy (Container.find gdx gl),
- (extractScore . head . asSolutions) sol)
- in sortBy (comparing solScore) sols
+ let extractScore (_, _, _, x) = x
+ solScore (gdx, sol) = (Group.allocPolicy (Container.find gdx gl),
+ (extractScore . fromJust . asSolution) sol)
+ in sortBy (comparing solScore) sols
-- | Finds the best group for an instance on a multi-group cluster.
--
goodSols = filterMGResults mggl sols
sortedSols = sortMGResults mggl goodSols
in if null sortedSols
- then Bad $ intercalate ", " all_msgs
- else let (final_group, final_sol) = head sortedSols
- in return (final_group, final_sol, all_msgs)
+ then Bad $ intercalate ", " all_msgs
+ else let (final_group, final_sol) = head sortedSols
+ in return (final_group, final_sol, all_msgs)
-- | Try to allocate an instance on a multi-group cluster.
tryMGAlloc :: Group.List -- ^ The group list
-> [Ndx] -- ^ Nodes which should not be used
-> m AllocSolution -- ^ Solution list
tryReloc nl il xid 1 ex_idx =
- let all_nodes = getOnline nl
- inst = Container.find xid il
- ex_idx' = Instance.pNode inst:ex_idx
- valid_nodes = filter (not . flip elem ex_idx' . Node.idx) all_nodes
- valid_idxes = map Node.idx valid_nodes
- sols1 = foldl' (\cstate x ->
- let em = do
- (mnl, i, _, _) <-
- applyMove nl inst (ReplaceSecondary x)
- return (mnl, i, [Container.find x mnl],
- compCV mnl)
- in concatAllocs cstate em
- ) emptyAllocSolution valid_idxes
- in return sols1
+ let all_nodes = getOnline nl
+ inst = Container.find xid il
+ ex_idx' = Instance.pNode inst:ex_idx
+ valid_nodes = filter (not . flip elem ex_idx' . Node.idx) all_nodes
+ valid_idxes = map Node.idx valid_nodes
+ sols1 = foldl' (\cstate x ->
+ let em = do
+ (mnl, i, _, _) <-
+ applyMove nl inst (ReplaceSecondary x)
+ return (mnl, i, [Container.find x mnl],
+ compCV mnl)
+ in concatAllocs cstate em
+ ) emptyAllocSolution valid_idxes
+ in return sols1
tryReloc _ _ _ reqn _ = fail $ "Unsupported number of relocation \
\destinations required (" ++ show reqn ++
-- this function, whatever mode we have is just a primary change.
failOnSecondaryChange :: (Monad m) => EvacMode -> DiskTemplate -> m ()
failOnSecondaryChange ChangeSecondary dt =
- fail $ "Instances with disk template '" ++ dtToString dt ++
+ fail $ "Instances with disk template '" ++ diskTemplateToRaw dt ++
"' can't execute change secondary"
failOnSecondaryChange _ _ = return ()
, Score
, Ndx) -- ^ New best solution
evacDrbdSecondaryInner nl inst gdx accu ndx =
- case applyMove nl inst (ReplaceSecondary ndx) of
- OpFail fm ->
- case accu of
- Right _ -> accu
- Left _ -> Left $ "Node " ++ Container.nameOf nl ndx ++
- " failed: " ++ show fm
- OpGood (nl', inst', _, _) ->
- let nodes = Container.elems nl'
- -- The fromJust below is ugly (it can fail nastily), but
- -- at this point we should have any internal mismatches,
- -- and adding a monad here would be quite involved
- grpnodes = fromJust (gdx `lookup` Node.computeGroups nodes)
- new_cv = compCVNodes grpnodes
- new_accu = Right (nl', inst', new_cv, ndx)
- in case accu of
- Left _ -> new_accu
- Right (_, _, old_cv, _) ->
- if old_cv < new_cv
- then accu
- else new_accu
+ case applyMove nl inst (ReplaceSecondary ndx) of
+ OpFail fm ->
+ case accu of
+ Right _ -> accu
+ Left _ -> Left $ "Node " ++ Container.nameOf nl ndx ++
+ " failed: " ++ show fm
+ OpGood (nl', inst', _, _) ->
+ let nodes = Container.elems nl'
+ -- The fromJust below is ugly (it can fail nastily), but
+          -- at this point we shouldn't have any internal mismatches,
+ -- and adding a monad here would be quite involved
+ grpnodes = fromJust (gdx `lookup` Node.computeGroups nodes)
+ new_cv = compCVNodes grpnodes
+ new_accu = Right (nl', inst', new_cv, ndx)
+ in case accu of
+ Left _ -> new_accu
+ Right (_, _, old_cv, _) ->
+ if old_cv < new_cv
+ then accu
+ else new_accu
-- | Compute result of changing all nodes of a DRBD instance.
--
-> (Ndx, Ndx) -- ^ Tuple of new
-- primary\/secondary nodes
-> Result (Node.List, Instance.List, [OpCodes.OpCode], Score)
-evacDrbdAllInner nl il inst gdx (t_pdx, t_sdx) =
- do
- let primary = Container.find (Instance.pNode inst) nl
- idx = Instance.idx inst
- -- if the primary is offline, then we first failover
- (nl1, inst1, ops1) <-
- if Node.offline primary
- then do
- (nl', inst', _, _) <-
- annotateResult "Failing over to the secondary" $
- opToResult $ applyMove nl inst Failover
- return (nl', inst', [Failover])
- else return (nl, inst, [])
- let (o1, o2, o3) = (ReplaceSecondary t_pdx,
- Failover,
- ReplaceSecondary t_sdx)
- -- we now need to execute a replace secondary to the future
- -- primary node
- (nl2, inst2, _, _) <-
- annotateResult "Changing secondary to new primary" $
- opToResult $
- applyMove nl1 inst1 o1
- let ops2 = o1:ops1
- -- we now execute another failover, the primary stays fixed now
- (nl3, inst3, _, _) <- annotateResult "Failing over to new primary" $
- opToResult $ applyMove nl2 inst2 o2
- let ops3 = o2:ops2
- -- and finally another replace secondary, to the final secondary
- (nl4, inst4, _, _) <-
- annotateResult "Changing secondary to final secondary" $
- opToResult $
- applyMove nl3 inst3 o3
- let ops4 = o3:ops3
- il' = Container.add idx inst4 il
- ops = concatMap (iMoveToJob nl4 il' idx) $ reverse ops4
- let nodes = Container.elems nl4
- -- The fromJust below is ugly (it can fail nastily), but
- -- at this point we should have any internal mismatches,
- -- and adding a monad here would be quite involved
- grpnodes = fromJust (gdx `lookup` Node.computeGroups nodes)
- new_cv = compCVNodes grpnodes
- return (nl4, il', ops, new_cv)
+evacDrbdAllInner nl il inst gdx (t_pdx, t_sdx) = do
+ let primary = Container.find (Instance.pNode inst) nl
+ idx = Instance.idx inst
+ -- if the primary is offline, then we first failover
+ (nl1, inst1, ops1) <-
+ if Node.offline primary
+ then do
+ (nl', inst', _, _) <-
+ annotateResult "Failing over to the secondary" $
+ opToResult $ applyMove nl inst Failover
+ return (nl', inst', [Failover])
+ else return (nl, inst, [])
+ let (o1, o2, o3) = (ReplaceSecondary t_pdx,
+ Failover,
+ ReplaceSecondary t_sdx)
+ -- we now need to execute a replace secondary to the future
+ -- primary node
+ (nl2, inst2, _, _) <-
+ annotateResult "Changing secondary to new primary" $
+ opToResult $
+ applyMove nl1 inst1 o1
+ let ops2 = o1:ops1
+ -- we now execute another failover, the primary stays fixed now
+ (nl3, inst3, _, _) <- annotateResult "Failing over to new primary" $
+ opToResult $ applyMove nl2 inst2 o2
+ let ops3 = o2:ops2
+ -- and finally another replace secondary, to the final secondary
+ (nl4, inst4, _, _) <-
+ annotateResult "Changing secondary to final secondary" $
+ opToResult $
+ applyMove nl3 inst3 o3
+ let ops4 = o3:ops3
+ il' = Container.add idx inst4 il
+ ops = concatMap (iMoveToJob nl4 il' idx) $ reverse ops4
+ let nodes = Container.elems nl4
+ -- The fromJust below is ugly (it can fail nastily), but
+      -- at this point we shouldn't have any internal mismatches,
+ -- and adding a monad here would be quite involved
+ grpnodes = fromJust (gdx `lookup` Node.computeGroups nodes)
+ new_cv = compCVNodes grpnodes
+ return (nl4, il', ops, new_cv)
-- | Computes the nodes in a given group which are available for
-- allocation.
-> Result (Node.List, Instance.List, [OpCodes.OpCode])
-> (Node.List, Instance.List, EvacSolution)
updateEvacSolution (nl, il, es) idx (Bad msg) =
- (nl, il, es { esFailed = (idx, msg):esFailed es})
+ (nl, il, es { esFailed = (idx, msg):esFailed es})
updateEvacSolution (_, _, es) idx (Ok (nl, il, opcodes)) =
- (nl, il, es { esMoved = new_elem:esMoved es
- , esOpCodes = opcodes:esOpCodes es })
- where inst = Container.find idx il
- new_elem = (idx,
- instancePriGroup nl inst,
- Instance.allNodes inst)
+ (nl, il, es { esMoved = new_elem:esMoved es
+ , esOpCodes = opcodes:esOpCodes es })
+ where inst = Container.find idx il
+ new_elem = (idx,
+ instancePriGroup nl inst,
+ Instance.allNodes inst)
-- | Node-evacuation IAllocator mode main function.
tryNodeEvac :: Group.List -- ^ The cluster groups
-> [Idx] -- ^ List of instance (indices) to be evacuated
-> Result (Node.List, Instance.List, EvacSolution)
tryNodeEvac _ ini_nl ini_il mode idxs =
- let evac_ndx = nodesToEvacuate ini_il mode idxs
- offline = map Node.idx . filter Node.offline $ Container.elems ini_nl
- excl_ndx = foldl' (flip IntSet.insert) evac_ndx offline
- group_ndx = map (\(gdx, (nl, _)) -> (gdx, map Node.idx
- (Container.elems nl))) $
- splitCluster ini_nl ini_il
- (fin_nl, fin_il, esol) =
- foldl' (\state@(nl, il, _) inst ->
- let gdx = instancePriGroup nl inst
- pdx = Instance.pNode inst in
- updateEvacSolution state (Instance.idx inst) $
- availableGroupNodes group_ndx
- (IntSet.insert pdx excl_ndx) gdx >>=
- nodeEvacInstance nl il mode inst gdx
- )
- (ini_nl, ini_il, emptyEvacSolution)
- (map (`Container.find` ini_il) idxs)
- in return (fin_nl, fin_il, reverseEvacSolution esol)
+ let evac_ndx = nodesToEvacuate ini_il mode idxs
+ offline = map Node.idx . filter Node.offline $ Container.elems ini_nl
+ excl_ndx = foldl' (flip IntSet.insert) evac_ndx offline
+ group_ndx = map (\(gdx, (nl, _)) -> (gdx, map Node.idx
+ (Container.elems nl))) $
+ splitCluster ini_nl ini_il
+ (fin_nl, fin_il, esol) =
+ foldl' (\state@(nl, il, _) inst ->
+ let gdx = instancePriGroup nl inst
+ pdx = Instance.pNode inst in
+ updateEvacSolution state (Instance.idx inst) $
+ availableGroupNodes group_ndx
+ (IntSet.insert pdx excl_ndx) gdx >>=
+ nodeEvacInstance nl il mode inst gdx
+ )
+ (ini_nl, ini_il, emptyEvacSolution)
+ (map (`Container.find` ini_il) idxs)
+ in return (fin_nl, fin_il, reverseEvacSolution esol)
-- | Change-group IAllocator mode main function.
--
-> [Idx] -- ^ List of instance (indices) to be evacuated
-> Result (Node.List, Instance.List, EvacSolution)
tryChangeGroup gl ini_nl ini_il gdxs idxs =
- let evac_gdxs = nub $ map (instancePriGroup ini_nl .
- flip Container.find ini_il) idxs
- target_gdxs = (if null gdxs
+ let evac_gdxs = nub $ map (instancePriGroup ini_nl .
+ flip Container.find ini_il) idxs
+ target_gdxs = (if null gdxs
then Container.keys gl
else gdxs) \\ evac_gdxs
- offline = map Node.idx . filter Node.offline $ Container.elems ini_nl
- excl_ndx = foldl' (flip IntSet.insert) IntSet.empty offline
- group_ndx = map (\(gdx, (nl, _)) -> (gdx, map Node.idx
- (Container.elems nl))) $
- splitCluster ini_nl ini_il
- (fin_nl, fin_il, esol) =
- foldl' (\state@(nl, il, _) inst ->
- let solution = do
- let ncnt = Instance.requiredNodes $
- Instance.diskTemplate inst
- (gdx, _, _) <- findBestAllocGroup gl nl il
- (Just target_gdxs) inst ncnt
- av_nodes <- availableGroupNodes group_ndx
- excl_ndx gdx
- nodeEvacInstance nl il ChangeAll inst
- gdx av_nodes
- in updateEvacSolution state
- (Instance.idx inst) solution
- )
- (ini_nl, ini_il, emptyEvacSolution)
- (map (`Container.find` ini_il) idxs)
- in return (fin_nl, fin_il, reverseEvacSolution esol)
-
--- | Recursively place instances on the cluster until we're out of space.
-iterateAlloc :: Node.List
- -> Instance.List
- -> Maybe Int
- -> Instance.Instance
- -> AllocNodes
- -> [Instance.Instance]
- -> [CStats]
- -> Result AllocResult
+ offline = map Node.idx . filter Node.offline $ Container.elems ini_nl
+ excl_ndx = foldl' (flip IntSet.insert) IntSet.empty offline
+ group_ndx = map (\(gdx, (nl, _)) -> (gdx, map Node.idx
+ (Container.elems nl))) $
+ splitCluster ini_nl ini_il
+ (fin_nl, fin_il, esol) =
+ foldl' (\state@(nl, il, _) inst ->
+ let solution = do
+ let ncnt = Instance.requiredNodes $
+ Instance.diskTemplate inst
+ (gdx, _, _) <- findBestAllocGroup gl nl il
+ (Just target_gdxs) inst ncnt
+ av_nodes <- availableGroupNodes group_ndx
+ excl_ndx gdx
+ nodeEvacInstance nl il ChangeAll inst gdx av_nodes
+ in updateEvacSolution state (Instance.idx inst) solution
+ )
+ (ini_nl, ini_il, emptyEvacSolution)
+ (map (`Container.find` ini_il) idxs)
+ in return (fin_nl, fin_il, reverseEvacSolution esol)
+
+-- | Standard-sized allocation method.
+--
+-- This places instances of the same size on the cluster until we're
+-- out of space. The result will be a list of identically-sized
+-- instances.
+iterateAlloc :: AllocMethod
iterateAlloc nl il limit newinst allocnodes ixes cstats =
- let depth = length ixes
- newname = printf "new-%d" depth::String
- newidx = length (Container.elems il) + depth
- newi2 = Instance.setIdx (Instance.setName newinst newname) newidx
- newlimit = fmap (flip (-) 1) limit
- in case tryAlloc nl il newi2 allocnodes of
- Bad s -> Bad s
- Ok (AllocSolution { asFailures = errs, asSolutions = sols3 }) ->
- let newsol = Ok (collapseFailures errs, nl, il, ixes, cstats) in
- case sols3 of
- [] -> newsol
- (xnl, xi, _, _):[] ->
- if limit == Just 0
- then newsol
- else iterateAlloc xnl (Container.add newidx xi il)
- newlimit newinst allocnodes (xi:ixes)
- (totalResources xnl:cstats)
- _ -> Bad "Internal error: multiple solutions for single\
- \ allocation"
-
--- | The core of the tiered allocation mode.
-tieredAlloc :: Node.List
- -> Instance.List
- -> Maybe Int
- -> Instance.Instance
- -> AllocNodes
- -> [Instance.Instance]
- -> [CStats]
- -> Result AllocResult
+ let depth = length ixes
+ newname = printf "new-%d" depth::String
+ newidx = length (Container.elems il) + depth
+ newi2 = Instance.setIdx (Instance.setName newinst newname) newidx
+ newlimit = fmap (flip (-) 1) limit
+ in case tryAlloc nl il newi2 allocnodes of
+ Bad s -> Bad s
+ Ok (AllocSolution { asFailures = errs, asSolution = sols3 }) ->
+ let newsol = Ok (collapseFailures errs, nl, il, ixes, cstats) in
+ case sols3 of
+ Nothing -> newsol
+ Just (xnl, xi, _, _) ->
+ if limit == Just 0
+ then newsol
+ else iterateAlloc xnl (Container.add newidx xi il)
+ newlimit newinst allocnodes (xi:ixes)
+ (totalResources xnl:cstats)
+
+-- | Tiered allocation method.
+--
+-- This places instances on the cluster, and decreases the spec until
+-- we can allocate again. The result will be a list of decreasing
+-- instance specs.
+tieredAlloc :: AllocMethod
tieredAlloc nl il limit newinst allocnodes ixes cstats =
- case iterateAlloc nl il limit newinst allocnodes ixes cstats of
- Bad s -> Bad s
- Ok (errs, nl', il', ixes', cstats') ->
- let newsol = Ok (errs, nl', il', ixes', cstats')
- ixes_cnt = length ixes'
- (stop, newlimit) = case limit of
- Nothing -> (False, Nothing)
- Just n -> (n <= ixes_cnt,
- Just (n - ixes_cnt)) in
- if stop then newsol else
+ case iterateAlloc nl il limit newinst allocnodes ixes cstats of
+ Bad s -> Bad s
+ Ok (errs, nl', il', ixes', cstats') ->
+ let newsol = Ok (errs, nl', il', ixes', cstats')
+ ixes_cnt = length ixes'
+ (stop, newlimit) = case limit of
+ Nothing -> (False, Nothing)
+ Just n -> (n <= ixes_cnt,
+ Just (n - ixes_cnt)) in
+ if stop then newsol else
case Instance.shrinkByType newinst . fst . last $
sortBy (comparing snd) errs of
Bad _ -> newsol
-- secondary, while the command list holds gnt-instance
-- commands (without that prefix), e.g \"@failover instance1@\"
computeMoves i inam mv c d =
- case mv of
- Failover -> ("f", [mig])
- FailoverAndReplace _ -> (printf "f r:%s" d, [mig, rep d])
- ReplaceSecondary _ -> (printf "r:%s" d, [rep d])
- ReplaceAndFailover _ -> (printf "r:%s f" c, [rep c, mig])
- ReplacePrimary _ -> (printf "f r:%s f" c, [mig, rep c, mig])
- where morf = if Instance.running i then "migrate" else "failover"
- mig = printf "%s -f %s" morf inam::String
- rep n = printf "replace-disks -n %s %s" n inam
+ case mv of
+ Failover -> ("f", [mig])
+ FailoverAndReplace _ -> (printf "f r:%s" d, [mig, rep d])
+ ReplaceSecondary _ -> (printf "r:%s" d, [rep d])
+ ReplaceAndFailover _ -> (printf "r:%s f" c, [rep c, mig])
+ ReplacePrimary _ -> (printf "f r:%s f" c, [mig, rep c, mig])
+ where morf = if Instance.instanceRunning i then "migrate" else "failover"
+ mig = printf "%s -f %s" morf inam::String
+ rep n = printf "replace-disks -n %s %s" n inam
-- | Converts a placement to string format.
printSolutionLine :: Node.List -- ^ The node list
-- the solution
-> (String, [String])
printSolutionLine nl il nmlen imlen plc pos =
- let
- pmlen = (2*nmlen + 1)
- (i, p, s, mv, c) = plc
- inst = Container.find i il
- inam = Instance.alias inst
- npri = Node.alias $ Container.find p nl
- nsec = Node.alias $ Container.find s nl
- opri = Node.alias $ Container.find (Instance.pNode inst) nl
- osec = Node.alias $ Container.find (Instance.sNode inst) nl
- (moves, cmds) = computeMoves inst inam mv npri nsec
- ostr = printf "%s:%s" opri osec::String
- nstr = printf "%s:%s" npri nsec::String
- in
- (printf " %3d. %-*s %-*s => %-*s %.8f a=%s"
- pos imlen inam pmlen ostr
- pmlen nstr c moves,
- cmds)
+ let pmlen = (2*nmlen + 1)
+ (i, p, s, mv, c) = plc
+ inst = Container.find i il
+ inam = Instance.alias inst
+ npri = Node.alias $ Container.find p nl
+ nsec = Node.alias $ Container.find s nl
+ opri = Node.alias $ Container.find (Instance.pNode inst) nl
+ osec = Node.alias $ Container.find (Instance.sNode inst) nl
+ (moves, cmds) = computeMoves inst inam mv npri nsec
+ ostr = printf "%s:%s" opri osec::String
+ nstr = printf "%s:%s" npri nsec::String
+ in (printf " %3d. %-*s %-*s => %-*s %.8f a=%s"
+ pos imlen inam pmlen ostr
+ pmlen nstr c moves,
+ cmds)
-- | Return the instance and involved nodes in an instance move.
--
-- instance index
-> [Ndx] -- ^ Resulting list of node indices
involvedNodes il plc =
- let (i, np, ns, _, _) = plc
- inst = Container.find i il
- in nub $ [np, ns] ++ Instance.allNodes inst
+ let (i, np, ns, _, _) = plc
+ inst = Container.find i il
+ in nub $ [np, ns] ++ Instance.allNodes inst
-- | Inner function for splitJobs, that either appends the next job to
-- the current jobset, or starts a new jobset.
mergeJobs :: ([JobSet], [Ndx]) -> MoveJob -> ([JobSet], [Ndx])
mergeJobs ([], _) n@(ndx, _, _, _) = ([[n]], ndx)
mergeJobs (cjs@(j:js), nbuf) n@(ndx, _, _, _)
- | null (ndx `intersect` nbuf) = ((n:j):js, ndx ++ nbuf)
- | otherwise = ([n]:cjs, ndx)
+ | null (ndx `intersect` nbuf) = ((n:j):js, ndx ++ nbuf)
+ | otherwise = ([n]:cjs, ndx)
-- | Break a list of moves into independent groups. Note that this
-- will reverse the order of jobs.
-- also beautify the display a little.
formatJob :: Int -> Int -> (Int, MoveJob) -> [String]
formatJob jsn jsl (sn, (_, _, _, cmds)) =
- let out =
- printf " echo job %d/%d" jsn sn:
- printf " check":
- map (" gnt-instance " ++) cmds
- in if sn == 1
+ let out =
+ printf " echo job %d/%d" jsn sn:
+ printf " check":
+ map (" gnt-instance " ++) cmds
+ in if sn == 1
then ["", printf "echo jobset %d, %d jobs" jsn jsl] ++ out
else out
-- also beautify the display a little.
formatCmds :: [JobSet] -> String
formatCmds =
- unlines .
- concatMap (\(jsn, js) -> concatMap (formatJob jsn (length js))
- (zip [1..] js)) .
- zip [1..]
+ unlines .
+ concatMap (\(jsn, js) -> concatMap (formatJob jsn (length js))
+ (zip [1..] js)) .
+ zip [1..]
-- | Print the node list.
printNodes :: Node.List -> [String] -> String
printNodes nl fs =
- let fields = case fs of
- [] -> Node.defaultFields
- "+":rest -> Node.defaultFields ++ rest
- _ -> fs
- snl = sortBy (comparing Node.idx) (Container.elems nl)
- (header, isnum) = unzip $ map Node.showHeader fields
- in unlines . map ((:) ' ' . intercalate " ") $
- formatTable (header:map (Node.list fields) snl) isnum
+ let fields = case fs of
+ [] -> Node.defaultFields
+ "+":rest -> Node.defaultFields ++ rest
+ _ -> fs
+ snl = sortBy (comparing Node.idx) (Container.elems nl)
+ (header, isnum) = unzip $ map Node.showHeader fields
+ in unlines . map ((:) ' ' . unwords) $
+ formatTable (header:map (Node.list fields) snl) isnum
-- | Print the instance list.
printInsts :: Node.List -> Instance.List -> String
printInsts nl il =
- let sil = sortBy (comparing Instance.idx) (Container.elems il)
- helper inst = [ if Instance.running inst then "R" else " "
- , Instance.name inst
- , Container.nameOf nl (Instance.pNode inst)
- , let sdx = Instance.sNode inst
- in if sdx == Node.noSecondary
+ let sil = sortBy (comparing Instance.idx) (Container.elems il)
+ helper inst = [ if Instance.instanceRunning inst then "R" else " "
+ , Instance.name inst
+ , Container.nameOf nl (Instance.pNode inst)
+ , let sdx = Instance.sNode inst
+ in if sdx == Node.noSecondary
then ""
else Container.nameOf nl sdx
- , if Instance.autoBalance inst then "Y" else "N"
- , printf "%3d" $ Instance.vcpus inst
- , printf "%5d" $ Instance.mem inst
- , printf "%5d" $ Instance.dsk inst `div` 1024
- , printf "%5.3f" lC
- , printf "%5.3f" lM
- , printf "%5.3f" lD
- , printf "%5.3f" lN
- ]
- where DynUtil lC lM lD lN = Instance.util inst
- header = [ "F", "Name", "Pri_node", "Sec_node", "Auto_bal"
- , "vcpu", "mem" , "dsk", "lCpu", "lMem", "lDsk", "lNet" ]
- isnum = False:False:False:False:False:repeat True
- in unlines . map ((:) ' ' . intercalate " ") $
- formatTable (header:map helper sil) isnum
+ , if Instance.autoBalance inst then "Y" else "N"
+ , printf "%3d" $ Instance.vcpus inst
+ , printf "%5d" $ Instance.mem inst
+ , printf "%5d" $ Instance.dsk inst `div` 1024
+ , printf "%5.3f" lC
+ , printf "%5.3f" lM
+ , printf "%5.3f" lD
+ , printf "%5.3f" lN
+ ]
+ where DynUtil lC lM lD lN = Instance.util inst
+ header = [ "F", "Name", "Pri_node", "Sec_node", "Auto_bal"
+ , "vcpu", "mem" , "dsk", "lCpu", "lMem", "lDsk", "lNet" ]
+ isnum = False:False:False:False:False:repeat True
+ in unlines . map ((:) ' ' . unwords) $
+ formatTable (header:map helper sil) isnum
-- | Shows statistics for a given node list.
printStats :: Node.List -> String
printStats nl =
- let dcvs = compDetailedCV $ Container.elems nl
- (weights, names) = unzip detailedCVInfo
- hd = zip3 (weights ++ repeat 1) (names ++ repeat "unknown") dcvs
- formatted = map (\(w, header, val) ->
- printf "%s=%.8f(x%.2f)" header val w::String) hd
- in intercalate ", " formatted
+ let dcvs = compDetailedCV $ Container.elems nl
+ (weights, names) = unzip detailedCVInfo
+ hd = zip3 (weights ++ repeat 1) (names ++ repeat "unknown") dcvs
+ formatted = map (\(w, header, val) ->
+ printf "%s=%.8f(x%.2f)" header val w::String) hd
+ in intercalate ", " formatted
-- | Convert a placement into a list of OpCodes (basically a job).
iMoveToJob :: Node.List -- ^ The node list; only used for node
-> [OpCodes.OpCode] -- ^ The list of opcodes equivalent to
-- the given move
iMoveToJob nl il idx move =
- let inst = Container.find idx il
- iname = Instance.name inst
- lookNode = Just . Container.nameOf nl
- opF = OpCodes.OpInstanceMigrate iname True False True Nothing
- opR n = OpCodes.OpInstanceReplaceDisks iname (lookNode n)
- OpCodes.ReplaceNewSecondary [] Nothing
- in case move of
- Failover -> [ opF ]
- ReplacePrimary np -> [ opF, opR np, opF ]
- ReplaceSecondary ns -> [ opR ns ]
- ReplaceAndFailover np -> [ opR np, opF ]
- FailoverAndReplace ns -> [ opF, opR ns ]
+ let inst = Container.find idx il
+ iname = Instance.name inst
+ lookNode = Just . Container.nameOf nl
+ opF = OpCodes.OpInstanceMigrate iname True False True Nothing
+ opR n = OpCodes.OpInstanceReplaceDisks iname (lookNode n)
+ OpCodes.ReplaceNewSecondary [] Nothing
+ in case move of
+ Failover -> [ opF ]
+ ReplacePrimary np -> [ opF, opR np, opF ]
+ ReplaceSecondary ns -> [ opR ns ]
+ ReplaceAndFailover np -> [ opR np, opF ]
+ FailoverAndReplace ns -> [ opF, opR ns ]
-- * Node group functions
pgroup = Node.group pnode
sgroup = Node.group snode
in if pgroup /= sgroup
- then fail ("Instance placed accross two node groups, primary " ++
- show pgroup ++ ", secondary " ++ show sgroup)
- else return pgroup
+      then fail ("Instance placed across two node groups, primary " ++
+ show pgroup ++ ", secondary " ++ show sgroup)
+ else return pgroup
-- | Computes the group of an instance per the primary node.
instancePriGroup :: Node.List -> Instance.Instance -> Gdx
-> [Idx] -- ^ List of instance indices being evacuated
-> IntSet.IntSet -- ^ Set of node indices
nodesToEvacuate il mode =
- IntSet.delete Node.noSecondary .
- foldl' (\ns idx ->
- let i = Container.find idx il
- pdx = Instance.pNode i
- sdx = Instance.sNode i
- dt = Instance.diskTemplate i
- withSecondary = case dt of
- DTDrbd8 -> IntSet.insert sdx ns
- _ -> ns
- in case mode of
- ChangePrimary -> IntSet.insert pdx ns
- ChangeSecondary -> withSecondary
- ChangeAll -> IntSet.insert pdx withSecondary
- ) IntSet.empty
+ IntSet.delete Node.noSecondary .
+ foldl' (\ns idx ->
+ let i = Container.find idx il
+ pdx = Instance.pNode i
+ sdx = Instance.sNode i
+ dt = Instance.diskTemplate i
+ withSecondary = case dt of
+ DTDrbd8 -> IntSet.insert sdx ns
+ _ -> ns
+ in case mode of
+ ChangePrimary -> IntSet.insert pdx ns
+ ChangeSecondary -> withSecondary
+ ChangeAll -> IntSet.insert pdx withSecondary
+ ) IntSet.empty
{- | Compatibility helper module.
-This module holds definitions that help with supporting multiple library versions or transitions between versions.
+This module holds definitions that help with supporting multiple
+library versions or transitions between versions.
-}
-}
module Ganeti.HTools.Compat
- ( rwhnf
- , Control.Parallel.Strategies.parMap
- ) where
+ ( rwhnf
+ , Control.Parallel.Strategies.parMap
+ ) where
import qualified Control.Parallel.Strategies
-}
module Ganeti.HTools.Container
- (
- -- * Types
- Container
- , Key
- -- * Creation
- , IntMap.empty
- , IntMap.singleton
- , IntMap.fromList
- -- * Query
- , IntMap.size
- , IntMap.null
- , find
- , IntMap.findMax
- , IntMap.member
- -- * Update
- , add
- , addTwo
- , IntMap.map
- , IntMap.mapAccum
- , IntMap.filter
- -- * Conversion
- , IntMap.elems
- , IntMap.keys
- -- * Element functions
- , nameOf
- , findByName
- ) where
+ ( -- * Types
+ Container
+ , Key
+ -- * Creation
+ , IntMap.empty
+ , IntMap.singleton
+ , IntMap.fromList
+ -- * Query
+ , IntMap.size
+ , IntMap.null
+ , find
+ , IntMap.findMax
+ , IntMap.member
+ -- * Update
+ , add
+ , addTwo
+ , IntMap.map
+ , IntMap.mapAccum
+ , IntMap.filter
+ -- * Conversion
+ , IntMap.elems
+ , IntMap.keys
+ -- * Element functions
+ , nameOf
+ , findByName
+ ) where
import qualified Data.IntMap as IntMap
findByName :: (T.Element a, Monad m) =>
Container a -> String -> m a
findByName c n =
- let all_elems = IntMap.elems c
- result = filter ((n `elem`) . T.allNames) all_elems
- in case result of
- [item] -> return item
- _ -> fail $ "Wrong number of elems found with name " ++ n
+ let all_elems = IntMap.elems c
+ result = filter ((n `elem`) . T.allNames) all_elems
+ in case result of
+ [item] -> return item
+ _ -> fail $ "Wrong number of elems found with name " ++ n
-}
module Ganeti.HTools.ExtLoader
- ( loadExternalData
- , commonSuffix
- , maybeSaveData
- ) where
+ ( loadExternalData
+ , commonSuffix
+ , maybeSaveData
+ ) where
import Control.Monad
import Data.Maybe (isJust, fromJust)
import System.FilePath
import System.IO
-import System
+import System.Exit
import Text.Printf (hPrintf)
import qualified Ganeti.HTools.Luxi as Luxi
-- | Parses a user-supplied utilisation string.
parseUtilisation :: String -> Result (String, DynUtil)
parseUtilisation line =
- case sepSplit ' ' line of
- [name, cpu, mem, dsk, net] ->
- do
- rcpu <- tryRead name cpu
- rmem <- tryRead name mem
- rdsk <- tryRead name dsk
- rnet <- tryRead name net
- let du = DynUtil { cpuWeight = rcpu, memWeight = rmem
- , dskWeight = rdsk, netWeight = rnet }
- return (name, du)
- _ -> Bad $ "Cannot parse line " ++ line
+ case sepSplit ' ' line of
+ [name, cpu, mem, dsk, net] ->
+ do
+ rcpu <- tryRead name cpu
+ rmem <- tryRead name mem
+ rdsk <- tryRead name dsk
+ rnet <- tryRead name net
+ let du = DynUtil { cpuWeight = rcpu, memWeight = rmem
+ , dskWeight = rdsk, netWeight = rnet }
+ return (name, du)
+ _ -> Bad $ "Cannot parse line " ++ line
-- | External tool data loader from a variety of sources.
loadExternalData :: Options
" files options should be given.")
exitWith $ ExitFailure 1
- util_contents <- (case optDynuFile opts of
- Just path -> readFile path
- Nothing -> return "")
+ util_contents <- maybe (return "") readFile (optDynuFile opts)
let util_data = mapM parseUtilisation $ lines util_contents
- util_data' <- (case util_data of
- Ok x -> return x
- Bad y -> do
- hPutStrLn stderr ("Error: can't parse utilisation" ++
- " data: " ++ show y)
- exitWith $ ExitFailure 1)
+ util_data' <- case util_data of
+ Ok x -> return x
+ Bad y -> do
+ hPutStrLn stderr ("Error: can't parse utilisation" ++
+ " data: " ++ show y)
+ exitWith $ ExitFailure 1
input_data <-
- case () of
- _ | setRapi -> wrapIO $ Rapi.loadData mhost
- | setLuxi -> wrapIO $ Luxi.loadData $ fromJust lsock
- | setSim -> Simu.loadData simdata
- | setFile -> wrapIO $ Text.loadData $ fromJust tfile
- | otherwise -> return $ Bad "No backend selected! Exiting."
+ case () of
+ _ | setRapi -> wrapIO $ Rapi.loadData mhost
+ | setLuxi -> wrapIO $ Luxi.loadData $ fromJust lsock
+ | setSim -> Simu.loadData simdata
+ | setFile -> wrapIO $ Text.loadData $ fromJust tfile
+ | otherwise -> return $ Bad "No backend selected! Exiting."
let ldresult = input_data >>= mergeData util_data' exTags selInsts exInsts
cdata <-
- (case ldresult of
- Ok x -> return x
- Bad s -> do
- hPrintf stderr
- "Error: failed to load data, aborting. Details:\n%s\n" s:: IO ()
- exitWith $ ExitFailure 1
- )
+ case ldresult of
+ Ok x -> return x
+ Bad s -> do
+ hPrintf stderr
+ "Error: failed to load data, aborting. Details:\n%s\n" s:: IO ()
+ exitWith $ ExitFailure 1
let (fix_msgs, nl) = checkData (cdNodes cdata) (cdInstances cdata)
unless (optVerbose opts == 0) $ maybeShowWarnings fix_msgs
-}
module Ganeti.HTools.Group
- ( Group(..)
- , List
- , AssocList
- -- * Constructor
- , create
- , setIdx
- , isAllocable
- ) where
+ ( Group(..)
+ , List
+ , AssocList
+ -- * Constructor
+ , create
+ , setIdx
+ , isAllocable
+ ) where
import qualified Ganeti.HTools.Container as Container
-- | The node group type.
data Group = Group
- { name :: String -- ^ The node name
- , uuid :: T.GroupID -- ^ The UUID of the group
- , idx :: T.Gdx -- ^ Internal index for book-keeping
- , allocPolicy :: T.AllocPolicy -- ^ The allocation policy for this group
- } deriving (Show, Read, Eq)
+ { name :: String -- ^ The node name
+ , uuid :: T.GroupID -- ^ The UUID of the group
+ , idx :: T.Gdx -- ^ Internal index for book-keeping
+ , allocPolicy :: T.AllocPolicy -- ^ The allocation policy for this group
+ } deriving (Show, Read, Eq)
-- Note: we use the name as the alias, and the UUID as the official
-- name
instance T.Element Group where
- nameOf = uuid
- idxOf = idx
- setAlias = setName
- setIdx = setIdx
- allNames n = [name n, uuid n]
+ nameOf = uuid
+ idxOf = idx
+ setAlias = setName
+ setIdx = setIdx
+ allNames n = [name n, uuid n]
-- | A simple name for the int, node association list.
type AssocList = [(T.Gdx, Group)]
-- | Create a new group.
create :: String -> T.GroupID -> T.AllocPolicy -> Group
create name_init id_init apol_init =
- Group { name = name_init
- , uuid = id_init
- , allocPolicy = apol_init
- , idx = -1
- }
+ Group { name = name_init
+ , uuid = id_init
+ , allocPolicy = apol_init
+ , idx = -1
+ }
-- | Sets the group index.
--
-}
module Ganeti.HTools.IAlloc
- ( readRequest
- , runIAllocator
- , processRelocate
- ) where
+ ( readRequest
+ , runIAllocator
+ , processRelocate
+ ) where
import Data.Either ()
import Data.Maybe (fromMaybe, isJust)
import Control.Monad
import Text.JSON (JSObject, JSValue(JSArray),
makeObj, encodeStrict, decodeStrict, fromJSObject, showJSON)
-import System (exitWith, ExitCode(..))
+import System.Exit
import System.IO
import qualified Ganeti.HTools.Cluster as Cluster
import Ganeti.HTools.Utils
import Ganeti.HTools.Types
+{-# ANN module "HLint: ignore Eta reduce" #-}
+
-- | Type alias for the result of an IAllocator call.
type IAllocResult = (String, JSValue, Node.List, Instance.List)
vcpus <- extract "vcpus"
tags <- extract "tags"
dt <- extract "disk_template"
- let running = "running"
- return (n, Instance.create n mem disk vcpus running tags True 0 0 dt)
+ return (n, Instance.create n mem disk vcpus Running tags True 0 0 dt)
-- | Parses an instance as found in the cluster instance list.
parseInstance :: NameAssoc -- ^ The node name-to-index association list
else readEitherString $ head nodes
pidx <- lookupNode ktn n pnode
let snodes = tail nodes
- sidx <- (if null snodes then return Node.noSecondary
- else readEitherString (head snodes) >>= lookupNode ktn n)
+ sidx <- if null snodes
+ then return Node.noSecondary
+ else readEitherString (head snodes) >>= lookupNode ktn n
return (n, Instance.setBoth (snd base) pidx sidx)
-- | Parses a node as found in the cluster node list.
vm_capable <- annotateResult desc $ maybeFromObj a "vm_capable"
let vm_capable' = fromMaybe True vm_capable
gidx <- lookupGroup ktg n guuid
- node <- (if offline || drained || not vm_capable'
- then return $ Node.create n 0 0 0 0 0 0 True gidx
- else do
- mtotal <- extract "total_memory"
- mnode <- extract "reserved_memory"
- mfree <- extract "free_memory"
- dtotal <- extract "total_disk"
- dfree <- extract "free_disk"
- ctotal <- extract "total_cpus"
- return $ Node.create n mtotal mnode mfree
- dtotal dfree ctotal False gidx)
+ node <- if offline || drained || not vm_capable'
+ then return $ Node.create n 0 0 0 0 0 0 True gidx
+ else do
+ mtotal <- extract "total_memory"
+ mnode <- extract "reserved_memory"
+ mfree <- extract "free_memory"
+ dtotal <- extract "total_disk"
+ dfree <- extract "free_disk"
+ ctotal <- extract "total_cpus"
+ return $ Node.create n mtotal mnode mfree
+ dtotal dfree ctotal False gidx
return (n, node)
-- | Parses a group as found in the cluster group list.
map_g = cdGroups cdata
optype <- extrReq "type"
rqtype <-
- case () of
- _ | optype == C.iallocatorModeAlloc ->
- do
- rname <- extrReq "name"
- req_nodes <- extrReq "required_nodes"
- inew <- parseBaseInstance rname request
- let io = snd inew
- return $ Allocate io req_nodes
- | optype == C.iallocatorModeReloc ->
- do
- rname <- extrReq "name"
- ridx <- lookupInstance kti rname
- req_nodes <- extrReq "required_nodes"
- ex_nodes <- extrReq "relocate_from"
- ex_idex <- mapM (Container.findByName map_n) ex_nodes
- return $ Relocate ridx req_nodes (map Node.idx ex_idex)
- | optype == C.iallocatorModeChgGroup ->
- do
- rl_names <- extrReq "instances"
- rl_insts <- mapM (liftM Instance.idx .
- Container.findByName map_i) rl_names
- gr_uuids <- extrReq "target_groups"
- gr_idxes <- mapM (liftM Group.idx .
- Container.findByName map_g) gr_uuids
- return $ ChangeGroup rl_insts gr_idxes
- | optype == C.iallocatorModeNodeEvac ->
- do
- rl_names <- extrReq "instances"
- rl_insts <- mapM (Container.findByName map_i) rl_names
- let rl_idx = map Instance.idx rl_insts
- rl_mode <- extrReq "evac_mode"
- return $ NodeEvacuate rl_idx rl_mode
+ case () of
+ _ | optype == C.iallocatorModeAlloc ->
+ do
+ rname <- extrReq "name"
+ req_nodes <- extrReq "required_nodes"
+ inew <- parseBaseInstance rname request
+ let io = snd inew
+ return $ Allocate io req_nodes
+ | optype == C.iallocatorModeReloc ->
+ do
+ rname <- extrReq "name"
+ ridx <- lookupInstance kti rname
+ req_nodes <- extrReq "required_nodes"
+ ex_nodes <- extrReq "relocate_from"
+ ex_idex <- mapM (Container.findByName map_n) ex_nodes
+ return $ Relocate ridx req_nodes (map Node.idx ex_idex)
+ | optype == C.iallocatorModeChgGroup ->
+ do
+ rl_names <- extrReq "instances"
+ rl_insts <- mapM (liftM Instance.idx .
+ Container.findByName map_i) rl_names
+ gr_uuids <- extrReq "target_groups"
+ gr_idxes <- mapM (liftM Group.idx .
+ Container.findByName map_g) gr_uuids
+ return $ ChangeGroup rl_insts gr_idxes
+ | optype == C.iallocatorModeNodeEvac ->
+ do
+ rl_names <- extrReq "instances"
+ rl_insts <- mapM (Container.findByName map_i) rl_names
+ let rl_idx = map Instance.idx rl_insts
+ rl_mode <- extrReq "evac_mode"
+ return $ NodeEvacuate rl_idx rl_mode
- | otherwise -> fail ("Invalid request type '" ++ optype ++ "'")
+ | otherwise -> fail ("Invalid request type '" ++ optype ++ "'")
return (msgs, Request rqtype cdata)
-- | Formats the result into a valid IAllocator response message.
-> JSValue -- ^ The JSON encoded result
-> String -- ^ The full JSON-formatted message
formatResponse success info result =
- let
- e_success = ("success", showJSON success)
- e_info = ("info", showJSON info)
- e_result = ("result", result)
- in encodeStrict $ makeObj [e_success, e_info, e_result]
+ let e_success = ("success", showJSON success)
+ e_info = ("info", showJSON info)
+ e_result = ("result", result)
+ in encodeStrict $ makeObj [e_success, e_info, e_result]
-- | Flatten the log of a solution into a string.
describeSolution :: Cluster.AllocSolution -> String
formatAllocate :: Instance.List -> Cluster.AllocSolution -> Result IAllocResult
formatAllocate il as = do
let info = describeSolution as
- case Cluster.asSolutions as of
- [] -> fail info
- (nl, inst, nodes, _):[] ->
- do
- let il' = Container.add (Instance.idx inst) inst il
- return (info, showJSON $ map Node.name nodes, nl, il')
- _ -> fail "Internal error: multiple allocation solutions"
+ case Cluster.asSolution as of
+ Nothing -> fail info
+ Just (nl, inst, nodes, _) ->
+ do
+ let il' = Container.add (Instance.idx inst) inst il
+ return (info, showJSON $ map Node.name nodes, nl, il')
-- | Convert a node-evacuation/change group result.
formatNodeEvac :: Group.List
-> (Node.List, Instance.List, Cluster.EvacSolution)
-> Result IAllocResult
formatNodeEvac gl nl il (fin_nl, fin_il, es) =
- let iname = Instance.name . flip Container.find il
- nname = Node.name . flip Container.find nl
- gname = Group.name . flip Container.find gl
- fes = map (\(idx, msg) -> (iname idx, msg)) $ Cluster.esFailed es
- mes = map (\(idx, gdx, ndxs) -> (iname idx, gname gdx, map nname ndxs))
- $ Cluster.esMoved es
- failed = length fes
- moved = length mes
- info = show failed ++ " instances failed to move and " ++ show moved ++
- " were moved successfully"
- in Ok (info, showJSON (mes, fes, Cluster.esOpCodes es), fin_nl, fin_il)
+ let iname = Instance.name . flip Container.find il
+ nname = Node.name . flip Container.find nl
+ gname = Group.name . flip Container.find gl
+ fes = map (\(idx, msg) -> (iname idx, msg)) $ Cluster.esFailed es
+ mes = map (\(idx, gdx, ndxs) -> (iname idx, gname gdx, map nname ndxs))
+ $ Cluster.esMoved es
+ failed = length fes
+ moved = length mes
+ info = show failed ++ " instances failed to move and " ++ show moved ++
+ " were moved successfully"
+ in Ok (info, showJSON (mes, fes, Cluster.esOpCodes es), fin_nl, fin_il)
-- | Runs relocate for a single instance.
--
when (snode == pnode) $
fail "Internal error: selected primary as new secondary?!"
- nodes' <- if (nodes == [pnode, snode])
+ nodes' <- if nodes == [pnode, snode]
then return [snode] -- only the new secondary is needed
else fail $ "Internal error: inconsistent node list (" ++
show nodes ++ ") versus instance nodes (" ++ show pnode ++
formatRelocate :: (Node.List, Instance.List, [Ndx])
-> Result IAllocResult
formatRelocate (nl, il, ndxs) =
- let nodes = map (`Container.find` nl) ndxs
- names = map Node.name nodes
- in Ok ("success", showJSON names, nl, il)
+ let nodes = map (`Container.find` nl) ndxs
+ names = map Node.name nodes
+ in Ok ("success", showJSON names, nl, il)
-- | Process a request and return new node lists.
processRequest :: Request -> Result IAllocResult
let Request rqtype (ClusterData gl nl il _) = request
in case rqtype of
Allocate xi reqn ->
- Cluster.tryMGAlloc gl nl il xi reqn >>= formatAllocate il
+ Cluster.tryMGAlloc gl nl il xi reqn >>= formatAllocate il
Relocate idx reqn exnodes ->
- processRelocate gl nl il idx reqn exnodes >>= formatRelocate
+ processRelocate gl nl il idx reqn exnodes >>= formatRelocate
ChangeGroup gdxs idxs ->
- Cluster.tryChangeGroup gl nl il idxs gdxs >>=
- formatNodeEvac gl nl il
+ Cluster.tryChangeGroup gl nl il idxs gdxs >>=
+ formatNodeEvac gl nl il
NodeEvacuate xi mode ->
- Cluster.tryNodeEvac gl nl il mode xi >>=
- formatNodeEvac gl nl il
+ Cluster.tryNodeEvac gl nl il mode xi >>=
+ formatNodeEvac gl nl il
-- | Reads the request from the data file(s).
readRequest :: Options -> [String] -> IO Request
readRequest opts args = do
when (null args) $ do
- hPutStrLn stderr "Error: this program needs an input file."
- exitWith $ ExitFailure 1
+ hPutStrLn stderr "Error: this program needs an input file."
+ exitWith $ ExitFailure 1
input_data <- readFile (head args)
r1 <- case parseData input_data of
hPutStrLn stderr $ "Error: " ++ err
exitWith $ ExitFailure 1
Ok (fix_msgs, rq) -> maybeShowWarnings fix_msgs >> return rq
- (if isJust (optDataFile opts) || (not . null . optNodeSim) opts
- then do
- cdata <- loadExternalData opts
- let Request rqt _ = r1
- return $ Request rqt cdata
- else return r1)
+ if isJust (optDataFile opts) || (not . null . optNodeSim) opts
+ then do
+ cdata <- loadExternalData opts
+ let Request rqt _ = r1
+ return $ Request rqt cdata
+ else return r1
-- | Main iallocator pipeline.
runIAllocator :: Request -> (Maybe (Node.List, Instance.List), String)
runIAllocator request =
let (ok, info, result, cdata) =
- case processRequest request of
- Ok (msg, r, nl, il) -> (True, "Request successful: " ++ msg, r,
- Just (nl, il))
- Bad msg -> (False, "Request failed: " ++ msg, JSArray [], Nothing)
+ case processRequest request of
+ Ok (msg, r, nl, il) -> (True, "Request successful: " ++ msg, r,
+ Just (nl, il))
+ Bad msg -> (False, "Request failed: " ++ msg, JSArray [], Nothing)
rstring = formatResponse ok info result
in (cdata, rstring)
-}
module Ganeti.HTools.Instance
- ( Instance(..)
- , AssocList
- , List
- , create
- , setIdx
- , setName
- , setAlias
- , setPri
- , setSec
- , setBoth
- , setMovable
- , specOf
- , shrinkByType
- , runningStates
- , localStorageTemplates
- , hasSecondary
- , requiredNodes
- , allNodes
- , usesLocalStorage
- ) where
+ ( Instance(..)
+ , AssocList
+ , List
+ , create
+ , instanceRunning
+ , instanceOffline
+ , instanceDown
+ , applyIfOnline
+ , setIdx
+ , setName
+ , setAlias
+ , setPri
+ , setSec
+ , setBoth
+ , setMovable
+ , specOf
+ , shrinkByType
+ , localStorageTemplates
+ , hasSecondary
+ , requiredNodes
+ , allNodes
+ , usesLocalStorage
+ ) where
import qualified Ganeti.HTools.Types as T
import qualified Ganeti.HTools.Container as Container
import qualified Ganeti.Constants as C
+import Ganeti.HTools.Utils
+
-- * Type declarations
-- | The instance type.
data Instance = Instance
- { name :: String -- ^ The instance name
- , alias :: String -- ^ The shortened name
- , mem :: Int -- ^ Memory of the instance
- , dsk :: Int -- ^ Disk size of instance
- , vcpus :: Int -- ^ Number of VCPUs
- , running :: Bool -- ^ Is the instance running?
- , runSt :: String -- ^ Original (text) run status
- , pNode :: T.Ndx -- ^ Original primary node
- , sNode :: T.Ndx -- ^ Original secondary node
- , idx :: T.Idx -- ^ Internal index
- , util :: T.DynUtil -- ^ Dynamic resource usage
- , movable :: Bool -- ^ Can and should the instance be moved?
- , autoBalance :: Bool -- ^ Is the instance auto-balanced?
- , tags :: [String] -- ^ List of instance tags
- , diskTemplate :: T.DiskTemplate -- ^ The disk template of the instance
- } deriving (Show, Read)
+ { name :: String -- ^ The instance name
+ , alias :: String -- ^ The shortened name
+ , mem :: Int -- ^ Memory of the instance
+ , dsk :: Int -- ^ Disk size of instance
+ , vcpus :: Int -- ^ Number of VCPUs
+ , runSt :: T.InstanceStatus -- ^ Original run status
+ , pNode :: T.Ndx -- ^ Original primary node
+ , sNode :: T.Ndx -- ^ Original secondary node
+ , idx :: T.Idx -- ^ Internal index
+ , util :: T.DynUtil -- ^ Dynamic resource usage
+ , movable :: Bool -- ^ Can and should the instance be moved?
+ , autoBalance :: Bool -- ^ Is the instance auto-balanced?
+ , tags :: [String] -- ^ List of instance tags
+ , diskTemplate :: T.DiskTemplate -- ^ The disk template of the instance
+ } deriving (Show, Read)
instance T.Element Instance where
- nameOf = name
- idxOf = idx
- setAlias = setAlias
- setIdx = setIdx
- allNames n = [name n, alias n]
-
--- | Constant holding the running instance states.
-runningStates :: [String]
-runningStates = [C.inststRunning, C.inststErrorup]
+ nameOf = name
+ idxOf = idx
+ setAlias = setAlias
+ setIdx = setIdx
+ allNames n = [name n, alias n]
+
+-- | Check if instance is running.
+instanceRunning :: Instance -> Bool
+instanceRunning (Instance {runSt = T.Running}) = True
+instanceRunning (Instance {runSt = T.ErrorUp}) = True
+instanceRunning _ = False
+
+-- | Check if instance is offline.
+instanceOffline :: Instance -> Bool
+instanceOffline (Instance {runSt = T.AdminOffline}) = True
+instanceOffline _ = False
+
+-- | Check if instance is down.
+instanceDown :: Instance -> Bool
+instanceDown inst | instanceRunning inst = False
+instanceDown inst | instanceOffline inst = False
+instanceDown _ = True
+
+-- | Apply the function if the instance is online. Otherwise use
+-- the initial value.
+applyIfOnline :: Instance -> (a -> a) -> a -> a
+applyIfOnline = applyIf . not . instanceOffline
-- | Constant holding the local storage templates.
--
--
-- Some parameters are not initialized by function, and must be set
-- later (via 'setIdx' for example).
-create :: String -> Int -> Int -> Int -> String
+create :: String -> Int -> Int -> Int -> T.InstanceStatus
-> [String] -> Bool -> T.Ndx -> T.Ndx -> T.DiskTemplate -> Instance
create name_init mem_init dsk_init vcpus_init run_init tags_init
auto_balance_init pn sn dt =
- Instance { name = name_init
- , alias = name_init
- , mem = mem_init
- , dsk = dsk_init
- , vcpus = vcpus_init
- , running = run_init `elem` runningStates
- , runSt = run_init
- , pNode = pn
- , sNode = sn
- , idx = -1
- , util = T.baseUtil
- , tags = tags_init
- , movable = supportsMoves dt
- , autoBalance = auto_balance_init
- , diskTemplate = dt
- }
+ Instance { name = name_init
+ , alias = name_init
+ , mem = mem_init
+ , dsk = dsk_init
+ , vcpus = vcpus_init
+ , runSt = run_init
+ , pNode = pn
+ , sNode = sn
+ , idx = -1
+ , util = T.baseUtil
+ , tags = tags_init
+ , movable = supportsMoves dt
+ , autoBalance = auto_balance_init
+ , diskTemplate = dt
+ }
-- | Changes the index.
--
-- | Return the spec of an instance.
specOf :: Instance -> T.RSpec
specOf Instance { mem = m, dsk = d, vcpus = c } =
- T.RSpec { T.rspecCpu = c, T.rspecMem = m, T.rspecDsk = d }
+ T.RSpec { T.rspecCpu = c, T.rspecMem = m, T.rspecDsk = d }
-- | Checks whether the instance uses a secondary node.
--
--- /dev/null
+{-| JSON utility functions. -}
+
+{-
+
+Copyright (C) 2009, 2010, 2011 Google Inc.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA.
+
+-}
+
+module Ganeti.HTools.JSON
+ ( fromJResult
+ , readEitherString
+ , JSRecord
+ , loadJSArray
+ , fromObj
+ , maybeFromObj
+ , fromObjWithDefault
+ , fromKeyValue
+ , fromJVal
+ , asJSObject
+ , asObjectList
+ )
+ where
+
+import Control.Monad (liftM)
+import Data.Maybe (fromMaybe)
+import Text.Printf (printf)
+
+import qualified Text.JSON as J
+
+-- * JSON-related functions
+
+-- | A type alias for the list-based representation of J.JSObject.
+type JSRecord = [(String, J.JSValue)]
+
+-- | Converts a JSON Result into a monadic value.
+fromJResult :: Monad m => String -> J.Result a -> m a
+fromJResult s (J.Error x) = fail (s ++ ": " ++ x)
+fromJResult _ (J.Ok x) = return x
+
+-- | Tries to read a string from a JSON value.
+--
+-- In case the value was not a string, we fail the read (in the
+-- context of the current monad).
+readEitherString :: (Monad m) => J.JSValue -> m String
+readEitherString v =
+ case v of
+ J.JSString s -> return $ J.fromJSString s
+ _ -> fail "Wrong JSON type"
+
+-- | Converts a JSON message into an array of JSON objects.
+loadJSArray :: (Monad m)
+ => String -- ^ Operation description (for error reporting)
+ -> String -- ^ Input message
+ -> m [J.JSObject J.JSValue]
+loadJSArray s = fromJResult s . J.decodeStrict
+
+-- | Reads the value of a key in a JSON object.
+fromObj :: (J.JSON a, Monad m) => JSRecord -> String -> m a
+fromObj o k =
+ case lookup k o of
+ Nothing -> fail $ printf "key '%s' not found, object contains only %s"
+ k (show (map fst o))
+ Just val -> fromKeyValue k val
+
+-- | Reads the value of an optional key in a JSON object.
+maybeFromObj :: (J.JSON a, Monad m) =>
+ JSRecord -> String -> m (Maybe a)
+maybeFromObj o k =
+ case lookup k o of
+ Nothing -> return Nothing
+ Just val -> liftM Just (fromKeyValue k val)
+
+-- | Reads the value of a key in a JSON object with a default if missing.
+fromObjWithDefault :: (J.JSON a, Monad m) =>
+ JSRecord -> String -> a -> m a
+fromObjWithDefault o k d = liftM (fromMaybe d) $ maybeFromObj o k
+
+-- | Reads a JValue, that originated from an object key.
+fromKeyValue :: (J.JSON a, Monad m)
+ => String -- ^ The key name
+ -> J.JSValue -- ^ The value to read
+ -> m a
+fromKeyValue k val =
+ fromJResult (printf "key '%s'" k) (J.readJSON val)
+
+-- | Small wrapper over readJSON.
+fromJVal :: (Monad m, J.JSON a) => J.JSValue -> m a
+fromJVal v =
+ case J.readJSON v of
+ J.Error s -> fail ("Cannot convert value '" ++ show v ++
+ "', error: " ++ s)
+ J.Ok x -> return x
+
+-- | Converts a JSON value into a JSON object.
+asJSObject :: (Monad m) => J.JSValue -> m (J.JSObject J.JSValue)
+asJSObject (J.JSObject a) = return a
+asJSObject _ = fail "not an object"
+
+-- | Converts a list of JSON values into a list of JSON objects.
+asObjectList :: (Monad m) => [J.JSValue] -> m [J.JSObject J.JSValue]
+asObjectList = mapM asJSObject
-}
module Ganeti.HTools.Loader
- ( mergeData
- , checkData
- , assignIndices
- , lookupName
- , goodLookupResult
- , lookupNode
- , lookupInstance
- , lookupGroup
- , commonSuffix
- , RqType(..)
- , Request(..)
- , ClusterData(..)
- , emptyCluster
- , compareNameComponent
- , prefixMatch
- , LookupResult(..)
- , MatchPriority(..)
- ) where
+ ( mergeData
+ , checkData
+ , assignIndices
+ , lookupName
+ , goodLookupResult
+ , lookupNode
+ , lookupInstance
+ , lookupGroup
+ , commonSuffix
+ , RqType(..)
+ , Request(..)
+ , ClusterData(..)
+ , emptyCluster
+ , compareNameComponent
+ , prefixMatch
+ , LookupResult(..)
+ , MatchPriority(..)
+ ) where
import Data.List
import Data.Function
-}
data RqType
- = Allocate Instance.Instance Int -- ^ A new instance allocation
- | Relocate Idx Int [Ndx] -- ^ Choose a new secondary node
- | NodeEvacuate [Idx] EvacMode -- ^ node-evacuate mode
- | ChangeGroup [Gdx] [Idx] -- ^ Multi-relocate mode
+ = Allocate Instance.Instance Int -- ^ A new instance allocation
+ | Relocate Idx Int [Ndx] -- ^ Choose a new secondary node
+ | NodeEvacuate [Idx] EvacMode -- ^ node-evacuate mode
+ | ChangeGroup [Gdx] [Idx] -- ^ Multi-relocate mode
deriving (Show, Read)
-- | A complete request, as received from Ganeti.
data Request = Request RqType ClusterData
- deriving (Show, Read)
+ deriving (Show, Read)
-- | The cluster state.
data ClusterData = ClusterData
- { cdGroups :: Group.List -- ^ The node group list
- , cdNodes :: Node.List -- ^ The node list
- , cdInstances :: Instance.List -- ^ The instance list
- , cdTags :: [String] -- ^ The cluster tags
- } deriving (Show, Read)
+ { cdGroups :: Group.List -- ^ The node group list
+ , cdNodes :: Node.List -- ^ The node list
+ , cdInstances :: Instance.List -- ^ The instance list
+ , cdTags :: [String] -- ^ The cluster tags
+ } deriving (Show, Read)
-- | The priority of a match in a lookup result.
data MatchPriority = ExactMatch
-- | The result of a name lookup in a list.
data LookupResult = LookupResult
- { lrMatchPriority :: MatchPriority -- ^ The result type
- -- | Matching value (for ExactMatch, PartialMatch), Lookup string otherwise
- , lrContent :: String
- } deriving (Show, Read)
+ { lrMatchPriority :: MatchPriority -- ^ The result type
+ -- | Matching value (for ExactMatch, PartialMatch), Lookup string otherwise
+ , lrContent :: String
+ } deriving (Show, Read)
-- | Lookup results have an absolute preference ordering.
instance Eq LookupResult where
-- | Lookups a node into an assoc list.
lookupNode :: (Monad m) => NameAssoc -> String -> String -> m Ndx
lookupNode ktn inst node =
- case M.lookup node ktn of
- Nothing -> fail $ "Unknown node '" ++ node ++ "' for instance " ++ inst
- Just idx -> return idx
+ case M.lookup node ktn of
+ Nothing -> fail $ "Unknown node '" ++ node ++ "' for instance " ++ inst
+ Just idx -> return idx
-- | Lookups an instance into an assoc list.
lookupInstance :: (Monad m) => NameAssoc -> String -> m Idx
lookupInstance kti inst =
- case M.lookup inst kti of
- Nothing -> fail $ "Unknown instance '" ++ inst ++ "'"
- Just idx -> return idx
+ case M.lookup inst kti of
+ Nothing -> fail $ "Unknown instance '" ++ inst ++ "'"
+ Just idx -> return idx
-- | Lookups a group into an assoc list.
lookupGroup :: (Monad m) => NameAssoc -> String -> String -> m Gdx
lookupGroup ktg nname gname =
- case M.lookup gname ktg of
- Nothing -> fail $ "Unknown group '" ++ gname ++ "' for node " ++ nname
- Just idx -> return idx
+ case M.lookup gname ktg of
+ Nothing -> fail $ "Unknown group '" ++ gname ++ "' for node " ++ nname
+ Just idx -> return idx
-- | Check for prefix matches in names.
-- Implemented in Ganeti core utils.text.MatchNameComponent
prefixMatch :: String -- ^ Lookup
-> String -- ^ Full name
-> Bool -- ^ Whether there is a prefix match
-prefixMatch lkp = isPrefixOf (lkp ++ ".")
+prefixMatch = isPrefixOf . (++ ".")
-- | Is the lookup priority a "good" one?
goodMatchPriority :: MatchPriority -> Bool
select (min new old)
-- special cases:
-- short circuit if the new result is an exact match
- [ ((lrMatchPriority new) == ExactMatch, new)
+ [ (lrMatchPriority new == ExactMatch, new)
-- if both are partial matches generate a multiple match
, (partial2, LookupResult MultipleMatch lkp)
] where new = compareNameComponent cstr lkp
-> Instance.Instance
-> Node.List
fixNodes accu inst =
- let
- pdx = Instance.pNode inst
- sdx = Instance.sNode inst
- pold = Container.find pdx accu
- pnew = Node.setPri pold inst
- ac2 = Container.add pdx pnew accu
- in
- if sdx /= Node.noSecondary
- then let sold = Container.find sdx accu
- snew = Node.setSec sold inst
- in Container.add sdx snew ac2
- else ac2
+ let pdx = Instance.pNode inst
+ sdx = Instance.sNode inst
+ pold = Container.find pdx accu
+ pnew = Node.setPri pold inst
+ ac2 = Container.add pdx pnew accu
+ in if sdx /= Node.noSecondary
+ then let sold = Container.find sdx accu
+ snew = Node.setSec sold inst
+ in Container.add sdx snew ac2
+ else ac2
-- | Remove non-selected tags from the exclusion list.
filterExTags :: [String] -> Instance.Instance -> Instance.Instance
filterExTags tl inst =
- let old_tags = Instance.tags inst
- new_tags = filter (\tag -> any (`isPrefixOf` tag) tl)
- old_tags
- in inst { Instance.tags = new_tags }
+ let old_tags = Instance.tags inst
+ new_tags = filter (\tag -> any (`isPrefixOf` tag) tl) old_tags
+ in inst { Instance.tags = new_tags }
-- | Update the movable attribute.
updateMovable :: [String] -- ^ Selected instances (if not empty)
-> Instance.Instance -- ^ Target Instance
-> Instance.Instance -- ^ Target Instance with updated attribute
updateMovable selinsts exinsts inst =
- if Instance.sNode inst == Node.noSecondary ||
- Instance.name inst `elem` exinsts ||
- not (null selinsts || Instance.name inst `elem` selinsts)
+ if Instance.sNode inst == Node.noSecondary ||
+ Instance.name inst `elem` exinsts ||
+ not (null selinsts || Instance.name inst `elem` selinsts)
then Instance.setMovable inst False
else inst
longestDomain :: [String] -> String
longestDomain [] = ""
longestDomain (x:xs) =
- foldr (\ suffix accu -> if all (isSuffixOf suffix) xs
- then suffix
- else accu)
- "" $ filter (isPrefixOf ".") (tails x)
+ foldr (\ suffix accu -> if all (isSuffixOf suffix) xs
+ then suffix
+ else accu)
+ "" $ filter (isPrefixOf ".") (tails x)
-- | Extracts the exclusion tags from the cluster configuration.
extractExTags :: [String] -> [String]
extractExTags =
- map (drop (length exTagsPrefix)) .
- filter (isPrefixOf exTagsPrefix)
+ map (drop (length exTagsPrefix)) .
+ filter (isPrefixOf exTagsPrefix)
-- | Extracts the common suffix from node\/instance names.
commonSuffix :: Node.List -> Instance.List -> String
commonSuffix nl il =
- let node_names = map Node.name $ Container.elems nl
- inst_names = map Instance.name $ Container.elems il
- in longestDomain (node_names ++ inst_names)
+ let node_names = map Node.name $ Container.elems nl
+ inst_names = map Instance.name $ Container.elems il
+ in longestDomain (node_names ++ inst_names)
-- | Initializer function that loads the data from a node and instance
-- list and massages it into the correct format.
(\ msgs node ->
let nname = Node.name node
nilst = map (`Container.find` il) (Node.pList node)
- dilst = filter (not . Instance.running) nilst
+ dilst = filter Instance.instanceDown nilst
adj_mem = sum . map Instance.mem $ dilst
delta_mem = truncate (Node.tMem node)
- Node.nMem node
(Node.fMem node - adj_mem)
umsg1 =
if delta_mem > 512 || delta_dsk > 1024
- then (printf "node %s is missing %d MB ram \
- \and %d GB disk"
- nname delta_mem (delta_dsk `div` 1024)):
- msgs
+ then printf "node %s is missing %d MB ram \
+ \and %d GB disk"
+ nname delta_mem (delta_dsk `div` 1024):msgs
else msgs
in (umsg1, newn)
) [] nl
-- | Compute the amount of memory used by primary instances on a node.
nodeImem :: Node.Node -> Instance.List -> Int
nodeImem node il =
- let rfind = flip Container.find il
- in sum . map (Instance.mem . rfind)
- $ Node.pList node
+ let rfind = flip Container.find il
+ il' = map rfind $ Node.pList node
+ oil' = filter (not . Instance.instanceOffline) il'
+ in sum . map Instance.mem $ oil'
+
-- | Compute the amount of disk used by instances on a node (either primary
-- or secondary).
nodeIdsk :: Node.Node -> Instance.List -> Int
nodeIdsk node il =
- let rfind = flip Container.find il
- in sum . map (Instance.dsk . rfind)
- $ Node.pList node ++ Node.sList node
+ let rfind = flip Container.find il
+ in sum . map (Instance.dsk . rfind)
+ $ Node.pList node ++ Node.sList node
-}
module Ganeti.HTools.Luxi
- (
- loadData
- , parseData
- ) where
+ ( loadData
+ , parseData
+ ) where
import qualified Control.Exception as E
import Text.JSON.Types
import qualified Ganeti.HTools.Group as Group
import qualified Ganeti.HTools.Node as Node
import qualified Ganeti.HTools.Instance as Instance
-import Ganeti.HTools.Utils (fromJVal, annotateResult, tryFromObj, asJSObject)
+import Ganeti.HTools.Utils (fromJVal, annotateResult, tryFromObj, asJSObject,
+ fromObj)
+
+{-# ANN module "HLint: ignore Eta reduce" #-}
-- * Utility functions
--- | Ensure a given JSValue is actually a JSArray.
-toArray :: (Monad m) => JSValue -> m [JSValue]
-toArray v =
- case v of
- JSArray arr -> return arr
- o -> fail ("Invalid input, expected array but got " ++ show o)
+-- | Get values behind \"data\" part of the result.
+getData :: (Monad m) => JSValue -> m JSValue
+getData (JSObject o) = fromObj (fromJSObject o) "data"
+getData x = fail $ "Invalid input, expected dict entry but got " ++ show x
+
+-- | Converts a (status, value) into m value, if possible.
+parseQueryField :: (Monad m) => JSValue -> m (JSValue, JSValue)
+parseQueryField (JSArray [status, result]) = return (status, result)
+parseQueryField o =
+ fail $ "Invalid query field, expected (status, value) but got " ++ show o
+
+-- | Parse a result row.
+parseQueryRow :: (Monad m) => JSValue -> m [(JSValue, JSValue)]
+parseQueryRow (JSArray arr) = mapM parseQueryField arr
+parseQueryRow o =
+ fail $ "Invalid query row result, expected array but got " ++ show o
+
+-- | Parse an overall query result and get the [(status, value)] list
+-- for each element queried.
+parseQueryResult :: (Monad m) => JSValue -> m [[(JSValue, JSValue)]]
+parseQueryResult (JSArray arr) = mapM parseQueryRow arr
+parseQueryResult o =
+ fail $ "Invalid query result, expected array but got " ++ show o
+
+-- | Prepare resulting output as parsers expect it.
+extractArray :: (Monad m) => JSValue -> m [[(JSValue, JSValue)]]
+extractArray v =
+ getData v >>= parseQueryResult
+
+-- | Testing result status for more verbose error message.
+fromJValWithStatus :: (Text.JSON.JSON a, Monad m) => (JSValue, JSValue) -> m a
+fromJValWithStatus (st, v) = do
+ st' <- fromJVal st
+ L.checkRS st' v >>= fromJVal
-- | Annotate errors when converting values with owner/attribute for
-- better debugging.
genericConvert :: (Text.JSON.JSON a) =>
- String -- ^ The object type
- -> String -- ^ The object name
- -> String -- ^ The attribute we're trying to convert
- -> JSValue -- ^ The value we try to convert
- -> Result a -- ^ The annotated result
+ String -- ^ The object type
+ -> String -- ^ The object name
+ -> String -- ^ The attribute we're trying to convert
+ -> (JSValue, JSValue) -- ^ The value we're trying to convert
+ -> Result a -- ^ The annotated result
genericConvert otype oname oattr =
- annotateResult (otype ++ " '" ++ oname ++
- "', error while reading attribute '" ++
- oattr ++ "'") . fromJVal
+ annotateResult (otype ++ " '" ++ oname ++
+ "', error while reading attribute '" ++
+ oattr ++ "'") . fromJValWithStatus
-- * Data querying functionality
-- | The input data for node query.
queryNodesMsg :: L.LuxiOp
queryNodesMsg =
- L.QueryNodes [] ["name", "mtotal", "mnode", "mfree", "dtotal", "dfree",
- "ctotal", "offline", "drained", "vm_capable",
- "group.uuid"] False
+ L.Query L.QRNode ["name", "mtotal", "mnode", "mfree", "dtotal", "dfree",
+ "ctotal", "offline", "drained", "vm_capable",
+ "group.uuid"] ()
-- | The input data for instance query.
queryInstancesMsg :: L.LuxiOp
queryInstancesMsg =
- L.QueryInstances [] ["name", "disk_usage", "be/memory", "be/vcpus",
- "status", "pnode", "snodes", "tags", "oper_ram",
- "be/auto_balance", "disk_template"] False
+ L.Query L.QRInstance ["name", "disk_usage", "be/memory", "be/vcpus",
+ "status", "pnode", "snodes", "tags", "oper_ram",
+ "be/auto_balance", "disk_template"] ()
-- | The input data for cluster query.
queryClusterInfoMsg :: L.LuxiOp
-- | The input data for node group query.
queryGroupsMsg :: L.LuxiOp
queryGroupsMsg =
- L.QueryGroups [] ["uuid", "name", "alloc_policy"] False
+ L.Query L.QRGroup ["uuid", "name", "alloc_policy"] ()
-- | Wrapper over 'callMethod' doing node query.
queryNodes :: L.Client -> IO (Result JSValue)
getInstances :: NameAssoc
-> JSValue
-> Result [(String, Instance.Instance)]
-getInstances ktn arr = toArray arr >>= mapM (parseInstance ktn)
+getInstances ktn arr = extractArray arr >>= mapM (parseInstance ktn)
-- | Construct an instance from a JSON object.
parseInstance :: NameAssoc
- -> JSValue
+ -> [(JSValue, JSValue)]
-> Result (String, Instance.Instance)
-parseInstance ktn (JSArray [ name, disk, mem, vcpus
- , status, pnode, snodes, tags, oram
- , auto_balance, disk_template ]) = do
- xname <- annotateResult "Parsing new instance" (fromJVal name)
+parseInstance ktn [ name, disk, mem, vcpus
+ , status, pnode, snodes, tags, oram
+ , auto_balance, disk_template ] = do
+ xname <- annotateResult "Parsing new instance" (fromJValWithStatus name)
let convert a = genericConvert "Instance" xname a
xdisk <- convert "disk_usage" disk
- xmem <- (case oram of
- JSRational _ _ -> convert "oper_ram" oram
- _ -> convert "be/memory" mem)
+ xmem <- case oram of -- FIXME: remove the "guessing"
+ (_, JSRational _ _) -> convert "oper_ram" oram
+ _ -> convert "be/memory" mem
xvcpus <- convert "be/vcpus" vcpus
xpnode <- convert "pnode" pnode >>= lookupNode ktn xname
xsnodes <- convert "snodes" snodes::Result [JSString]
- snode <- (if null xsnodes then return Node.noSecondary
- else lookupNode ktn xname (fromJSString $ head xsnodes))
+ snode <- if null xsnodes
+ then return Node.noSecondary
+ else lookupNode ktn xname (fromJSString $ head xsnodes)
xrunning <- convert "status" status
xtags <- convert "tags" tags
xauto_balance <- convert "auto_balance" auto_balance
-- | Parse a node list in JSON format.
getNodes :: NameAssoc -> JSValue -> Result [(String, Node.Node)]
-getNodes ktg arr = toArray arr >>= mapM (parseNode ktg)
+getNodes ktg arr = extractArray arr >>= mapM (parseNode ktg)
-- | Construct a node from a JSON object.
-parseNode :: NameAssoc -> JSValue -> Result (String, Node.Node)
-parseNode ktg (JSArray [ name, mtotal, mnode, mfree, dtotal, dfree
- , ctotal, offline, drained, vm_capable, g_uuid ])
+parseNode :: NameAssoc -> [(JSValue, JSValue)] -> Result (String, Node.Node)
+parseNode ktg [ name, mtotal, mnode, mfree, dtotal, dfree
+ , ctotal, offline, drained, vm_capable, g_uuid ]
= do
- xname <- annotateResult "Parsing new node" (fromJVal name)
+ xname <- annotateResult "Parsing new node" (fromJValWithStatus name)
let convert a = genericConvert "Node" xname a
xoffline <- convert "offline" offline
xdrained <- convert "drained" drained
xvm_capable <- convert "vm_capable" vm_capable
xgdx <- convert "group.uuid" g_uuid >>= lookupGroup ktg xname
- node <- (if xoffline || xdrained || not xvm_capable
- then return $ Node.create xname 0 0 0 0 0 0 True xgdx
- else do
- xmtotal <- convert "mtotal" mtotal
- xmnode <- convert "mnode" mnode
- xmfree <- convert "mfree" mfree
- xdtotal <- convert "dtotal" dtotal
- xdfree <- convert "dfree" dfree
- xctotal <- convert "ctotal" ctotal
- return $ Node.create xname xmtotal xmnode xmfree
- xdtotal xdfree xctotal False xgdx)
+ node <- if xoffline || xdrained || not xvm_capable
+ then return $ Node.create xname 0 0 0 0 0 0 True xgdx
+ else do
+ xmtotal <- convert "mtotal" mtotal
+ xmnode <- convert "mnode" mnode
+ xmfree <- convert "mfree" mfree
+ xdtotal <- convert "dtotal" dtotal
+ xdfree <- convert "dfree" dfree
+ xctotal <- convert "ctotal" ctotal
+ return $ Node.create xname xmtotal xmnode xmfree
+ xdtotal xdfree xctotal False xgdx
return (xname, node)
parseNode _ v = fail ("Invalid node query result: " ++ show v)
-- | Parses the cluster groups.
getGroups :: JSValue -> Result [(String, Group.Group)]
-getGroups arr = toArray arr >>= mapM parseGroup
+getGroups jsv = extractArray jsv >>= mapM parseGroup
-- | Parses a given group information.
-parseGroup :: JSValue -> Result (String, Group.Group)
-parseGroup (JSArray [ uuid, name, apol ]) = do
- xname <- annotateResult "Parsing new group" (fromJVal name)
+parseGroup :: [(JSValue, JSValue)] -> Result (String, Group.Group)
+parseGroup [uuid, name, apol] = do
+ xname <- annotateResult "Parsing new group" (fromJValWithStatus name)
let convert a = genericConvert "Group" xname a
xuuid <- convert "uuid" uuid
xapol <- convert "alloc_policy" apol
-}
module Ganeti.HTools.Node
- ( Node(..)
- , List
- -- * Constructor
- , create
- -- ** Finalization after data loading
- , buildPeers
- , setIdx
- , setAlias
- , setOffline
- , setXmem
- , setFmem
- , setPri
- , setSec
- , setMdsk
- , setMcpu
- -- * Tag maps
- , addTags
- , delTags
- , rejectAddTags
- -- * Instance (re)location
- , removePri
- , removeSec
- , addPri
- , addPriEx
- , addSec
- , addSecEx
- -- * Stats
- , availDisk
- , availMem
- , availCpu
- , iMem
- , iDsk
- , conflictingPrimaries
- -- * Formatting
- , defaultFields
- , showHeader
- , showField
- , list
- -- * Misc stuff
- , AssocList
- , AllocElement
- , noSecondary
- , computeGroups
- ) where
+ ( Node(..)
+ , List
+ -- * Constructor
+ , create
+ -- ** Finalization after data loading
+ , buildPeers
+ , setIdx
+ , setAlias
+ , setOffline
+ , setXmem
+ , setFmem
+ , setPri
+ , setSec
+ , setMdsk
+ , setMcpu
+ -- * Tag maps
+ , addTags
+ , delTags
+ , rejectAddTags
+ -- * Instance (re)location
+ , removePri
+ , removeSec
+ , addPri
+ , addPriEx
+ , addSec
+ , addSecEx
+ -- * Stats
+ , availDisk
+ , availMem
+ , availCpu
+ , iMem
+ , iDsk
+ , conflictingPrimaries
+ -- * Formatting
+ , defaultFields
+ , showHeader
+ , showField
+ , list
+ -- * Misc stuff
+ , AssocList
+ , AllocElement
+ , noSecondary
+ , computeGroups
+ ) where
import Data.List hiding (group)
import qualified Data.Map as Map
-- | The node type.
data Node = Node
- { name :: String -- ^ The node name
- , alias :: String -- ^ The shortened name (for display purposes)
- , tMem :: Double -- ^ Total memory (MiB)
- , nMem :: Int -- ^ Node memory (MiB)
- , fMem :: Int -- ^ Free memory (MiB)
- , xMem :: Int -- ^ Unaccounted memory (MiB)
- , tDsk :: Double -- ^ Total disk space (MiB)
- , fDsk :: Int -- ^ Free disk space (MiB)
- , tCpu :: Double -- ^ Total CPU count
- , uCpu :: Int -- ^ Used VCPU count
- , pList :: [T.Idx] -- ^ List of primary instance indices
- , sList :: [T.Idx] -- ^ List of secondary instance indices
- , idx :: T.Ndx -- ^ Internal index for book-keeping
- , peers :: P.PeerMap -- ^ Pnode to instance mapping
- , failN1 :: Bool -- ^ Whether the node has failed n1
- , rMem :: Int -- ^ Maximum memory needed for failover by
- -- primaries of this node
- , pMem :: Double -- ^ Percent of free memory
- , pDsk :: Double -- ^ Percent of free disk
- , pRem :: Double -- ^ Percent of reserved memory
- , pCpu :: Double -- ^ Ratio of virtual to physical CPUs
- , mDsk :: Double -- ^ Minimum free disk ratio
- , mCpu :: Double -- ^ Max ratio of virt-to-phys CPUs
- , loDsk :: Int -- ^ Autocomputed from mDsk low disk
- -- threshold
- , hiCpu :: Int -- ^ Autocomputed from mCpu high cpu
- -- threshold
- , offline :: Bool -- ^ Whether the node should not be used
- -- for allocations and skipped from score
- -- computations
- , utilPool :: T.DynUtil -- ^ Total utilisation capacity
- , utilLoad :: T.DynUtil -- ^ Sum of instance utilisation
- , pTags :: TagMap -- ^ Map of primary instance tags and their count
- , group :: T.Gdx -- ^ The node's group (index)
- } deriving (Show, Read, Eq)
+ { name :: String -- ^ The node name
+ , alias :: String -- ^ The shortened name (for display purposes)
+ , tMem :: Double -- ^ Total memory (MiB)
+ , nMem :: Int -- ^ Node memory (MiB)
+ , fMem :: Int -- ^ Free memory (MiB)
+ , xMem :: Int -- ^ Unaccounted memory (MiB)
+ , tDsk :: Double -- ^ Total disk space (MiB)
+ , fDsk :: Int -- ^ Free disk space (MiB)
+ , tCpu :: Double -- ^ Total CPU count
+ , uCpu :: Int -- ^ Used VCPU count
+ , pList :: [T.Idx] -- ^ List of primary instance indices
+ , sList :: [T.Idx] -- ^ List of secondary instance indices
+ , idx :: T.Ndx -- ^ Internal index for book-keeping
+ , peers :: P.PeerMap -- ^ Pnode to instance mapping
+ , failN1 :: Bool -- ^ Whether the node has failed n1
+ , rMem :: Int -- ^ Maximum memory needed for failover by
+ -- primaries of this node
+ , pMem :: Double -- ^ Percent of free memory
+ , pDsk :: Double -- ^ Percent of free disk
+ , pRem :: Double -- ^ Percent of reserved memory
+ , pCpu :: Double -- ^ Ratio of virtual to physical CPUs
+ , mDsk :: Double -- ^ Minimum free disk ratio
+ , mCpu :: Double -- ^ Max ratio of virt-to-phys CPUs
+ , loDsk :: Int -- ^ Autocomputed from mDsk low disk
+ -- threshold
+ , hiCpu :: Int -- ^ Autocomputed from mCpu high cpu
+ -- threshold
+ , offline :: Bool -- ^ Whether the node should not be used for
+ -- allocations and skipped from score
+ -- computations
+ , utilPool :: T.DynUtil -- ^ Total utilisation capacity
+ , utilLoad :: T.DynUtil -- ^ Sum of instance utilisation
+ , pTags :: TagMap -- ^ Map of primary instance tags and their count
+ , group :: T.Gdx -- ^ The node's group (index)
+ } deriving (Show, Read, Eq)
instance T.Element Node where
- nameOf = name
- idxOf = idx
- setAlias = setAlias
- setIdx = setIdx
- allNames n = [name n, alias n]
+ nameOf = name
+ idxOf = idx
+ setAlias = setAlias
+ setIdx = setIdx
+ allNames n = [name n, alias n]
-- | A simple name for the int, node association list.
type AssocList = [(T.Ndx, Node)]
-- | Adjust or delete a tag from a tagmap.
delTag :: TagMap -> String -> TagMap
delTag t s = Map.update (\v -> if v > 1
- then Just (v-1)
- else Nothing)
+ then Just (v-1)
+ else Nothing)
s t
-- | Remove multiple tags.
-> Int -> Double -> Bool -> T.Gdx -> Node
create name_init mem_t_init mem_n_init mem_f_init
dsk_t_init dsk_f_init cpu_t_init offline_init group_init =
- Node { name = name_init
- , alias = name_init
- , tMem = mem_t_init
- , nMem = mem_n_init
- , fMem = mem_f_init
- , tDsk = dsk_t_init
- , fDsk = dsk_f_init
- , tCpu = cpu_t_init
- , uCpu = 0
- , pList = []
- , sList = []
- , failN1 = True
- , idx = -1
- , peers = P.empty
- , rMem = 0
- , pMem = fromIntegral mem_f_init / mem_t_init
- , pDsk = fromIntegral dsk_f_init / dsk_t_init
- , pRem = 0
- , pCpu = 0
- , offline = offline_init
- , xMem = 0
- , mDsk = T.defReservedDiskRatio
- , mCpu = T.defVcpuRatio
- , loDsk = mDskToloDsk T.defReservedDiskRatio dsk_t_init
- , hiCpu = mCpuTohiCpu T.defVcpuRatio cpu_t_init
- , utilPool = T.baseUtil
- , utilLoad = T.zeroUtil
- , pTags = Map.empty
- , group = group_init
- }
+ Node { name = name_init
+ , alias = name_init
+ , tMem = mem_t_init
+ , nMem = mem_n_init
+ , fMem = mem_f_init
+ , tDsk = dsk_t_init
+ , fDsk = dsk_f_init
+ , tCpu = cpu_t_init
+ , uCpu = 0
+ , pList = []
+ , sList = []
+ , failN1 = True
+ , idx = -1
+ , peers = P.empty
+ , rMem = 0
+ , pMem = fromIntegral mem_f_init / mem_t_init
+ , pDsk = fromIntegral dsk_f_init / dsk_t_init
+ , pRem = 0
+ , pCpu = 0
+ , offline = offline_init
+ , xMem = 0
+ , mDsk = T.defReservedDiskRatio
+ , mCpu = T.defVcpuRatio
+ , loDsk = mDskToloDsk T.defReservedDiskRatio dsk_t_init
+ , hiCpu = mCpuTohiCpu T.defVcpuRatio cpu_t_init
+ , utilPool = T.baseUtil
+ , utilLoad = T.zeroUtil
+ , pTags = Map.empty
+ , group = group_init
+ }
-- | Conversion formula from mDsk\/tDsk to loDsk.
mDskToloDsk :: Double -> Double -> Int
-mDskToloDsk mval tdsk = floor (mval * tdsk)
+mDskToloDsk mval = floor . (mval *)
-- | Conversion formula from mCpu\/tCpu to hiCpu.
mCpuTohiCpu :: Double -> Double -> Int
-mCpuTohiCpu mval tcpu = floor (mval * tcpu)
+mCpuTohiCpu mval = floor . (mval *)
-- | Changes the index.
--
-- | Builds the peer map for a given node.
buildPeers :: Node -> Instance.List -> Node
buildPeers t il =
- let mdata = map
- (\i_idx -> let inst = Container.find i_idx il
- mem = if Instance.autoBalance inst
+ let mdata = map
+ (\i_idx -> let inst = Container.find i_idx il
+ mem = if Instance.autoBalance inst
then Instance.mem inst
else 0
- in (Instance.pNode inst, mem))
- (sList t)
- pmap = P.accumArray (+) mdata
- new_rmem = computeMaxRes pmap
- new_failN1 = fMem t <= new_rmem
- new_prem = fromIntegral new_rmem / tMem t
- in t {peers=pmap, failN1 = new_failN1, rMem = new_rmem, pRem = new_prem}
+ in (Instance.pNode inst, mem))
+ (sList t)
+ pmap = P.accumArray (+) mdata
+ new_rmem = computeMaxRes pmap
+ new_failN1 = fMem t <= new_rmem
+ new_prem = fromIntegral new_rmem / tMem t
+ in t {peers=pmap, failN1 = new_failN1, rMem = new_rmem, pRem = new_prem}
-- | Assigns an instance to a node as primary and update the used VCPU
-- count, utilisation data and tags map.
, utilLoad = utilLoad t `T.addUtil` Instance.util inst
, pTags = addTags (pTags t) (Instance.tags inst)
}
- where new_count = uCpu t + Instance.vcpus inst
+ where new_count = uCpu t + Instance.vcpus inst
-- | Assigns an instance to a node as secondary without other updates.
setSec :: Node -> Instance.Instance -> Node
, utilLoad = old_load { T.dskWeight = T.dskWeight old_load +
T.dskWeight (Instance.util inst) }
}
- where old_load = utilLoad t
+ where old_load = utilLoad t
-- * Update functions
-- | Sets the free memory.
setFmem :: Node -> Int -> Node
setFmem t new_mem =
- let new_n1 = new_mem <= rMem t
- new_mp = fromIntegral new_mem / tMem t
- in t { fMem = new_mem, failN1 = new_n1, pMem = new_mp }
+ let new_n1 = new_mem <= rMem t
+ new_mp = fromIntegral new_mem / tMem t
+ in t { fMem = new_mem, failN1 = new_n1, pMem = new_mp }
-- | Removes a primary instance.
removePri :: Node -> Instance.Instance -> Node
removePri t inst =
- let iname = Instance.idx inst
- new_plist = delete iname (pList t)
- new_mem = fMem t + Instance.mem inst
- new_dsk = fDsk t + Instance.dsk inst
- new_mp = fromIntegral new_mem / tMem t
- new_dp = fromIntegral new_dsk / tDsk t
- new_failn1 = new_mem <= rMem t
- new_ucpu = uCpu t - Instance.vcpus inst
- new_rcpu = fromIntegral new_ucpu / tCpu t
- new_load = utilLoad t `T.subUtil` Instance.util inst
- in t { pList = new_plist, fMem = new_mem, fDsk = new_dsk
- , failN1 = new_failn1, pMem = new_mp, pDsk = new_dp
- , uCpu = new_ucpu, pCpu = new_rcpu, utilLoad = new_load
- , pTags = delTags (pTags t) (Instance.tags inst) }
+ let iname = Instance.idx inst
+ new_plist = delete iname (pList t)
+ new_mem = Instance.applyIfOnline inst (+ Instance.mem inst) (fMem t)
+ new_dsk = fDsk t + Instance.dsk inst
+ new_mp = fromIntegral new_mem / tMem t
+ new_dp = fromIntegral new_dsk / tDsk t
+ new_failn1 = new_mem <= rMem t
+ new_ucpu = Instance.applyIfOnline inst
+ (\x -> x - Instance.vcpus inst) (uCpu t)
+ new_rcpu = fromIntegral new_ucpu / tCpu t
+ new_load = utilLoad t `T.subUtil` Instance.util inst
+ in t { pList = new_plist, fMem = new_mem, fDsk = new_dsk
+ , failN1 = new_failn1, pMem = new_mp, pDsk = new_dp
+ , uCpu = new_ucpu, pCpu = new_rcpu, utilLoad = new_load
+ , pTags = delTags (pTags t) (Instance.tags inst) }
-- | Removes a secondary instance.
removeSec :: Node -> Instance.Instance -> Node
removeSec t inst =
- let iname = Instance.idx inst
- uses_disk = Instance.usesLocalStorage inst
- cur_dsk = fDsk t
- pnode = Instance.pNode inst
- new_slist = delete iname (sList t)
- new_dsk = if uses_disk
+ let iname = Instance.idx inst
+ cur_dsk = fDsk t
+ pnode = Instance.pNode inst
+ new_slist = delete iname (sList t)
+ new_dsk = if Instance.usesLocalStorage inst
then cur_dsk + Instance.dsk inst
else cur_dsk
- old_peers = peers t
- old_peem = P.find pnode old_peers
- new_peem = if Instance.autoBalance inst
+ old_peers = peers t
+ old_peem = P.find pnode old_peers
+ new_peem = if Instance.autoBalance inst
then old_peem - Instance.mem inst
else old_peem
- new_peers = if new_peem > 0
+ new_peers = if new_peem > 0
then P.add pnode new_peem old_peers
else P.remove pnode old_peers
- old_rmem = rMem t
- new_rmem = if old_peem < old_rmem
+ old_rmem = rMem t
+ new_rmem = if old_peem < old_rmem
then old_rmem
else computeMaxRes new_peers
- new_prem = fromIntegral new_rmem / tMem t
- new_failn1 = fMem t <= new_rmem
- new_dp = fromIntegral new_dsk / tDsk t
- old_load = utilLoad t
- new_load = old_load { T.dskWeight = T.dskWeight old_load -
- T.dskWeight (Instance.util inst) }
- in t { sList = new_slist, fDsk = new_dsk, peers = new_peers
- , failN1 = new_failn1, rMem = new_rmem, pDsk = new_dp
- , pRem = new_prem, utilLoad = new_load }
+ new_prem = fromIntegral new_rmem / tMem t
+ new_failn1 = fMem t <= new_rmem
+ new_dp = fromIntegral new_dsk / tDsk t
+ old_load = utilLoad t
+ new_load = old_load { T.dskWeight = T.dskWeight old_load -
+ T.dskWeight (Instance.util inst) }
+ in t { sList = new_slist, fDsk = new_dsk, peers = new_peers
+ , failN1 = new_failn1, rMem = new_rmem, pDsk = new_dp
+ , pRem = new_prem, utilLoad = new_load }
-- | Adds a primary instance (basic version).
addPri :: Node -> Instance.Instance -> T.OpResult Node
-- either the new version of the node
-- or a failure mode
addPriEx force t inst =
- let iname = Instance.idx inst
- uses_disk = Instance.usesLocalStorage inst
- cur_dsk = fDsk t
- new_mem = fMem t - Instance.mem inst
- new_dsk = if uses_disk
+ let iname = Instance.idx inst
+ uses_disk = Instance.usesLocalStorage inst
+ cur_dsk = fDsk t
+ new_mem = Instance.applyIfOnline inst
+ (\x -> x - Instance.mem inst) (fMem t)
+ new_dsk = if uses_disk
then cur_dsk - Instance.dsk inst
else cur_dsk
- new_failn1 = new_mem <= rMem t
- new_ucpu = uCpu t + Instance.vcpus inst
- new_pcpu = fromIntegral new_ucpu / tCpu t
- new_dp = fromIntegral new_dsk / tDsk t
- l_cpu = mCpu t
- new_load = utilLoad t `T.addUtil` Instance.util inst
- inst_tags = Instance.tags inst
- old_tags = pTags t
- strict = not force
- in case () of
- _ | new_mem <= 0 -> T.OpFail T.FailMem
- | uses_disk && new_dsk <= 0 -> T.OpFail T.FailDisk
- | uses_disk && mDsk t > new_dp && strict -> T.OpFail T.FailDisk
- | new_failn1 && not (failN1 t) && strict -> T.OpFail T.FailMem
- | l_cpu >= 0 && l_cpu < new_pcpu && strict -> T.OpFail T.FailCPU
- | rejectAddTags old_tags inst_tags -> T.OpFail T.FailTags
- | otherwise ->
- let new_plist = iname:pList t
- new_mp = fromIntegral new_mem / tMem t
- r = t { pList = new_plist, fMem = new_mem, fDsk = new_dsk
- , failN1 = new_failn1, pMem = new_mp, pDsk = new_dp
- , uCpu = new_ucpu, pCpu = new_pcpu
- , utilLoad = new_load
- , pTags = addTags old_tags inst_tags }
- in T.OpGood r
+ new_failn1 = new_mem <= rMem t
+ new_ucpu = Instance.applyIfOnline inst (+ Instance.vcpus inst) (uCpu t)
+ new_pcpu = fromIntegral new_ucpu / tCpu t
+ new_dp = fromIntegral new_dsk / tDsk t
+ l_cpu = mCpu t
+ new_load = utilLoad t `T.addUtil` Instance.util inst
+ inst_tags = Instance.tags inst
+ old_tags = pTags t
+ strict = not force
+ in case () of
+ _ | new_mem <= 0 -> T.OpFail T.FailMem
+ | uses_disk && new_dsk <= 0 -> T.OpFail T.FailDisk
+ | uses_disk && mDsk t > new_dp && strict -> T.OpFail T.FailDisk
+ | new_failn1 && not (failN1 t) && strict -> T.OpFail T.FailMem
+ | l_cpu >= 0 && l_cpu < new_pcpu && strict -> T.OpFail T.FailCPU
+ | rejectAddTags old_tags inst_tags -> T.OpFail T.FailTags
+ | otherwise ->
+ let new_plist = iname:pList t
+ new_mp = fromIntegral new_mem / tMem t
+ r = t { pList = new_plist, fMem = new_mem, fDsk = new_dsk
+ , failN1 = new_failn1, pMem = new_mp, pDsk = new_dp
+ , uCpu = new_ucpu, pCpu = new_pcpu
+ , utilLoad = new_load
+ , pTags = addTags old_tags inst_tags }
+ in T.OpGood r
-- | Adds a secondary instance (basic version).
addSec :: Node -> Instance.Instance -> T.Ndx -> T.OpResult Node
-- | Adds a secondary instance (extended version).
addSecEx :: Bool -> Node -> Instance.Instance -> T.Ndx -> T.OpResult Node
addSecEx force t inst pdx =
- let iname = Instance.idx inst
- old_peers = peers t
- old_mem = fMem t
- new_dsk = fDsk t - Instance.dsk inst
- secondary_needed_mem = if Instance.autoBalance inst
+ let iname = Instance.idx inst
+ old_peers = peers t
+ old_mem = fMem t
+ new_dsk = fDsk t - Instance.dsk inst
+ secondary_needed_mem = if Instance.autoBalance inst &&
+ not (Instance.instanceOffline inst)
then Instance.mem inst
else 0
- new_peem = P.find pdx old_peers + secondary_needed_mem
- new_peers = P.add pdx new_peem old_peers
- new_rmem = max (rMem t) new_peem
- new_prem = fromIntegral new_rmem / tMem t
- new_failn1 = old_mem <= new_rmem
- new_dp = fromIntegral new_dsk / tDsk t
- old_load = utilLoad t
- new_load = old_load { T.dskWeight = T.dskWeight old_load +
- T.dskWeight (Instance.util inst) }
- strict = not force
- in case () of
- _ | not (Instance.hasSecondary inst) -> T.OpFail T.FailDisk
- | new_dsk <= 0 -> T.OpFail T.FailDisk
- | mDsk t > new_dp && strict -> T.OpFail T.FailDisk
- | secondary_needed_mem >= old_mem && strict -> T.OpFail T.FailMem
- | new_failn1 && not (failN1 t) && strict -> T.OpFail T.FailMem
- | otherwise ->
- let new_slist = iname:sList t
- r = t { sList = new_slist, fDsk = new_dsk
- , peers = new_peers, failN1 = new_failn1
- , rMem = new_rmem, pDsk = new_dp
- , pRem = new_prem, utilLoad = new_load }
- in T.OpGood r
+ new_peem = P.find pdx old_peers + secondary_needed_mem
+ new_peers = P.add pdx new_peem old_peers
+ new_rmem = max (rMem t) new_peem
+ new_prem = fromIntegral new_rmem / tMem t
+ new_failn1 = old_mem <= new_rmem
+ new_dp = fromIntegral new_dsk / tDsk t
+ old_load = utilLoad t
+ new_load = old_load { T.dskWeight = T.dskWeight old_load +
+ T.dskWeight (Instance.util inst) }
+ strict = not force
+ in case () of
+ _ | not (Instance.hasSecondary inst) -> T.OpFail T.FailDisk
+ | new_dsk <= 0 -> T.OpFail T.FailDisk
+ | mDsk t > new_dp && strict -> T.OpFail T.FailDisk
+ | secondary_needed_mem >= old_mem && strict -> T.OpFail T.FailMem
+ | new_failn1 && not (failN1 t) && strict -> T.OpFail T.FailMem
+ | otherwise ->
+ let new_slist = iname:sList t
+ r = t { sList = new_slist, fDsk = new_dsk
+ , peers = new_peers, failN1 = new_failn1
+ , rMem = new_rmem, pDsk = new_dp
+ , pRem = new_prem, utilLoad = new_load }
+ in T.OpGood r
-- * Stats functions
-- | Computes the amount of available disk on a given node.
availDisk :: Node -> Int
availDisk t =
- let _f = fDsk t
- _l = loDsk t
- in if _f < _l
+ let _f = fDsk t
+ _l = loDsk t
+ in if _f < _l
then 0
else _f - _l
-- | Computes the amount of available memory on a given node.
availMem :: Node -> Int
availMem t =
- let _f = fMem t
- _l = rMem t
- in if _f < _l
+ let _f = fMem t
+ _l = rMem t
+ in if _f < _l
then 0
else _f - _l
-- | Computes the amount of available memory on a given node.
availCpu :: Node -> Int
availCpu t =
- let _u = uCpu t
- _l = hiCpu t
- in if _l >= _u
+ let _u = uCpu t
+ _l = hiCpu t
+ in if _l >= _u
then _l - _u
else 0
-> String -- ^ Field name
-> String -- ^ Field value as string
showField t field =
- case field of
- "idx" -> printf "%4d" $ idx t
- "name" -> alias t
- "fqdn" -> name t
- "status" -> case () of
- _ | offline t -> "-"
- | failN1 t -> "*"
- | otherwise -> " "
- "tmem" -> printf "%5.0f" $ tMem t
- "nmem" -> printf "%5d" $ nMem t
- "xmem" -> printf "%5d" $ xMem t
- "fmem" -> printf "%5d" $ fMem t
- "imem" -> printf "%5d" $ iMem t
- "rmem" -> printf "%5d" $ rMem t
- "amem" -> printf "%5d" $ fMem t - rMem t
- "tdsk" -> printf "%5.0f" $ tDsk t / 1024
- "fdsk" -> printf "%5d" $ fDsk t `div` 1024
- "tcpu" -> printf "%4.0f" $ tCpu t
- "ucpu" -> printf "%4d" $ uCpu t
- "pcnt" -> printf "%3d" $ length (pList t)
- "scnt" -> printf "%3d" $ length (sList t)
- "plist" -> show $ pList t
- "slist" -> show $ sList t
- "pfmem" -> printf "%6.4f" $ pMem t
- "pfdsk" -> printf "%6.4f" $ pDsk t
- "rcpu" -> printf "%5.2f" $ pCpu t
- "cload" -> printf "%5.3f" uC
- "mload" -> printf "%5.3f" uM
- "dload" -> printf "%5.3f" uD
- "nload" -> printf "%5.3f" uN
- "ptags" -> intercalate "," . map (uncurry (printf "%s=%d")) .
- Map.toList $ pTags t
- "peermap" -> show $ peers t
- _ -> T.unknownField
- where
- T.DynUtil { T.cpuWeight = uC, T.memWeight = uM,
- T.dskWeight = uD, T.netWeight = uN } = utilLoad t
+ case field of
+ "idx" -> printf "%4d" $ idx t
+ "name" -> alias t
+ "fqdn" -> name t
+ "status" -> case () of
+ _ | offline t -> "-"
+ | failN1 t -> "*"
+ | otherwise -> " "
+ "tmem" -> printf "%5.0f" $ tMem t
+ "nmem" -> printf "%5d" $ nMem t
+ "xmem" -> printf "%5d" $ xMem t
+ "fmem" -> printf "%5d" $ fMem t
+ "imem" -> printf "%5d" $ iMem t
+ "rmem" -> printf "%5d" $ rMem t
+ "amem" -> printf "%5d" $ fMem t - rMem t
+ "tdsk" -> printf "%5.0f" $ tDsk t / 1024
+ "fdsk" -> printf "%5d" $ fDsk t `div` 1024
+ "tcpu" -> printf "%4.0f" $ tCpu t
+ "ucpu" -> printf "%4d" $ uCpu t
+ "pcnt" -> printf "%3d" $ length (pList t)
+ "scnt" -> printf "%3d" $ length (sList t)
+ "plist" -> show $ pList t
+ "slist" -> show $ sList t
+ "pfmem" -> printf "%6.4f" $ pMem t
+ "pfdsk" -> printf "%6.4f" $ pDsk t
+ "rcpu" -> printf "%5.2f" $ pCpu t
+ "cload" -> printf "%5.3f" uC
+ "mload" -> printf "%5.3f" uM
+ "dload" -> printf "%5.3f" uD
+ "nload" -> printf "%5.3f" uN
+ "ptags" -> intercalate "," . map (uncurry (printf "%s=%d")) .
+ Map.toList $ pTags t
+ "peermap" -> show $ peers t
+ _ -> T.unknownField
+ where
+ T.DynUtil { T.cpuWeight = uC, T.memWeight = uM,
+ T.dskWeight = uD, T.netWeight = uN } = utilLoad t
-- | Returns the header and numeric propery of a field.
showHeader :: String -> (String, Bool)
showHeader field =
- case field of
- "idx" -> ("Index", True)
- "name" -> ("Name", False)
- "fqdn" -> ("Name", False)
- "status" -> ("F", False)
- "tmem" -> ("t_mem", True)
- "nmem" -> ("n_mem", True)
- "xmem" -> ("x_mem", True)
- "fmem" -> ("f_mem", True)
- "imem" -> ("i_mem", True)
- "rmem" -> ("r_mem", True)
- "amem" -> ("a_mem", True)
- "tdsk" -> ("t_dsk", True)
- "fdsk" -> ("f_dsk", True)
- "tcpu" -> ("pcpu", True)
- "ucpu" -> ("vcpu", True)
- "pcnt" -> ("pcnt", True)
- "scnt" -> ("scnt", True)
- "plist" -> ("primaries", True)
- "slist" -> ("secondaries", True)
- "pfmem" -> ("p_fmem", True)
- "pfdsk" -> ("p_fdsk", True)
- "rcpu" -> ("r_cpu", True)
- "cload" -> ("lCpu", True)
- "mload" -> ("lMem", True)
- "dload" -> ("lDsk", True)
- "nload" -> ("lNet", True)
- "ptags" -> ("PrimaryTags", False)
- "peermap" -> ("PeerMap", False)
- -- TODO: add node fields (group.uuid, group)
- _ -> (T.unknownField, False)
+ case field of
+ "idx" -> ("Index", True)
+ "name" -> ("Name", False)
+ "fqdn" -> ("Name", False)
+ "status" -> ("F", False)
+ "tmem" -> ("t_mem", True)
+ "nmem" -> ("n_mem", True)
+ "xmem" -> ("x_mem", True)
+ "fmem" -> ("f_mem", True)
+ "imem" -> ("i_mem", True)
+ "rmem" -> ("r_mem", True)
+ "amem" -> ("a_mem", True)
+ "tdsk" -> ("t_dsk", True)
+ "fdsk" -> ("f_dsk", True)
+ "tcpu" -> ("pcpu", True)
+ "ucpu" -> ("vcpu", True)
+ "pcnt" -> ("pcnt", True)
+ "scnt" -> ("scnt", True)
+ "plist" -> ("primaries", True)
+ "slist" -> ("secondaries", True)
+ "pfmem" -> ("p_fmem", True)
+ "pfdsk" -> ("p_fdsk", True)
+ "rcpu" -> ("r_cpu", True)
+ "cload" -> ("lCpu", True)
+ "mload" -> ("lMem", True)
+ "dload" -> ("lDsk", True)
+ "nload" -> ("lNet", True)
+ "ptags" -> ("PrimaryTags", False)
+ "peermap" -> ("PeerMap", False)
+ -- TODO: add node fields (group.uuid, group)
+ _ -> (T.unknownField, False)
-- | String converter for the node list functionality.
list :: [String] -> Node -> [String]
list fields t = map (showField t) fields
-
-- | Constant holding the fields we're displaying by default.
defaultFields :: [String]
defaultFields =
- [ "status", "name", "tmem", "nmem", "imem", "xmem", "fmem"
- , "rmem", "tdsk", "fdsk", "tcpu", "ucpu", "pcnt", "scnt"
- , "pfmem", "pfdsk", "rcpu"
- , "cload", "mload", "dload", "nload" ]
+ [ "status", "name", "tmem", "nmem", "imem", "xmem", "fmem"
+ , "rmem", "tdsk", "fdsk", "tcpu", "ucpu", "pcnt", "scnt"
+ , "pfmem", "pfdsk", "rcpu"
+ , "cload", "mload", "dload", "nload" ]
-- | Split a list of nodes into a list of (node group UUID, list of
-- associated nodes).
{-
-Copyright (C) 2009 Google Inc.
+Copyright (C) 2009, 2011 Google Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
-}
module Ganeti.HTools.PeerMap
- ( PeerMap
- , Key
- , Elem
- , empty
- , accumArray
- , Ganeti.HTools.PeerMap.find
- , add
- , remove
- , maxElem
- ) where
+ ( PeerMap
+ , Key
+ , Elem
+ , empty
+ , accumArray
+ , Ganeti.HTools.PeerMap.find
+ , add
+ , remove
+ , maxElem
+ ) where
import Data.Maybe (fromMaybe)
import Data.List
-- | Add or update (via a custom function) an element.
addWith :: (Elem -> Elem -> Elem) -> Key -> Elem -> PeerMap -> PeerMap
addWith fn k v lst =
- case lookup k lst of
- Nothing -> insertBy pmCompare (k, v) lst
- Just o -> insertBy pmCompare (k, fn o v) (remove k lst)
+ case lookup k lst of
+ Nothing -> insertBy pmCompare (k, v) lst
+ Just o -> insertBy pmCompare (k, fn o v) (remove k lst)
-- | Create a PeerMap from an association list, with possible duplicates.
accumArray :: (Elem -> Elem -> Elem) -- ^ function used to merge the elements
module Ganeti.HTools.Program.Hail (main) where
import Control.Monad
+import Data.Maybe (fromMaybe)
+import System.Environment (getArgs)
import System.IO
-import qualified System
import qualified Ganeti.HTools.Cluster as Cluster
-- | Options list and functions.
options :: [OptType]
options =
- [ oPrintNodes
- , oSaveCluster
- , oDataFile
- , oNodeSim
- , oVerbose
- , oShowVer
- , oShowHelp
- ]
+ [ oPrintNodes
+ , oSaveCluster
+ , oDataFile
+ , oNodeSim
+ , oVerbose
+ , oShowVer
+ , oShowHelp
+ ]
-- | Main function.
main :: IO ()
main = do
- cmd_args <- System.getArgs
+ cmd_args <- getArgs
(opts, args) <- parseOpts cmd_args "hail" options
let shownodes = optShowNodes opts
maybeSaveData savecluster "pre-ialloc" "before iallocator run" cdata
let (maybe_ni, resp) = runIAllocator request
- (fin_nl, fin_il) = maybe (cdNodes cdata, cdInstances cdata) id maybe_ni
+ (fin_nl, fin_il) = fromMaybe (cdNodes cdata, cdInstances cdata) maybe_ni
putStrLn resp
maybePrintNodes shownodes "Final cluster" (Cluster.printNodes fin_nl)
import Data.List
import Data.Maybe (isJust, isNothing, fromJust)
import Data.IORef
-import System (exitWith, ExitCode(..))
+import System.Environment (getArgs)
+import System.Exit
import System.IO
import System.Posix.Process
import System.Posix.Signals
-import qualified System
import Text.Printf (printf, hPrintf)
-- | Options list and functions.
options :: [OptType]
options =
- [ oPrintNodes
- , oPrintInsts
- , oPrintCommands
- , oOneline
- , oDataFile
- , oEvacMode
- , oRapiMaster
- , oLuxiSocket
- , oExecJobs
- , oGroup
- , oMaxSolLength
- , oVerbose
- , oQuiet
- , oOfflineNode
- , oMinScore
- , oMaxCpu
- , oMinDisk
- , oMinGain
- , oMinGainLim
- , oDiskMoves
- , oSelInst
- , oInstMoves
- , oDynuFile
- , oExTags
- , oExInst
- , oSaveCluster
- , oShowVer
- , oShowHelp
- ]
+ [ oPrintNodes
+ , oPrintInsts
+ , oPrintCommands
+ , oDataFile
+ , oEvacMode
+ , oRapiMaster
+ , oLuxiSocket
+ , oExecJobs
+ , oGroup
+ , oMaxSolLength
+ , oVerbose
+ , oQuiet
+ , oOfflineNode
+ , oMinScore
+ , oMaxCpu
+ , oMinDisk
+ , oMinGain
+ , oMinGainLim
+ , oDiskMoves
+ , oSelInst
+ , oInstMoves
+ , oDynuFile
+ , oExTags
+ , oExInst
+ , oSaveCluster
+ , oShowVer
+ , oShowHelp
+ ]
{- | Start computing the solution at the given depth and recurse until
we find a valid solution or we exceed the maximum depth.
-> Int -- ^ Max node name len
-> Int -- ^ Max instance name len
-> [MoveJob] -- ^ Current command list
- -> Bool -- ^ Whether to be silent
-> Score -- ^ Score at which to stop
-> Score -- ^ Min gain limit
-> Score -- ^ Min score gain
-> IO (Cluster.Table, [MoveJob]) -- ^ The resulting table
-- and commands
iterateDepth ini_tbl max_rounds disk_moves inst_moves nmlen imlen
- cmd_strs oneline min_score mg_limit min_gain evac_mode =
- let Cluster.Table ini_nl ini_il _ _ = ini_tbl
- allowed_next = Cluster.doNextBalance ini_tbl max_rounds min_score
- m_fin_tbl = if allowed_next
+ cmd_strs min_score mg_limit min_gain evac_mode =
+ let Cluster.Table ini_nl ini_il _ _ = ini_tbl
+ allowed_next = Cluster.doNextBalance ini_tbl max_rounds min_score
+ m_fin_tbl = if allowed_next
then Cluster.tryBalance ini_tbl disk_moves inst_moves
evac_mode mg_limit min_gain
else Nothing
- in
- case m_fin_tbl of
- Just fin_tbl ->
- do
- let
- (Cluster.Table _ _ _ fin_plc) = fin_tbl
- fin_plc_len = length fin_plc
- cur_plc@(idx, _, _, move, _) = head fin_plc
- (sol_line, cmds) = Cluster.printSolutionLine ini_nl ini_il
- nmlen imlen cur_plc fin_plc_len
- afn = Cluster.involvedNodes ini_il cur_plc
- upd_cmd_strs = (afn, idx, move, cmds):cmd_strs
- unless oneline $ do
- putStrLn sol_line
- hFlush stdout
- iterateDepth fin_tbl max_rounds disk_moves inst_moves
- nmlen imlen upd_cmd_strs oneline min_score
- mg_limit min_gain evac_mode
- Nothing -> return (ini_tbl, cmd_strs)
-
--- | Formats the solution for the oneline display.
-formatOneline :: Double -> Int -> Double -> String
-formatOneline ini_cv plc_len fin_cv =
- printf "%.8f %d %.8f %8.3f" ini_cv plc_len fin_cv
- (if fin_cv == 0 then 1 else ini_cv / fin_cv)
+ in case m_fin_tbl of
+ Just fin_tbl ->
+ do
+ let (Cluster.Table _ _ _ fin_plc) = fin_tbl
+ fin_plc_len = length fin_plc
+ cur_plc@(idx, _, _, move, _) = head fin_plc
+ (sol_line, cmds) = Cluster.printSolutionLine ini_nl ini_il
+ nmlen imlen cur_plc fin_plc_len
+ afn = Cluster.involvedNodes ini_il cur_plc
+ upd_cmd_strs = (afn, idx, move, cmds):cmd_strs
+ putStrLn sol_line
+ hFlush stdout
+ iterateDepth fin_tbl max_rounds disk_moves inst_moves
+ nmlen imlen upd_cmd_strs min_score
+ mg_limit min_gain evac_mode
+ Nothing -> return (ini_tbl, cmd_strs)
+
+-- | Displays the cluster stats.
+printStats :: Node.List -> Node.List -> IO ()
+printStats ini_nl fin_nl = do
+ let ini_cs = Cluster.totalResources ini_nl
+ fin_cs = Cluster.totalResources fin_nl
+ printf "Original: mem=%d disk=%d\n"
+ (Cluster.csFmem ini_cs) (Cluster.csFdsk ini_cs) :: IO ()
+ printf "Final: mem=%d disk=%d\n"
+ (Cluster.csFmem fin_cs) (Cluster.csFdsk fin_cs)
+
+-- | Saves the rebalance commands to a text file.
+saveBalanceCommands :: Options -> String -> IO ()
+saveBalanceCommands opts cmd_data = do
+ let out_path = fromJust $ optShowCmds opts
+ putStrLn ""
+ if out_path == "-"
+ then printf "Commands to run to reach the above solution:\n%s"
+ (unlines . map (" " ++) .
+ filter (/= " check") .
+ lines $ cmd_data)
+ else do
+ writeFile out_path (shTemplate ++ cmd_data)
+ printf "The commands have been written to file '%s'\n" out_path
-- | Polls a set of jobs at a fixed interval until all are finished
-- one way or another.
-- | Wrapper over execJobSet checking for early termination.
execWrapper :: String -> Node.List
- -> Instance.List -> IORef Int -> [JobSet] -> IO Bool
+ -> Instance.List -> IORef Int -> [JobSet] -> IO Bool
execWrapper _ _ _ _ [] = return True
execWrapper master nl il cref alljss = do
cancel <- readIORef cref
- (if cancel > 0
- then do
- hPrintf stderr "Exiting early due to user request, %d\
- \ jobset(s) remaining." (length alljss)::IO ()
- return False
- else execJobSet master nl il cref alljss)
+ if cancel > 0
+ then do
+ hPrintf stderr "Exiting early due to user request, %d\
+ \ jobset(s) remaining." (length alljss)::IO ()
+ return False
+ else execJobSet master nl il cref alljss
-- | Execute an entire jobset.
execJobSet :: String -> Node.List
putStrLn $ "Got job IDs " ++ commaJoin x
waitForJobs client x
)
- (case jrs of
- Bad x -> do
- hPutStrLn stderr $ "Cannot compute job status, aborting: " ++ show x
- return False
- Ok x -> if checkJobsStatus x
- then execWrapper master nl il cref jss
- else do
- hPutStrLn stderr $ "Not all jobs completed successfully: " ++
- show x
- hPutStrLn stderr "Aborting."
- return False)
+ case jrs of
+ Bad x -> do
+ hPutStrLn stderr $ "Cannot compute job status, aborting: " ++ show x
+ return False
+ Ok x -> if checkJobsStatus x
+ then execWrapper master nl il cref jss
+ else do
+ hPutStrLn stderr $ "Not all jobs completed successfully: " ++
+ show x
+ hPutStrLn stderr "Aborting."
+ return False
+
+-- | Executes the jobs, if possible and desired.
+maybeExecJobs :: Options
+ -> [a]
+ -> Node.List
+ -> Instance.List
+ -> [JobSet]
+ -> IO Bool
+maybeExecJobs opts ord_plc fin_nl il cmd_jobs =
+ if optExecJobs opts && not (null ord_plc)
+ then (case optLuxi opts of
+ Nothing -> do
+ hPutStrLn stderr "Execution of commands possible only on LUXI"
+ return False
+ Just master -> runJobSet master fin_nl il cmd_jobs)
+ else return True
-- | Signal handler for graceful termination.
hangleSigInt :: IORef Int -> IO ()
[(hangleSigTerm, softwareTermination), (hangleSigInt, keyboardSignal)]
execWrapper master fin_nl il cref cmd_jobs
--- | Main function.
-main :: IO ()
-main = do
- cmd_args <- System.getArgs
- (opts, args) <- parseOpts cmd_args "hbal" options
-
- unless (null args) $ do
- hPutStrLn stderr "Error: this program doesn't take any arguments."
- exitWith $ ExitFailure 1
-
- let oneline = optOneline opts
- verbose = optVerbose opts
- shownodes = optShowNodes opts
- showinsts = optShowInsts opts
-
- ini_cdata@(ClusterData gl fixed_nl ilf ctags) <- loadExternalData opts
-
- let offline_passed = optOffline opts
- all_nodes = Container.elems fixed_nl
- offline_lkp = map (lookupName (map Node.name all_nodes)) offline_passed
- offline_wrong = filter (not . goodLookupResult) offline_lkp
- offline_names = map lrContent offline_lkp
- offline_indices = map Node.idx $
- filter (\n -> Node.name n `elem` offline_names)
- all_nodes
- m_cpu = optMcpu opts
- m_dsk = optMdsk opts
- csf = commonSuffix fixed_nl ilf
-
- when (not (null offline_wrong)) $ do
- hPrintf stderr "Error: Wrong node name(s) set as offline: %s\n"
- (commaJoin (map lrContent offline_wrong)) :: IO ()
- exitWith $ ExitFailure 1
-
- let nm = Container.map (\n -> if Node.idx n `elem` offline_indices
- then Node.setOffline n True
- else n) fixed_nl
- nlf = Container.map (flip Node.setMdsk m_dsk . flip Node.setMcpu m_cpu)
- nm
-
- when (not oneline && verbose > 1) $
- putStrLn $ "Loaded cluster tags: " ++ intercalate "," ctags
-
- when (Container.size ilf == 0) $ do
- (if oneline then putStrLn $ formatOneline 0 0 0
- else printf "Cluster is empty, exiting.\n")
- exitWith ExitSuccess
-
- let split_insts = Cluster.findSplitInstances nlf ilf
- unless (null split_insts) $ do
- hPutStrLn stderr "Found instances belonging to multiple node groups:"
- mapM_ (\i -> hPutStrLn stderr $ " " ++ Instance.name i) split_insts
- hPutStrLn stderr "Aborting."
- exitWith $ ExitFailure 1
-
+-- | Select the target node group.
+selectGroup :: Options -> Group.List -> Node.List -> Instance.List
+ -> IO (String, (Node.List, Instance.List))
+selectGroup opts gl nlf ilf = do
let ngroups = Cluster.splitCluster nlf ilf
when (length ngroups > 1 && isNothing (optGroup opts)) $ do
hPutStrLn stderr "Found multiple node groups:"
hPutStrLn stderr "Aborting."
exitWith $ ExitFailure 1
- maybeSaveData (optSaveCluster opts) "original" "before balancing" ini_cdata
-
- unless oneline $ printf "Loaded %d nodes, %d instances\n"
- (Container.size nlf)
- (Container.size ilf)
-
- (gname, (nl, il)) <- case optGroup opts of
+ case optGroup opts of
Nothing -> do
- let (gidx, cdata) = head ngroups
- grp = Container.find gidx gl
- return (Group.name grp, cdata)
+ let (gidx, cdata) = head ngroups
+ grp = Container.find gidx gl
+ return (Group.name grp, cdata)
Just g -> case Container.findByName gl g of
Nothing -> do
hPutStrLn stderr $ "Node group " ++ g ++
exitWith $ ExitFailure 1
Just grp ->
case lookup (Group.idx grp) ngroups of
- Nothing -> do
+ Nothing ->
-- This will only happen if there are no nodes assigned
-- to this group
return (Group.name grp, (Container.empty, Container.empty))
Just cdata -> return (Group.name grp, cdata)
- unless oneline $ printf "Group size %d nodes, %d instances\n"
- (Container.size nl)
- (Container.size il)
+-- | Do a few checks on the cluster data.
+checkCluster :: Int -> Node.List -> Instance.List -> IO ()
+checkCluster verbose nl il = do
+ -- nothing to do on an empty cluster
+ when (Container.null il) $ do
+ printf "Cluster is empty, exiting.\n"::IO ()
+ exitWith ExitSuccess
- putStrLn $ "Selected node group: " ++ gname
+ -- hbal doesn't currently handle split clusters
+ let split_insts = Cluster.findSplitInstances nl il
+ unless (null split_insts) $ do
+ hPutStrLn stderr "Found instances belonging to multiple node groups:"
+ mapM_ (\i -> hPutStrLn stderr $ " " ++ Instance.name i) split_insts
+ hPutStrLn stderr "Aborting."
+ exitWith $ ExitFailure 1
+
+ printf "Loaded %d nodes, %d instances\n"
+ (Container.size nl)
+ (Container.size il)::IO ()
- when (length csf > 0 && not oneline && verbose > 1) $
+ let csf = commonSuffix nl il
+ when (not (null csf) && verbose > 1) $
printf "Note: Stripping common suffix of '%s' from names\n" csf
+-- | Do a few checks on the selected group data.
+checkGroup :: Int -> String -> Node.List -> Instance.List -> IO ()
+checkGroup verbose gname nl il = do
+ printf "Group size %d nodes, %d instances\n"
+ (Container.size nl)
+ (Container.size il)::IO ()
+
+ putStrLn $ "Selected node group: " ++ gname
+
let (bad_nodes, bad_instances) = Cluster.computeBadItems nl il
- unless (oneline || verbose == 0) $ printf
+ unless (verbose == 0) $ printf
"Initial check done: %d bad nodes, %d bad instances.\n"
(length bad_nodes) (length bad_instances)
putStrLn "Cluster is not N+1 happy, continuing but no guarantee \
\that the cluster will end N+1 happy."
+-- | Check that we actually need to rebalance.
+checkNeedRebalance :: Options -> Score -> IO ()
+checkNeedRebalance opts ini_cv = do
+ let min_cv = optMinScore opts
+ when (ini_cv < min_cv) $ do
+ printf "Cluster is already well balanced (initial score %.6g,\n\
+ \minimum score %.6g).\nNothing to do, exiting\n"
+ ini_cv min_cv:: IO ()
+ exitWith ExitSuccess
+
+-- | Main function.
+main :: IO ()
+main = do
+ cmd_args <- getArgs
+ (opts, args) <- parseOpts cmd_args "hbal" options
+
+ unless (null args) $ do
+ hPutStrLn stderr "Error: this program doesn't take any arguments."
+ exitWith $ ExitFailure 1
+
+ let verbose = optVerbose opts
+ shownodes = optShowNodes opts
+ showinsts = optShowInsts opts
+
+ ini_cdata@(ClusterData gl fixed_nl ilf ctags) <- loadExternalData opts
+
+ when (verbose > 1) $
+ putStrLn $ "Loaded cluster tags: " ++ intercalate "," ctags
+
+ nlf <- setNodeStatus opts fixed_nl
+ checkCluster verbose nlf ilf
+
+ maybeSaveData (optSaveCluster opts) "original" "before balancing" ini_cdata
+
+ (gname, (nl, il)) <- selectGroup opts gl nlf ilf
+
+ checkGroup verbose gname nl il
+
maybePrintInsts showinsts "Initial" (Cluster.printInsts nl il)
maybePrintNodes shownodes "Initial cluster" (Cluster.printNodes nl)
ini_tbl = Cluster.Table nl il ini_cv []
min_cv = optMinScore opts
- when (ini_cv < min_cv) $ do
- (if oneline then
- putStrLn $ formatOneline ini_cv 0 ini_cv
- else printf "Cluster is already well balanced (initial score %.6g,\n\
- \minimum score %.6g).\nNothing to do, exiting\n"
- ini_cv min_cv)
- exitWith ExitSuccess
+ checkNeedRebalance opts ini_cv
- unless oneline (if verbose > 2 then
- printf "Initial coefficients: overall %.8f, %s\n"
- ini_cv (Cluster.printStats nl)
- else
- printf "Initial score: %.8f\n" ini_cv)
+ if verbose > 2
+ then printf "Initial coefficients: overall %.8f, %s\n"
+ ini_cv (Cluster.printStats nl)::IO ()
+ else printf "Initial score: %.8f\n" ini_cv
- unless oneline $ putStrLn "Trying to minimize the CV..."
+ putStrLn "Trying to minimize the CV..."
let imlen = maximum . map (length . Instance.alias) $ Container.elems il
nmlen = maximum . map (length . Node.alias) $ Container.elems nl
(fin_tbl, cmd_strs) <- iterateDepth ini_tbl (optMaxLength opts)
(optDiskMoves opts)
(optInstMoves opts)
- nmlen imlen [] oneline min_cv
+ nmlen imlen [] min_cv
(optMinGainLim opts) (optMinGain opts)
(optEvacMode opts)
let (Cluster.Table fin_nl fin_il fin_cv fin_plc) = fin_tbl
printf "Cluster score improved from %.8f to %.8f\n"
ini_cv fin_cv ::String
- unless oneline $ putStr sol_msg
+ putStr sol_msg
- unless (oneline || verbose == 0) $
+ unless (verbose == 0) $
printf "Solution length=%d\n" (length ord_plc)
let cmd_jobs = Cluster.splitJobs cmd_strs
- cmd_data = Cluster.formatCmds cmd_jobs
when (isJust $ optShowCmds opts) $
- do
- let out_path = fromJust $ optShowCmds opts
- putStrLn ""
- (if out_path == "-" then
- printf "Commands to run to reach the above solution:\n%s"
- (unlines . map (" " ++) .
- filter (/= " check") .
- lines $ cmd_data)
- else do
- writeFile out_path (shTemplate ++ cmd_data)
- printf "The commands have been written to file '%s'\n" out_path)
+ saveBalanceCommands opts $ Cluster.formatCmds cmd_jobs
maybeSaveData (optSaveCluster opts) "balanced" "after balancing"
(ClusterData gl fin_nl fin_il ctags)
maybePrintNodes shownodes "Final cluster" (Cluster.printNodes fin_nl)
- when (verbose > 3) $ do
- let ini_cs = Cluster.totalResources nl
- fin_cs = Cluster.totalResources fin_nl
- printf "Original: mem=%d disk=%d\n"
- (Cluster.csFmem ini_cs) (Cluster.csFdsk ini_cs) :: IO ()
- printf "Final: mem=%d disk=%d\n"
- (Cluster.csFmem fin_cs) (Cluster.csFdsk fin_cs)
- when oneline $
- putStrLn $ formatOneline ini_cv (length ord_plc) fin_cv
-
- eval <-
- if optExecJobs opts && not (null ord_plc)
- then (case optLuxi opts of
- Nothing -> do
- hPutStrLn stderr "Execution of commands possible only on LUXI"
- return False
- Just master -> runJobSet master fin_nl il cmd_jobs)
- else return True
+ when (verbose > 3) $ printStats nl fin_nl
+
+ eval <- maybeExecJobs opts ord_plc fin_nl il cmd_jobs
unless eval (exitWith (ExitFailure 1))
import Control.Monad
import Data.Maybe (isJust, fromJust, fromMaybe)
-import System (exitWith, ExitCode(..))
+import System.Environment (getArgs)
+import System.Exit
import System.IO
import System.FilePath
-import qualified System
import Text.Printf (printf)
-- | Options list and functions.
options :: [OptType]
options =
- [ oPrintNodes
- , oOutputDir
- , oLuxiSocket
- , oVerbose
- , oNoHeaders
- , oShowVer
- , oShowHelp
- ]
+ [ oPrintNodes
+ , oOutputDir
+ , oLuxiSocket
+ , oVerbose
+ , oNoHeaders
+ , oShowVer
+ , oShowHelp
+ ]
-- | Return a one-line summary of cluster state.
printCluster :: Node.List -> Instance.List
-> String
printCluster nl il =
- let (bad_nodes, bad_instances) = Cluster.computeBadItems nl il
- ccv = Cluster.compCV nl
- nodes = Container.elems nl
- insts = Container.elems il
- t_ram = sum . map Node.tMem $ nodes
- t_dsk = sum . map Node.tDsk $ nodes
- f_ram = sum . map Node.fMem $ nodes
- f_dsk = sum . map Node.fDsk $ nodes
- in
- printf "%5d %5d %5d %5d %6.0f %6d %6.0f %6d %.8f"
- (length nodes) (length insts)
- (length bad_nodes) (length bad_instances)
- t_ram f_ram
- (t_dsk / 1024) (f_dsk `div` 1024)
- ccv
-
+ let (bad_nodes, bad_instances) = Cluster.computeBadItems nl il
+ ccv = Cluster.compCV nl
+ nodes = Container.elems nl
+ insts = Container.elems il
+ t_ram = sum . map Node.tMem $ nodes
+ t_dsk = sum . map Node.tDsk $ nodes
+ f_ram = sum . map Node.fMem $ nodes
+ f_dsk = sum . map Node.fDsk $ nodes
+ in printf "%5d %5d %5d %5d %6.0f %6d %6.0f %6d %.8f"
+ (length nodes) (length insts)
+ (length bad_nodes) (length bad_instances)
+ t_ram f_ram (t_dsk / 1024) (f_dsk `div` 1024) ccv
-- | Replace slashes with underscore for saving to filesystem.
fixSlash :: String -> String
fixSlash = map (\x -> if x == '/' then '_' else x)
-
-- | Generates serialized data from loader input.
processData :: ClusterData -> Result ClusterData
processData input_data = do
-- | Main function.
main :: IO ()
main = do
- cmd_args <- System.getArgs
+ cmd_args <- getArgs
(opts, clusters) <- parseOpts cmd_args "hscan" options
let local = "LOCAL"
module Ganeti.HTools.Program.Hspace (main) where
import Control.Monad
-import Data.Char (toUpper, isAlphaNum)
+import Data.Char (toUpper, isAlphaNum, toLower)
import Data.Function (on)
import Data.List
-import Data.Maybe (isJust, fromJust)
import Data.Ord (comparing)
-import System (exitWith, ExitCode(..))
+import System.Exit
import System.IO
-import qualified System
+import System.Environment (getArgs)
import Text.Printf (printf, hPrintf)
-- | Options list and functions.
options :: [OptType]
options =
- [ oPrintNodes
- , oDataFile
- , oDiskTemplate
- , oNodeSim
- , oRapiMaster
- , oLuxiSocket
- , oVerbose
- , oQuiet
- , oOfflineNode
- , oIMem
- , oIDisk
- , oIVcpus
- , oMachineReadable
- , oMaxCpu
- , oMinDisk
- , oTieredSpec
- , oSaveCluster
- , oShowVer
- , oShowHelp
- ]
+ [ oPrintNodes
+ , oDataFile
+ , oDiskTemplate
+ , oNodeSim
+ , oRapiMaster
+ , oLuxiSocket
+ , oVerbose
+ , oQuiet
+ , oOfflineNode
+ , oIMem
+ , oIDisk
+ , oIVcpus
+ , oMachineReadable
+ , oMaxCpu
+ , oMaxSolLength
+ , oMinDisk
+ , oTieredSpec
+ , oSaveCluster
+ , oShowVer
+ , oShowHelp
+ ]
-- | The allocation phase we're in (initial, after tiered allocs, or
-- after regular allocation).
-- | The description of a spec.
specDescription :: SpecType -> String
-specDescription SpecNormal = "Normal (fixed-size)"
+specDescription SpecNormal = "Standard (fixed-size)"
specDescription SpecTiered = "Tiered (initial size)"
-- | Efficiency generic function.
PFinal -> "FIN"
PTiered -> "TRL"
+-- | Prints the failure reason and scores.
+printFRScores :: Node.List -> Node.List -> [(FailMode, Int)] -> IO ()
+printFRScores ini_nl fin_nl sreason = do
+ printf " - most likely failure reason: %s\n" $ failureReason sreason::IO ()
+ printClusterScores ini_nl fin_nl
+ printClusterEff (Cluster.totalResources fin_nl)
+
-- | Print final stats and related metrics.
printResults :: Bool -> Node.List -> Node.List -> Int -> Int
-> [(FailMode, Int)] -> IO ()
printResults False ini_nl fin_nl _ allocs sreason = do
putStrLn "Normal (fixed-size) allocation results:"
printf " - %3d instances allocated\n" allocs :: IO ()
- printf " - most likely failure reason: %s\n" $ failureReason sreason::IO ()
- printClusterScores ini_nl fin_nl
- printClusterEff (Cluster.totalResources fin_nl)
+ printFRScores ini_nl fin_nl sreason
-- | Prints the final @OK@ marker in machine readable output.
printFinal :: Bool -> IO ()
tieredSpecMap :: [Instance.Instance]
-> [(RSpec, Int)]
tieredSpecMap trl_ixes =
- let fin_trl_ixes = reverse trl_ixes
- ix_byspec = groupBy ((==) `on` Instance.specOf) fin_trl_ixes
- spec_map = map (\ixs -> (Instance.specOf $ head ixs, length ixs))
- ix_byspec
- in spec_map
+ let fin_trl_ixes = reverse trl_ixes
+ ix_byspec = groupBy ((==) `on` Instance.specOf) fin_trl_ixes
+ spec_map = map (\ixs -> (Instance.specOf $ head ixs, length ixs))
+ ix_byspec
+ in spec_map
-- | Formats a spec map to strings.
formatSpecMap :: [(RSpec, Int)] -> [String]
formatSpecMap =
- map (\(spec, cnt) -> printf "%d,%d,%d=%d" (rspecMem spec)
- (rspecDsk spec) (rspecCpu spec) cnt)
+ map (\(spec, cnt) -> printf "%d,%d,%d=%d" (rspecMem spec)
+ (rspecDsk spec) (rspecCpu spec) cnt)
-- | Formats \"key-metrics\" values.
formatRSpec :: Double -> String -> RSpec -> [(String, String)]
formatRSpec m_cpu s r =
- [ ("KM_" ++ s ++ "_CPU", show $ rspecCpu r)
- , ("KM_" ++ s ++ "_NPU", show $ fromIntegral (rspecCpu r) / m_cpu)
- , ("KM_" ++ s ++ "_MEM", show $ rspecMem r)
- , ("KM_" ++ s ++ "_DSK", show $ rspecDsk r)
- ]
+ [ ("KM_" ++ s ++ "_CPU", show $ rspecCpu r)
+ , ("KM_" ++ s ++ "_NPU", show $ fromIntegral (rspecCpu r) / m_cpu)
+ , ("KM_" ++ s ++ "_MEM", show $ rspecMem r)
+ , ("KM_" ++ s ++ "_DSK", show $ rspecDsk r)
+ ]
-- | Shows allocations stats.
printAllocationStats :: Double -> Node.List -> Node.List -> IO ()
-> Node.List -> [Instance.Instance] -> IO ()
printAllocationMap verbose msg nl ixes =
when (verbose > 1) $ do
- hPutStrLn stderr msg
- hPutStr stderr . unlines . map ((:) ' ' . intercalate " ") $
+ hPutStrLn stderr (msg ++ " map")
+ hPutStr stderr . unlines . map ((:) ' ' . unwords) $
formatTable (map (printInstance nl) (reverse ixes))
-- This is the numberic-or-not field
-- specification; the first three fields are
printISpec True ispec spec disk_template = do
printKeys $ map (\(a, fn) -> (prefix ++ "_" ++ a, fn ispec)) specData
printKeys [ (prefix ++ "_RQN", printf "%d" req_nodes) ]
- printKeys [ (prefix ++ "_DISK_TEMPLATE", dtToString disk_template) ]
+ printKeys [ (prefix ++ "_DISK_TEMPLATE",
+ diskTemplateToRaw disk_template) ]
where req_nodes = Instance.requiredNodes disk_template
prefix = specPrefix spec
printf "%s instance spec is:\n %s, using disk\
\ template '%s'.\n"
(specDescription spec)
- (formatResources ispec specData) (dtToString disk_template)
+ (formatResources ispec specData) (diskTemplateToRaw disk_template)
-- | Prints the tiered results.
printTiered :: Bool -> [(RSpec, Int)] -> Double
-> Node.List -> Node.List -> [(FailMode, Int)] -> IO ()
printTiered True spec_map m_cpu nl trl_nl _ = do
printKeys $ printStats PTiered (Cluster.totalResources trl_nl)
- printKeys [("TSPEC", intercalate " " (formatSpecMap spec_map))]
+ printKeys [("TSPEC", unwords (formatSpecMap spec_map))]
printAllocationStats m_cpu nl trl_nl
printTiered False spec_map _ ini_nl fin_nl sreason = do
mapM_ (\(ispec, cnt) ->
printf " - %3d instances of spec %s\n" cnt
(formatResources ispec specData)) spec_map
- printf " - most likely failure reason: %s\n" $ failureReason sreason::IO ()
- printClusterScores ini_nl fin_nl
- printClusterEff (Cluster.totalResources fin_nl)
+ printFRScores ini_nl fin_nl sreason
-- | Displays the initial/final cluster scores.
printClusterScores :: Node.List -> Node.List -> IO ()
-- | Displays the cluster efficiency.
printClusterEff :: Cluster.CStats -> IO ()
printClusterEff cs =
- mapM_ (\(s, fn) ->
- printf " - %s usage efficiency: %5.2f%%\n" s (fn cs * 100))
+ mapM_ (\(s, fn) ->
+ printf " - %s usage efficiency: %5.2f%%\n" s (fn cs * 100))
[("memory", memEff),
(" disk", dskEff),
(" vcpu", cpuEff)]
sortReasons :: [(FailMode, Int)] -> [(FailMode, Int)]
sortReasons = reverse . sortBy (comparing snd)
+-- | Aborts the program if we get a bad value.
+exitIfBad :: Result a -> IO a
+exitIfBad (Bad s) =
+ hPrintf stderr "Failure: %s\n" s >> exitWith (ExitFailure 1)
+exitIfBad (Ok v) = return v
+
+-- | Runs an allocation algorithm and saves cluster state.
+runAllocation :: ClusterData -- ^ Cluster data
+ -> Maybe Cluster.AllocResult -- ^ Optional stop-allocation
+ -> Result Cluster.AllocResult -- ^ Allocation result
+ -> RSpec -- ^ Requested instance spec
+ -> SpecType -- ^ Allocation type
+ -> Options -- ^ CLI options
+ -> IO (FailStats, Node.List, Int, [(RSpec, Int)])
+runAllocation cdata stop_allocation actual_result spec mode opts = do
+ (reasons, new_nl, new_il, new_ixes, _) <-
+ case stop_allocation of
+ Just result_noalloc -> return result_noalloc
+ Nothing -> exitIfBad actual_result
+
+ let name = head . words . specDescription $ mode
+ descr = name ++ " allocation"
+ ldescr = "after " ++ map toLower descr
+
+ printISpec (optMachineReadable opts) spec mode (optDiskTemplate opts)
+
+ printAllocationMap (optVerbose opts) descr new_nl new_ixes
+
+ maybePrintNodes (optShowNodes opts) descr (Cluster.printNodes new_nl)
+
+ maybeSaveData (optSaveCluster opts) (map toLower name) ldescr
+ (cdata { cdNodes = new_nl, cdInstances = new_il})
+
+ return (sortReasons reasons, new_nl, length new_ixes, tieredSpecMap new_ixes)
+
-- | Main function.
main :: IO ()
main = do
- cmd_args <- System.getArgs
+ cmd_args <- getArgs
(opts, args) <- parseOpts cmd_args "hspace" options
unless (null args) $ do
let verbose = optVerbose opts
ispec = optISpec opts
- shownodes = optShowNodes opts
disk_template = optDiskTemplate opts
req_nodes = Instance.requiredNodes disk_template
machine_r = optMachineReadable opts
(ClusterData gl fixed_nl il ctags) <- loadExternalData opts
+ nl <- setNodeStatus opts fixed_nl
- let num_instances = length $ Container.elems il
-
- let offline_passed = optOffline opts
+ let num_instances = Container.size il
all_nodes = Container.elems fixed_nl
- offline_lkp = map (lookupName (map Node.name all_nodes)) offline_passed
- offline_wrong = filter (not . goodLookupResult) offline_lkp
- offline_names = map lrContent offline_lkp
- offline_indices = map Node.idx $
- filter (\n -> Node.name n `elem` offline_names)
- all_nodes
- m_cpu = optMcpu opts
- m_dsk = optMdsk opts
-
- when (not (null offline_wrong)) $ do
- hPrintf stderr "Error: Wrong node name(s) set as offline: %s\n"
- (commaJoin (map lrContent offline_wrong)) :: IO ()
- exitWith $ ExitFailure 1
-
- when (req_nodes /= 1 && req_nodes /= 2) $ do
- hPrintf stderr "Error: Invalid required nodes (%d)\n"
- req_nodes :: IO ()
- exitWith $ ExitFailure 1
-
- let nm = Container.map (\n -> if Node.idx n `elem` offline_indices
- then Node.setOffline n True
- else n) fixed_nl
- nl = Container.map (flip Node.setMdsk m_dsk . flip Node.setMcpu m_cpu)
- nm
+ cdata = ClusterData gl nl il ctags
csf = commonSuffix fixed_nl il
- when (length csf > 0 && verbose > 1) $
+ when (not (null csf) && verbose > 1) $
hPrintf stderr "Note: Stripping common suffix of '%s' from names\n" csf
- when (isJust shownodes) $
- do
- hPutStrLn stderr "Initial cluster status:"
- hPutStrLn stderr $ Cluster.printNodes nl (fromJust shownodes)
-
- let ini_cv = Cluster.compCV nl
- ini_stats = Cluster.totalResources nl
+ maybePrintNodes (optShowNodes opts) "Initial cluster" (Cluster.printNodes nl)
when (verbose > 2) $
hPrintf stderr "Initial coefficients: overall %.8f, %s\n"
- ini_cv (Cluster.printStats nl)
+ (Cluster.compCV nl) (Cluster.printStats nl)
- printCluster machine_r ini_stats (length all_nodes)
+ printCluster machine_r (Cluster.totalResources nl) (length all_nodes)
- printISpec machine_r ispec SpecNormal disk_template
-
- let bad_nodes = fst $ Cluster.computeBadItems nl il
- stop_allocation = length bad_nodes > 0
- result_noalloc = ([(FailN1, 1)]::FailStats, nl, il, [], [])
+ let stop_allocation = case Cluster.computeBadItems nl il of
+ ([], _) -> Nothing
+ _ -> Just ([(FailN1, 1)]::FailStats, nl, il, [], [])
+ alloclimit = if optMaxLength opts == -1
+ then Nothing
+ else Just (optMaxLength opts)
-- utility functions
let iofspec spx = Instance.create "new" (rspecMem spx) (rspecDsk spx)
- (rspecCpu spx) "running" [] True (-1) (-1) disk_template
- exitifbad val = (case val of
- Bad s -> do
- hPrintf stderr "Failure: %s\n" s :: IO ()
- exitWith $ ExitFailure 1
- Ok x -> return x)
-
+ (rspecCpu spx) Running [] True (-1) (-1) disk_template
- let reqinst = iofspec ispec
-
- allocnodes <- exitifbad $ Cluster.genAllocNodes gl nl req_nodes True
+ allocnodes <- exitIfBad $ Cluster.genAllocNodes gl nl req_nodes True
-- Run the tiered allocation, if enabled
- (case optTieredSpec opts of
- Nothing -> return ()
- Just tspec -> do
- (treason, trl_nl, trl_il, trl_ixes, _) <-
- if stop_allocation
- then return result_noalloc
- else exitifbad (Cluster.tieredAlloc nl il Nothing (iofspec tspec)
- allocnodes [] [])
- let spec_map' = tieredSpecMap trl_ixes
- treason' = sortReasons treason
-
- printAllocationMap verbose "Tiered allocation map" trl_nl trl_ixes
-
- maybePrintNodes shownodes "Tiered allocation"
- (Cluster.printNodes trl_nl)
-
- maybeSaveData (optSaveCluster opts) "tiered" "after tiered allocation"
- (ClusterData gl trl_nl trl_il ctags)
-
- printISpec machine_r tspec SpecTiered disk_template
+ case optTieredSpec opts of
+ Nothing -> return ()
+ Just tspec -> do
+ (treason, trl_nl, _, spec_map) <-
+ runAllocation cdata stop_allocation
+ (Cluster.tieredAlloc nl il alloclimit (iofspec tspec)
+ allocnodes [] []) tspec SpecTiered opts
- printTiered machine_r spec_map' m_cpu nl trl_nl treason'
- )
+ printTiered machine_r spec_map (optMcpu opts) nl trl_nl treason
-- Run the standard (avg-mode) allocation
- (ereason, fin_nl, fin_il, ixes, _) <-
- if stop_allocation
- then return result_noalloc
- else exitifbad (Cluster.iterateAlloc nl il Nothing
- reqinst allocnodes [] [])
-
- let allocs = length ixes
- sreason = sortReasons ereason
-
- printAllocationMap verbose "Standard allocation map" fin_nl ixes
-
- maybePrintNodes shownodes "Standard allocation" (Cluster.printNodes fin_nl)
-
- maybeSaveData (optSaveCluster opts) "alloc" "after standard allocation"
- (ClusterData gl fin_nl fin_il ctags)
+ (sreason, fin_nl, allocs, _) <-
+ runAllocation cdata stop_allocation
+ (Cluster.iterateAlloc nl il alloclimit (iofspec ispec)
+ allocnodes [] []) ispec SpecNormal opts
printResults machine_r nl fin_nl num_instances allocs sreason
+{-# LANGUAGE TemplateHaskell #-}
+
{-| Unittests for ganeti-htools.
-}
-}
module Ganeti.HTools.QC
- ( testUtils
- , testPeerMap
- , testContainer
- , testInstance
- , testNode
- , testText
- , testOpCodes
- , testJobs
- , testCluster
- , testLoader
- , testTypes
- ) where
+ ( testUtils
+ , testPeerMap
+ , testContainer
+ , testInstance
+ , testNode
+ , testText
+ , testOpCodes
+ , testJobs
+ , testCluster
+ , testLoader
+ , testTypes
+ ) where
import Test.QuickCheck
import Data.List (findIndex, intercalate, nub, isPrefixOf)
import qualified Ganeti.HTools.Program.Hscan
import qualified Ganeti.HTools.Program.Hspace
-run :: Testable prop => prop -> Args -> IO Result
-run = flip quickCheckWithResult
+import Ganeti.HTools.QCHelper (testSuite)
-- * Constants
defGroup :: Group.Group
defGroup = flip Group.setIdx 0 $
- Group.create "default" Utils.defaultGroupID
- Types.AllocPreferred
+ Group.create "default" Utils.defaultGroupID Types.AllocPreferred
defGroupList :: Group.List
defGroupList = Container.fromList [(Group.idx defGroup, defGroup)]
isFailure (Types.OpFail _) = True
isFailure _ = False
+-- | Checks for equality with proper annotation.
+(==?) :: (Show a, Eq a) => a -> a -> Property
+(==?) x y = printTestCase
+ ("Expected equality, but '" ++
+ show x ++ "' /= '" ++ show y ++ "'") (x == y)
+infix 3 ==?
+
-- | Update an instance to be smaller than a node.
setInstanceSmallerThanNode node inst =
- inst { Instance.mem = Node.availMem node `div` 2
- , Instance.dsk = Node.availDisk node `div` 2
- , Instance.vcpus = Node.availCpu node `div` 2
- }
+ inst { Instance.mem = Node.availMem node `div` 2
+ , Instance.dsk = Node.availDisk node `div` 2
+ , Instance.vcpus = Node.availCpu node `div` 2
+ }
-- | Create an instance given its spec.
createInstance mem dsk vcpus =
- Instance.create "inst-unnamed" mem dsk vcpus "running" [] True (-1) (-1)
- Types.DTDrbd8
+ Instance.create "inst-unnamed" mem dsk vcpus Types.Running [] True (-1) (-1)
+ Types.DTDrbd8
-- | Create a small cluster by repeating a node spec.
makeSmallCluster :: Node.Node -> Int -> Node.List
makeSmallCluster node count =
- let fn = Node.buildPeers node Container.empty
- namelst = map (\n -> (Node.name n, n)) (replicate count fn)
- (_, nlst) = Loader.assignIndices namelst
- in nlst
+ let fn = Node.buildPeers node Container.empty
+ namelst = map (\n -> (Node.name n, n)) (replicate count fn)
+ (_, nlst) = Loader.assignIndices namelst
+ in nlst
+
+-- | Make a small cluster of nodes with an empty instance list, plus
+-- an instance shrunk to fit on one node.
+makeSmallEmptyCluster :: Node.Node -> Int -> Instance.Instance
+ -> (Node.List, Instance.List, Instance.Instance)
+makeSmallEmptyCluster node count inst =
+ (makeSmallCluster node count, Container.empty,
+ setInstanceSmallerThanNode node inst)
-- | Checks if a node is "big" enough.
isNodeBig :: Node.Node -> Int -> Bool
let pnode = Container.find pdx nl
snode = Container.find sdx nl
maxiidx = if Container.null il
- then 0
- else fst (Container.findMax il) + 1
+ then 0
+ else fst (Container.findMax il) + 1
inst' = inst { Instance.idx = maxiidx,
Instance.pNode = pdx, Instance.sNode = sdx }
pnode' = Node.setPri pnode inst'
newtype DNSChar = DNSChar { dnsGetChar::Char }
instance Arbitrary DNSChar where
- arbitrary = do
- x <- elements (['a'..'z'] ++ ['0'..'9'] ++ "_-")
- return (DNSChar x)
+ arbitrary = do
+ x <- elements (['a'..'z'] ++ ['0'..'9'] ++ "_-")
+ return (DNSChar x)
getName :: Gen String
getName = do
dn <- vector n::Gen [DNSChar]
return (map dnsGetChar dn)
-
getFQDN :: Gen String
getFQDN = do
felem <- getName
let frest' = map (map dnsGetChar) frest
return (felem ++ "." ++ intercalate "." frest')
+instance Arbitrary Types.InstanceStatus where
+ arbitrary = elements [minBound..maxBound]
+
-- let's generate a random instance
instance Arbitrary Instance.Instance where
- arbitrary = do
- name <- getFQDN
- mem <- choose (0, maxMem)
- dsk <- choose (0, maxDsk)
- run_st <- elements [ C.inststErrorup
- , C.inststErrordown
- , C.inststAdmindown
- , C.inststNodedown
- , C.inststNodeoffline
- , C.inststRunning
- , "no_such_status1"
- , "no_such_status2"]
- pn <- arbitrary
- sn <- arbitrary
- vcpus <- choose (0, maxCpu)
- return $ Instance.create name mem dsk vcpus run_st [] True pn sn
- Types.DTDrbd8
+ arbitrary = do
+ name <- getFQDN
+ mem <- choose (0, maxMem)
+ dsk <- choose (0, maxDsk)
+ run_st <- arbitrary
+ pn <- arbitrary
+ sn <- arbitrary
+ vcpus <- choose (0, maxCpu)
+ return $ Instance.create name mem dsk vcpus run_st [] True pn sn
+ Types.DTDrbd8
-- | Generates an arbitrary node based on sizing information.
genNode :: Maybe Int -- ^ Minimum node size in terms of units
-> Gen Node.Node
genNode min_multiplier max_multiplier = do
let (base_mem, base_dsk, base_cpu) =
- case min_multiplier of
- Just mm -> (mm * Types.unitMem,
- mm * Types.unitDsk,
- mm * Types.unitCpu)
- Nothing -> (0, 0, 0)
+ case min_multiplier of
+ Just mm -> (mm * Types.unitMem,
+ mm * Types.unitDsk,
+ mm * Types.unitCpu)
+ Nothing -> (0, 0, 0)
(top_mem, top_dsk, top_cpu) =
- case max_multiplier of
- Just mm -> (mm * Types.unitMem,
- mm * Types.unitDsk,
- mm * Types.unitCpu)
- Nothing -> (maxMem, maxDsk, maxCpu)
+ case max_multiplier of
+ Just mm -> (mm * Types.unitMem,
+ mm * Types.unitDsk,
+ mm * Types.unitCpu)
+ Nothing -> (maxMem, maxDsk, maxCpu)
name <- getFQDN
mem_t <- choose (base_mem, top_mem)
mem_f <- choose (base_mem, mem_t)
-- and a random node
instance Arbitrary Node.Node where
- arbitrary = genNode Nothing Nothing
+ arbitrary = genNode Nothing Nothing
-- replace disks
instance Arbitrary OpCodes.ReplaceDisksMode where
- arbitrary = elements [ OpCodes.ReplaceOnPrimary
- , OpCodes.ReplaceOnSecondary
- , OpCodes.ReplaceNewSecondary
- , OpCodes.ReplaceAuto
- ]
+ arbitrary = elements [minBound..maxBound]
instance Arbitrary OpCodes.OpCode where
arbitrary = do
, "OP_INSTANCE_FAILOVER"
, "OP_INSTANCE_MIGRATE"
]
- (case op_id of
- "OP_TEST_DELAY" ->
- liftM3 OpCodes.OpTestDelay arbitrary arbitrary arbitrary
- "OP_INSTANCE_REPLACE_DISKS" ->
- liftM5 OpCodes.OpInstanceReplaceDisks arbitrary arbitrary
+ case op_id of
+ "OP_TEST_DELAY" ->
+ liftM3 OpCodes.OpTestDelay arbitrary arbitrary arbitrary
+ "OP_INSTANCE_REPLACE_DISKS" ->
+ liftM5 OpCodes.OpInstanceReplaceDisks arbitrary arbitrary
arbitrary arbitrary arbitrary
- "OP_INSTANCE_FAILOVER" ->
- liftM3 OpCodes.OpInstanceFailover arbitrary arbitrary
- arbitrary
- "OP_INSTANCE_MIGRATE" ->
- liftM5 OpCodes.OpInstanceMigrate arbitrary arbitrary
- arbitrary arbitrary
+ "OP_INSTANCE_FAILOVER" ->
+ liftM3 OpCodes.OpInstanceFailover arbitrary arbitrary
arbitrary
- _ -> fail "Wrong opcode")
+ "OP_INSTANCE_MIGRATE" ->
+ liftM5 OpCodes.OpInstanceMigrate arbitrary arbitrary
+ arbitrary arbitrary arbitrary
+ _ -> fail "Wrong opcode"
instance Arbitrary Jobs.OpStatus where
arbitrary = elements [minBound..maxBound]
newtype SmallRatio = SmallRatio Double deriving Show
instance Arbitrary SmallRatio where
- arbitrary = do
- v <- choose (0, 1)
- return $ SmallRatio v
+ arbitrary = do
+ v <- choose (0, 1)
+ return $ SmallRatio v
instance Arbitrary Types.AllocPolicy where
arbitrary = elements [minBound..maxBound]
arbitrary = elements [minBound..maxBound]
instance Arbitrary Types.FailMode where
- arbitrary = elements [minBound..maxBound]
+ arbitrary = elements [minBound..maxBound]
instance Arbitrary a => Arbitrary (Types.OpResult a) where
- arbitrary = arbitrary >>= \c ->
- case c of
- False -> liftM Types.OpFail arbitrary
- True -> liftM Types.OpGood arbitrary
+ arbitrary = arbitrary >>= \c ->
+ if c
+ then liftM Types.OpGood arbitrary
+ else liftM Types.OpFail arbitrary
-- * Actual tests
-- | If the list is not just an empty element, and if the elements do
-- not contain commas, then join+split should be idempotent.
prop_Utils_commaJoinSplit =
- forAll (arbitrary `suchThat`
- (\l -> l /= [""] && all (not . elem ',') l )) $ \lst ->
- Utils.sepSplit ',' (Utils.commaJoin lst) == lst
+ forAll (arbitrary `suchThat`
+ (\l -> l /= [""] && all (notElem ',') l )) $ \lst ->
+ Utils.sepSplit ',' (Utils.commaJoin lst) ==? lst
-- | Split and join should always be idempotent.
-prop_Utils_commaSplitJoin s = Utils.commaJoin (Utils.sepSplit ',' s) == s
+prop_Utils_commaSplitJoin s =
+ Utils.commaJoin (Utils.sepSplit ',' s) ==? s
-- | Test fromObjWithDefault, using the Maybe monad and an integer
-- value.
prop_Utils_fromObjWithDefault def_value random_key =
- -- a missing key will be returned with the default
- Utils.fromObjWithDefault [] random_key def_value == Just def_value &&
- -- a found key will be returned as is, not with default
- Utils.fromObjWithDefault [(random_key, J.showJSON def_value)]
- random_key (def_value+1) == Just def_value
- where _types = def_value :: Integer
+ -- a missing key will be returned with the default
+ Utils.fromObjWithDefault [] random_key def_value == Just def_value &&
+ -- a found key will be returned as is, not with default
+ Utils.fromObjWithDefault [(random_key, J.showJSON def_value)]
+ random_key (def_value+1) == Just def_value
+ where _types = def_value :: Integer
-- | Test that functional if' behaves like the syntactic sugar if.
-prop_Utils_if'if :: Bool -> Int -> Int -> Bool
-prop_Utils_if'if cnd a b = Utils.if' cnd a b == if cnd then a else b
+prop_Utils_if'if :: Bool -> Int -> Int -> Gen Prop
+prop_Utils_if'if cnd a b =
+ Utils.if' cnd a b ==? if cnd then a else b
-- | Test basic select functionality
-prop_Utils_select :: Int -- ^ Default result
- -> [Int] -- ^ List of False values
- -> [Int] -- ^ List of True values
- -> Bool -- ^ Test result
+prop_Utils_select :: Int -- ^ Default result
+ -> [Int] -- ^ List of False values
+ -> [Int] -- ^ List of True values
+ -> Gen Prop -- ^ Test result
prop_Utils_select def lst1 lst2 =
- Utils.select def cndlist == expectedresult
- where expectedresult = Utils.if' (null lst2) def (head lst2)
- flist = map (\e -> (False, e)) lst1
- tlist = map (\e -> (True, e)) lst2
- cndlist = flist ++ tlist
+ Utils.select def (flist ++ tlist) ==? expectedresult
+ where expectedresult = Utils.if' (null lst2) def (head lst2)
+ flist = zip (repeat False) lst1
+ tlist = zip (repeat True) lst2
-- | Test basic select functionality with undefined default
-prop_Utils_select_undefd :: [Int] -- ^ List of False values
+prop_Utils_select_undefd :: [Int] -- ^ List of False values
-> NonEmptyList Int -- ^ List of True values
- -> Bool -- ^ Test result
+ -> Gen Prop -- ^ Test result
prop_Utils_select_undefd lst1 (NonEmpty lst2) =
- Utils.select undefined cndlist == head lst2
- where flist = map (\e -> (False, e)) lst1
- tlist = map (\e -> (True, e)) lst2
- cndlist = flist ++ tlist
+ Utils.select undefined (flist ++ tlist) ==? head lst2
+ where flist = zip (repeat False) lst1
+ tlist = zip (repeat True) lst2
-- | Test basic select functionality with undefined list values
-prop_Utils_select_undefv :: [Int] -- ^ List of False values
+prop_Utils_select_undefv :: [Int] -- ^ List of False values
-> NonEmptyList Int -- ^ List of True values
- -> Bool -- ^ Test result
+ -> Gen Prop -- ^ Test result
prop_Utils_select_undefv lst1 (NonEmpty lst2) =
- Utils.select undefined cndlist == head lst2
- where flist = map (\e -> (False, e)) lst1
- tlist = map (\e -> (True, e)) lst2
- cndlist = flist ++ tlist ++ [undefined]
+ Utils.select undefined cndlist ==? head lst2
+ where flist = zip (repeat False) lst1
+ tlist = zip (repeat True) lst2
+ cndlist = flist ++ tlist ++ [undefined]
prop_Utils_parseUnit (NonNegative n) =
- Utils.parseUnit (show n) == Types.Ok n &&
- Utils.parseUnit (show n ++ "m") == Types.Ok n &&
- (case Utils.parseUnit (show n ++ "M") of
- Types.Ok m -> if n > 0
- then m < n -- for positive values, X MB is less than X MiB
- else m == 0 -- but for 0, 0 MB == 0 MiB
- Types.Bad _ -> False) &&
- Utils.parseUnit (show n ++ "g") == Types.Ok (n*1024) &&
- Utils.parseUnit (show n ++ "t") == Types.Ok (n*1048576) &&
- Types.isBad (Utils.parseUnit (show n ++ "x")::Types.Result Int)
+ Utils.parseUnit (show n) == Types.Ok n &&
+ Utils.parseUnit (show n ++ "m") == Types.Ok n &&
+ (case Utils.parseUnit (show n ++ "M") of
+ Types.Ok m -> if n > 0
+                       then m < n -- for positive values, X MB < X MiB
+ else m == 0 -- but for 0, 0 MB == 0 MiB
+ Types.Bad _ -> False) &&
+ Utils.parseUnit (show n ++ "g") == Types.Ok (n*1024) &&
+ Utils.parseUnit (show n ++ "t") == Types.Ok (n*1048576) &&
+ Types.isBad (Utils.parseUnit (show n ++ "x")::Types.Result Int)
where _types = n::Int
-- | Test list for the Utils module.
-testUtils =
- [ run prop_Utils_commaJoinSplit
- , run prop_Utils_commaSplitJoin
- , run prop_Utils_fromObjWithDefault
- , run prop_Utils_if'if
- , run prop_Utils_select
- , run prop_Utils_select_undefd
- , run prop_Utils_select_undefv
- , run prop_Utils_parseUnit
- ]
+testSuite "Utils"
+ [ 'prop_Utils_commaJoinSplit
+ , 'prop_Utils_commaSplitJoin
+ , 'prop_Utils_fromObjWithDefault
+ , 'prop_Utils_if'if
+ , 'prop_Utils_select
+ , 'prop_Utils_select_undefd
+ , 'prop_Utils_select_undefv
+ , 'prop_Utils_parseUnit
+ ]
-- ** PeerMap tests
-- | Make sure add is idempotent.
prop_PeerMap_addIdempotent pmap key em =
- fn puniq == fn (fn puniq)
+ fn puniq ==? fn (fn puniq)
where _types = (pmap::PeerMap.PeerMap,
key::PeerMap.Key, em::PeerMap.Elem)
fn = PeerMap.add key em
-- | Make sure remove is idempotent.
prop_PeerMap_removeIdempotent pmap key =
- fn puniq == fn (fn puniq)
+ fn puniq ==? fn (fn puniq)
where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
fn = PeerMap.remove key
puniq = PeerMap.accumArray const pmap
-- | Make sure a missing item returns 0.
prop_PeerMap_findMissing pmap key =
- PeerMap.find key (PeerMap.remove key puniq) == 0
+ PeerMap.find key (PeerMap.remove key puniq) ==? 0
where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
puniq = PeerMap.accumArray const pmap
-- | Make sure an added item is found.
prop_PeerMap_addFind pmap key em =
- PeerMap.find key (PeerMap.add key em puniq) == em
+ PeerMap.find key (PeerMap.add key em puniq) ==? em
where _types = (pmap::PeerMap.PeerMap,
key::PeerMap.Key, em::PeerMap.Elem)
puniq = PeerMap.accumArray const pmap
-- | Manual check that maxElem returns the maximum indeed, or 0 for null.
prop_PeerMap_maxElem pmap =
- PeerMap.maxElem puniq == if null puniq then 0
- else (maximum . snd . unzip) puniq
+ PeerMap.maxElem puniq ==? if null puniq then 0
+ else (maximum . snd . unzip) puniq
where _types = pmap::PeerMap.PeerMap
puniq = PeerMap.accumArray const pmap
-- | List of tests for the PeerMap module.
-testPeerMap =
- [ run prop_PeerMap_addIdempotent
- , run prop_PeerMap_removeIdempotent
- , run prop_PeerMap_maxElem
- , run prop_PeerMap_addFind
- , run prop_PeerMap_findMissing
- ]
+testSuite "PeerMap"
+ [ 'prop_PeerMap_addIdempotent
+ , 'prop_PeerMap_removeIdempotent
+ , 'prop_PeerMap_maxElem
+ , 'prop_PeerMap_addFind
+ , 'prop_PeerMap_findMissing
+ ]
-- ** Container tests
+-- we silence the following due to hlint bug fixed in later versions
+{-# ANN prop_Container_addTwo "HLint: ignore Avoid lambda" #-}
prop_Container_addTwo cdata i1 i2 =
- fn i1 i2 cont == fn i2 i1 cont &&
- fn i1 i2 cont == fn i1 i2 (fn i1 i2 cont)
+ fn i1 i2 cont == fn i2 i1 cont &&
+ fn i1 i2 cont == fn i1 i2 (fn i1 i2 cont)
where _types = (cdata::[Int],
i1::Int, i2::Int)
cont = foldl (\c x -> Container.add x x c) Container.empty cdata
prop_Container_nameOf node =
let nl = makeSmallCluster node 1
fnode = head (Container.elems nl)
- in Container.nameOf nl (Node.idx fnode) == Node.name fnode
+ in Container.nameOf nl (Node.idx fnode) ==? Node.name fnode
-- | We test that in a cluster, given a random node, we can find it by
-- its name and alias, as long as all names and aliases are unique,
forAll (vector cnt) $ \ names ->
(length . nub) (map fst names ++ map snd names) ==
length names * 2 &&
- not (othername `elem` (map fst names ++ map snd names)) ==>
+ othername `notElem` (map fst names ++ map snd names) ==>
let nl = makeSmallCluster node cnt
nodes = Container.elems nl
nodes' = map (\((name, alias), nn) -> (Node.idx nn,
target = snd (nodes' !! fidx)
in Container.findByName nl' (Node.name target) == Just target &&
Container.findByName nl' (Node.alias target) == Just target &&
- Container.findByName nl' othername == Nothing
+ isNothing (Container.findByName nl' othername)
-testContainer =
- [ run prop_Container_addTwo
- , run prop_Container_nameOf
- , run prop_Container_findByName
- ]
+testSuite "Container"
+ [ 'prop_Container_addTwo
+ , 'prop_Container_nameOf
+ , 'prop_Container_findByName
+ ]
-- ** Instance tests
-- Simple instance tests, we only have setter/getters
prop_Instance_creat inst =
- Instance.name inst == Instance.alias inst
+ Instance.name inst ==? Instance.alias inst
prop_Instance_setIdx inst idx =
- Instance.idx (Instance.setIdx inst idx) == idx
+ Instance.idx (Instance.setIdx inst idx) ==? idx
where _types = (inst::Instance.Instance, idx::Types.Idx)
prop_Instance_setName inst name =
- Instance.name newinst == name &&
- Instance.alias newinst == name
+ Instance.name newinst == name &&
+ Instance.alias newinst == name
where _types = (inst::Instance.Instance, name::String)
newinst = Instance.setName inst name
prop_Instance_setAlias inst name =
- Instance.name newinst == Instance.name inst &&
- Instance.alias newinst == name
+ Instance.name newinst == Instance.name inst &&
+ Instance.alias newinst == name
where _types = (inst::Instance.Instance, name::String)
newinst = Instance.setAlias inst name
prop_Instance_setPri inst pdx =
- Instance.pNode (Instance.setPri inst pdx) == pdx
+ Instance.pNode (Instance.setPri inst pdx) ==? pdx
where _types = (inst::Instance.Instance, pdx::Types.Ndx)
prop_Instance_setSec inst sdx =
- Instance.sNode (Instance.setSec inst sdx) == sdx
+ Instance.sNode (Instance.setSec inst sdx) ==? sdx
where _types = (inst::Instance.Instance, sdx::Types.Ndx)
prop_Instance_setBoth inst pdx sdx =
- Instance.pNode si == pdx && Instance.sNode si == sdx
+ Instance.pNode si == pdx && Instance.sNode si == sdx
where _types = (inst::Instance.Instance, pdx::Types.Ndx, sdx::Types.Ndx)
si = Instance.setBoth inst pdx sdx
-prop_Instance_runStatus_True =
- forAll (arbitrary `suchThat`
- ((`elem` Instance.runningStates) . Instance.runSt))
- Instance.running
-
-prop_Instance_runStatus_False inst =
- let run_st = Instance.running inst
- run_tx = Instance.runSt inst
- in
- run_tx `notElem` Instance.runningStates ==> not run_st
-
prop_Instance_shrinkMG inst =
- Instance.mem inst >= 2 * Types.unitMem ==>
- case Instance.shrinkByType inst Types.FailMem of
- Types.Ok inst' ->
- Instance.mem inst' == Instance.mem inst - Types.unitMem
- _ -> False
+ Instance.mem inst >= 2 * Types.unitMem ==>
+ case Instance.shrinkByType inst Types.FailMem of
+ Types.Ok inst' -> Instance.mem inst' == Instance.mem inst - Types.unitMem
+ _ -> False
prop_Instance_shrinkMF inst =
- forAll (choose (0, 2 * Types.unitMem - 1)) $ \mem ->
+ forAll (choose (0, 2 * Types.unitMem - 1)) $ \mem ->
let inst' = inst { Instance.mem = mem}
in Types.isBad $ Instance.shrinkByType inst' Types.FailMem
prop_Instance_shrinkCG inst =
- Instance.vcpus inst >= 2 * Types.unitCpu ==>
- case Instance.shrinkByType inst Types.FailCPU of
- Types.Ok inst' ->
- Instance.vcpus inst' == Instance.vcpus inst - Types.unitCpu
- _ -> False
+ Instance.vcpus inst >= 2 * Types.unitCpu ==>
+ case Instance.shrinkByType inst Types.FailCPU of
+ Types.Ok inst' ->
+ Instance.vcpus inst' == Instance.vcpus inst - Types.unitCpu
+ _ -> False
prop_Instance_shrinkCF inst =
- forAll (choose (0, 2 * Types.unitCpu - 1)) $ \vcpus ->
+ forAll (choose (0, 2 * Types.unitCpu - 1)) $ \vcpus ->
let inst' = inst { Instance.vcpus = vcpus }
in Types.isBad $ Instance.shrinkByType inst' Types.FailCPU
prop_Instance_shrinkDG inst =
- Instance.dsk inst >= 2 * Types.unitDsk ==>
- case Instance.shrinkByType inst Types.FailDisk of
- Types.Ok inst' ->
- Instance.dsk inst' == Instance.dsk inst - Types.unitDsk
- _ -> False
+ Instance.dsk inst >= 2 * Types.unitDsk ==>
+ case Instance.shrinkByType inst Types.FailDisk of
+ Types.Ok inst' ->
+ Instance.dsk inst' == Instance.dsk inst - Types.unitDsk
+ _ -> False
prop_Instance_shrinkDF inst =
- forAll (choose (0, 2 * Types.unitDsk - 1)) $ \dsk ->
+ forAll (choose (0, 2 * Types.unitDsk - 1)) $ \dsk ->
let inst' = inst { Instance.dsk = dsk }
in Types.isBad $ Instance.shrinkByType inst' Types.FailDisk
prop_Instance_setMovable inst m =
- Instance.movable inst' == m
+ Instance.movable inst' ==? m
where inst' = Instance.setMovable inst m
-testInstance =
- [ run prop_Instance_creat
- , run prop_Instance_setIdx
- , run prop_Instance_setName
- , run prop_Instance_setAlias
- , run prop_Instance_setPri
- , run prop_Instance_setSec
- , run prop_Instance_setBoth
- , run prop_Instance_runStatus_True
- , run prop_Instance_runStatus_False
- , run prop_Instance_shrinkMG
- , run prop_Instance_shrinkMF
- , run prop_Instance_shrinkCG
- , run prop_Instance_shrinkCF
- , run prop_Instance_shrinkDG
- , run prop_Instance_shrinkDF
- , run prop_Instance_setMovable
- ]
+testSuite "Instance"
+ [ 'prop_Instance_creat
+ , 'prop_Instance_setIdx
+ , 'prop_Instance_setName
+ , 'prop_Instance_setAlias
+ , 'prop_Instance_setPri
+ , 'prop_Instance_setSec
+ , 'prop_Instance_setBoth
+ , 'prop_Instance_shrinkMG
+ , 'prop_Instance_shrinkMF
+ , 'prop_Instance_shrinkCG
+ , 'prop_Instance_shrinkCF
+ , 'prop_Instance_shrinkDG
+ , 'prop_Instance_shrinkDF
+ , 'prop_Instance_setMovable
+ ]
-- ** Text backend tests
prop_Text_Load_Instance name mem dsk vcpus status
(NonEmpty pnode) snode
(NonNegative pdx) (NonNegative sdx) autobal dt =
- pnode /= snode && pdx /= sdx ==>
- let vcpus_s = show vcpus
- dsk_s = show dsk
- mem_s = show mem
- ndx = if null snode
+ pnode /= snode && pdx /= sdx ==>
+ let vcpus_s = show vcpus
+ dsk_s = show dsk
+ mem_s = show mem
+ status_s = Types.instanceStatusToRaw status
+ ndx = if null snode
then [(pnode, pdx)]
else [(pnode, pdx), (snode, sdx)]
- nl = Data.Map.fromList ndx
- tags = ""
- sbal = if autobal then "Y" else "N"
- sdt = Types.dtToString dt
- inst = Text.loadInst nl
- [name, mem_s, dsk_s, vcpus_s, status,
- sbal, pnode, snode, sdt, tags]
- fail1 = Text.loadInst nl
- [name, mem_s, dsk_s, vcpus_s, status,
- sbal, pnode, pnode, tags]
- _types = ( name::String, mem::Int, dsk::Int
- , vcpus::Int, status::String
- , snode::String
- , autobal::Bool)
- in
- case inst of
- Types.Bad msg -> printTestCase ("Failed to load instance: " ++ msg)
- False
- Types.Ok (_, i) -> printTestCase "Mismatch in some field while\
- \ loading the instance" $
- Instance.name i == name &&
- Instance.vcpus i == vcpus &&
- Instance.mem i == mem &&
- Instance.pNode i == pdx &&
- Instance.sNode i == (if null snode
- then Node.noSecondary
- else sdx) &&
- Instance.autoBalance i == autobal &&
- Types.isBad fail1
+ nl = Data.Map.fromList ndx
+ tags = ""
+ sbal = if autobal then "Y" else "N"
+ sdt = Types.diskTemplateToRaw dt
+ inst = Text.loadInst nl
+ [name, mem_s, dsk_s, vcpus_s, status_s,
+ sbal, pnode, snode, sdt, tags]
+ fail1 = Text.loadInst nl
+ [name, mem_s, dsk_s, vcpus_s, status_s,
+ sbal, pnode, pnode, tags]
+ _types = ( name::String, mem::Int, dsk::Int
+ , vcpus::Int, status::Types.InstanceStatus
+ , snode::String
+ , autobal::Bool)
+ in case inst of
+ Types.Bad msg -> printTestCase ("Failed to load instance: " ++ msg)
+ False
+ Types.Ok (_, i) -> printTestCase "Mismatch in some field while\
+ \ loading the instance" $
+ Instance.name i == name &&
+ Instance.vcpus i == vcpus &&
+ Instance.mem i == mem &&
+ Instance.pNode i == pdx &&
+ Instance.sNode i == (if null snode
+ then Node.noSecondary
+ else sdx) &&
+ Instance.autoBalance i == autobal &&
+ Types.isBad fail1
prop_Text_Load_InstanceFail ktn fields =
- length fields /= 10 ==>
+ length fields /= 10 ==>
case Text.loadInst nl fields of
Types.Ok _ -> printTestCase "Managed to load instance from invalid\
\ data" False
where nl = Data.Map.fromList ktn
prop_Text_Load_Node name tm nm fm td fd tc fo =
- let conv v = if v < 0
- then "?"
- else show v
- tm_s = conv tm
- nm_s = conv nm
- fm_s = conv fm
- td_s = conv td
- fd_s = conv fd
- tc_s = conv tc
- fo_s = if fo
+ let conv v = if v < 0
+ then "?"
+ else show v
+ tm_s = conv tm
+ nm_s = conv nm
+ fm_s = conv fm
+ td_s = conv td
+ fd_s = conv fd
+ tc_s = conv tc
+ fo_s = if fo
then "Y"
else "N"
- any_broken = any (< 0) [tm, nm, fm, td, fd, tc]
- gid = Group.uuid defGroup
- in case Text.loadNode defGroupAssoc
- [name, tm_s, nm_s, fm_s, td_s, fd_s, tc_s, fo_s, gid] of
- Nothing -> False
- Just (name', node) ->
- if fo || any_broken
- then Node.offline node
- else Node.name node == name' && name' == name &&
- Node.alias node == name &&
- Node.tMem node == fromIntegral tm &&
- Node.nMem node == nm &&
- Node.fMem node == fm &&
- Node.tDsk node == fromIntegral td &&
- Node.fDsk node == fd &&
- Node.tCpu node == fromIntegral tc
+ any_broken = any (< 0) [tm, nm, fm, td, fd, tc]
+ gid = Group.uuid defGroup
+ in case Text.loadNode defGroupAssoc
+ [name, tm_s, nm_s, fm_s, td_s, fd_s, tc_s, fo_s, gid] of
+ Nothing -> False
+ Just (name', node) ->
+ if fo || any_broken
+ then Node.offline node
+ else Node.name node == name' && name' == name &&
+ Node.alias node == name &&
+ Node.tMem node == fromIntegral tm &&
+ Node.nMem node == nm &&
+ Node.fMem node == fm &&
+ Node.tDsk node == fromIntegral td &&
+ Node.fDsk node == fd &&
+ Node.tCpu node == fromIntegral tc
prop_Text_Load_NodeFail fields =
- length fields /= 8 ==> isNothing $ Text.loadNode Data.Map.empty fields
+ length fields /= 8 ==> isNothing $ Text.loadNode Data.Map.empty fields
prop_Text_NodeLSIdempotent node =
- (Text.loadNode defGroupAssoc.
- Utils.sepSplit '|' . Text.serializeNode defGroupList) n ==
- Just (Node.name n, n)
+ (Text.loadNode defGroupAssoc.
+ Utils.sepSplit '|' . Text.serializeNode defGroupList) n ==
+ Just (Node.name n, n)
-- override failN1 to what loadNode returns by default
where n = node { Node.failN1 = True, Node.offline = False }
-testText =
- [ run prop_Text_Load_Instance
- , run prop_Text_Load_InstanceFail
- , run prop_Text_Load_Node
- , run prop_Text_Load_NodeFail
- , run prop_Text_NodeLSIdempotent
- ]
+testSuite "Text"
+ [ 'prop_Text_Load_Instance
+ , 'prop_Text_Load_InstanceFail
+ , 'prop_Text_Load_Node
+ , 'prop_Text_Load_NodeFail
+ , 'prop_Text_NodeLSIdempotent
+ ]
-- ** Node tests
prop_Node_setAlias node name =
- Node.name newnode == Node.name node &&
- Node.alias newnode == name
+ Node.name newnode == Node.name node &&
+ Node.alias newnode == name
where _types = (node::Node.Node, name::String)
newnode = Node.setAlias node name
prop_Node_setOffline node status =
- Node.offline newnode == status
+ Node.offline newnode ==? status
where newnode = Node.setOffline node status
prop_Node_setXmem node xm =
- Node.xMem newnode == xm
+ Node.xMem newnode ==? xm
where newnode = Node.setXmem node xm
prop_Node_setMcpu node mc =
- Node.mCpu newnode == mc
+ Node.mCpu newnode ==? mc
where newnode = Node.setMcpu node mc
-- | Check that an instance add with too high memory or disk will be
-- rejected.
-prop_Node_addPriFM node inst = Instance.mem inst >= Node.fMem node &&
- not (Node.failN1 node)
- ==>
- case Node.addPri node inst'' of
- Types.OpFail Types.FailMem -> True
- _ -> False
- where _types = (node::Node.Node, inst::Instance.Instance)
- inst' = setInstanceSmallerThanNode node inst
- inst'' = inst' { Instance.mem = Instance.mem inst }
-
-prop_Node_addPriFD node inst = Instance.dsk inst >= Node.fDsk node &&
- not (Node.failN1 node)
- ==>
- case Node.addPri node inst'' of
- Types.OpFail Types.FailDisk -> True
- _ -> False
+prop_Node_addPriFM node inst =
+ Instance.mem inst >= Node.fMem node && not (Node.failN1 node) &&
+ not (Instance.instanceOffline inst) ==>
+ case Node.addPri node inst'' of
+ Types.OpFail Types.FailMem -> True
+ _ -> False
+ where _types = (node::Node.Node, inst::Instance.Instance)
+ inst' = setInstanceSmallerThanNode node inst
+ inst'' = inst' { Instance.mem = Instance.mem inst }
+
+prop_Node_addPriFD node inst =
+ Instance.dsk inst >= Node.fDsk node && not (Node.failN1 node) ==>
+ case Node.addPri node inst'' of
+ Types.OpFail Types.FailDisk -> True
+ _ -> False
where _types = (node::Node.Node, inst::Instance.Instance)
inst' = setInstanceSmallerThanNode node inst
inst'' = inst' { Instance.dsk = Instance.dsk inst }
prop_Node_addPriFC node inst (Positive extra) =
- not (Node.failN1 node) ==>
- case Node.addPri node inst'' of
- Types.OpFail Types.FailCPU -> True
- _ -> False
+ not (Node.failN1 node) && not (Instance.instanceOffline inst) ==>
+ case Node.addPri node inst'' of
+ Types.OpFail Types.FailCPU -> True
+ _ -> False
where _types = (node::Node.Node, inst::Instance.Instance)
inst' = setInstanceSmallerThanNode node inst
inst'' = inst' { Instance.vcpus = Node.availCpu node + extra }
-- | Check that an instance add with too high memory or disk will be
-- rejected.
prop_Node_addSec node inst pdx =
- (Instance.mem inst >= (Node.fMem node - Node.rMem node) ||
- Instance.dsk inst >= Node.fDsk node) &&
- not (Node.failN1 node)
- ==> isFailure (Node.addSec node inst pdx)
+ ((Instance.mem inst >= (Node.fMem node - Node.rMem node) &&
+ not (Instance.instanceOffline inst)) ||
+ Instance.dsk inst >= Node.fDsk node) &&
+ not (Node.failN1 node) ==>
+ isFailure (Node.addSec node inst pdx)
where _types = (node::Node.Node, inst::Instance.Instance, pdx::Int)
+-- | Check that an offline instance with reasonable disk size can always
+-- be added.
+prop_Node_addPriOffline =
+ forAll (arbitrary `suchThat` ((> 0) . Node.fMem)) $ \node ->
+ forAll (arbitrary `suchThat`
+ (\ x -> (Instance.dsk x < Node.fDsk node) &&
+ Instance.instanceOffline x)) $ \inst ->
+ case Node.addPri node inst of
+ Types.OpGood _ -> True
+ _ -> False
+
+prop_Node_addSecOffline pdx =
+ forAll (arbitrary `suchThat` ((> 0) . Node.fMem)) $ \node ->
+ forAll (arbitrary `suchThat`
+ (\ x -> (Instance.dsk x < Node.fDsk node) &&
+ Instance.instanceOffline x)) $ \inst ->
+ case Node.addSec node inst pdx of
+ Types.OpGood _ -> True
+ _ -> False
+
-- | Checks for memory reservation changes.
prop_Node_rMem inst =
- forAll (arbitrary `suchThat` ((> Types.unitMem) . Node.fMem)) $ \node ->
- -- ab = auto_balance, nb = non-auto_balance
- -- we use -1 as the primary node of the instance
- let inst' = inst { Instance.pNode = -1, Instance.autoBalance = True }
- inst_ab = setInstanceSmallerThanNode node inst'
- inst_nb = inst_ab { Instance.autoBalance = False }
- -- now we have the two instances, identical except the
- -- autoBalance attribute
- orig_rmem = Node.rMem node
- inst_idx = Instance.idx inst_ab
- node_add_ab = Node.addSec node inst_ab (-1)
- node_add_nb = Node.addSec node inst_nb (-1)
- node_del_ab = liftM (`Node.removeSec` inst_ab) node_add_ab
- node_del_nb = liftM (`Node.removeSec` inst_nb) node_add_nb
- in case (node_add_ab, node_add_nb, node_del_ab, node_del_nb) of
- (Types.OpGood a_ab, Types.OpGood a_nb,
- Types.OpGood d_ab, Types.OpGood d_nb) ->
- printTestCase "Consistency checks failed" $
- Node.rMem a_ab > orig_rmem &&
- Node.rMem a_ab - orig_rmem == Instance.mem inst_ab &&
- Node.rMem a_nb == orig_rmem &&
- Node.rMem d_ab == orig_rmem &&
- Node.rMem d_nb == orig_rmem &&
- -- this is not related to rMem, but as good a place to
- -- test as any
- inst_idx `elem` Node.sList a_ab &&
- not (inst_idx `elem` Node.sList d_ab)
- x -> printTestCase ("Failed to add/remove instances: " ++ show x)
- False
+ not (Instance.instanceOffline inst) ==>
+ forAll (arbitrary `suchThat` ((> Types.unitMem) . Node.fMem)) $ \node ->
+ -- ab = auto_balance, nb = non-auto_balance
+ -- we use -1 as the primary node of the instance
+ let inst' = inst { Instance.pNode = -1, Instance.autoBalance = True }
+ inst_ab = setInstanceSmallerThanNode node inst'
+ inst_nb = inst_ab { Instance.autoBalance = False }
+ -- now we have the two instances, identical except the
+ -- autoBalance attribute
+ orig_rmem = Node.rMem node
+ inst_idx = Instance.idx inst_ab
+ node_add_ab = Node.addSec node inst_ab (-1)
+ node_add_nb = Node.addSec node inst_nb (-1)
+ node_del_ab = liftM (`Node.removeSec` inst_ab) node_add_ab
+ node_del_nb = liftM (`Node.removeSec` inst_nb) node_add_nb
+ in case (node_add_ab, node_add_nb, node_del_ab, node_del_nb) of
+ (Types.OpGood a_ab, Types.OpGood a_nb,
+ Types.OpGood d_ab, Types.OpGood d_nb) ->
+ printTestCase "Consistency checks failed" $
+ Node.rMem a_ab > orig_rmem &&
+ Node.rMem a_ab - orig_rmem == Instance.mem inst_ab &&
+ Node.rMem a_nb == orig_rmem &&
+ Node.rMem d_ab == orig_rmem &&
+ Node.rMem d_nb == orig_rmem &&
+ -- this is not related to rMem, but as good a place to
+ -- test as any
+ inst_idx `elem` Node.sList a_ab &&
+ inst_idx `notElem` Node.sList d_ab
+ x -> printTestCase ("Failed to add/remove instances: " ++ show x) False
-- | Check mdsk setting.
prop_Node_setMdsk node mx =
- Node.loDsk node' >= 0 &&
- fromIntegral (Node.loDsk node') <= Node.tDsk node &&
- Node.availDisk node' >= 0 &&
- Node.availDisk node' <= Node.fDsk node' &&
- fromIntegral (Node.availDisk node') <= Node.tDsk node' &&
- Node.mDsk node' == mx'
+ Node.loDsk node' >= 0 &&
+ fromIntegral (Node.loDsk node') <= Node.tDsk node &&
+ Node.availDisk node' >= 0 &&
+ Node.availDisk node' <= Node.fDsk node' &&
+ fromIntegral (Node.availDisk node') <= Node.tDsk node' &&
+ Node.mDsk node' == mx'
where _types = (node::Node.Node, mx::SmallRatio)
node' = Node.setMdsk node mx'
SmallRatio mx' = mx
-- Check tag maps
prop_Node_tagMaps_idempotent tags =
- Node.delTags (Node.addTags m tags) tags == m
+ Node.delTags (Node.addTags m tags) tags ==? m
where m = Data.Map.empty
prop_Node_tagMaps_reject tags =
- not (null tags) ==>
- any (\t -> Node.rejectAddTags m [t]) tags
+ not (null tags) ==>
+ all (\t -> Node.rejectAddTags m [t]) tags
where m = Node.addTags Data.Map.empty tags
prop_Node_showField node =
fst (Node.showHeader field) /= Types.unknownField &&
Node.showField node field /= Types.unknownField
-
prop_Node_computeGroups nodes =
let ng = Node.computeGroups nodes
onlyuuid = map fst ng
length (nub onlyuuid) == length onlyuuid &&
(null nodes || not (null ng))
-testNode =
- [ run prop_Node_setAlias
- , run prop_Node_setOffline
- , run prop_Node_setMcpu
- , run prop_Node_setXmem
- , run prop_Node_addPriFM
- , run prop_Node_addPriFD
- , run prop_Node_addPriFC
- , run prop_Node_addSec
- , run prop_Node_rMem
- , run prop_Node_setMdsk
- , run prop_Node_tagMaps_idempotent
- , run prop_Node_tagMaps_reject
- , run prop_Node_showField
- , run prop_Node_computeGroups
- ]
-
+testSuite "Node"
+ [ 'prop_Node_setAlias
+ , 'prop_Node_setOffline
+ , 'prop_Node_setMcpu
+ , 'prop_Node_setXmem
+ , 'prop_Node_addPriFM
+ , 'prop_Node_addPriFD
+ , 'prop_Node_addPriFC
+ , 'prop_Node_addSec
+ , 'prop_Node_addPriOffline
+ , 'prop_Node_addSecOffline
+ , 'prop_Node_rMem
+ , 'prop_Node_setMdsk
+ , 'prop_Node_tagMaps_idempotent
+ , 'prop_Node_tagMaps_reject
+ , 'prop_Node_showField
+ , 'prop_Node_computeGroups
+ ]
-- ** Cluster tests
-- | Check that the cluster score is close to zero for a homogeneous
-- cluster.
prop_Score_Zero node =
- forAll (choose (1, 1024)) $ \count ->
+ forAll (choose (1, 1024)) $ \count ->
(not (Node.offline node) && not (Node.failN1 node) && (count > 0) &&
(Node.tDsk node > 0) && (Node.tMem node > 0)) ==>
- let fn = Node.buildPeers node Container.empty
- nlst = replicate count fn
- score = Cluster.compCVNodes nlst
- -- we can't say == 0 here as the floating point errors accumulate;
- -- this should be much lower than the default score in CLI.hs
- in score <= 1e-12
+ let fn = Node.buildPeers node Container.empty
+ nlst = replicate count fn
+ score = Cluster.compCVNodes nlst
+ -- we can't say == 0 here as the floating point errors accumulate;
+ -- this should be much lower than the default score in CLI.hs
+ in score <= 1e-12
-- | Check that cluster stats are sane.
prop_CStats_sane node =
- forAll (choose (1, 1024)) $ \count ->
+ forAll (choose (1, 1024)) $ \count ->
(not (Node.offline node) && not (Node.failN1 node) &&
(Node.availDisk node > 0) && (Node.availMem node > 0)) ==>
- let fn = Node.buildPeers node Container.empty
- nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
- nl = Container.fromList nlst
- cstats = Cluster.totalResources nl
- in Cluster.csAdsk cstats >= 0 &&
- Cluster.csAdsk cstats <= Cluster.csFdsk cstats
+ let fn = Node.buildPeers node Container.empty
+ nlst = zip [1..] $ replicate count fn::[(Types.Ndx, Node.Node)]
+ nl = Container.fromList nlst
+ cstats = Cluster.totalResources nl
+ in Cluster.csAdsk cstats >= 0 &&
+ Cluster.csAdsk cstats <= Cluster.csFdsk cstats
-- | Check that one instance is allocated correctly, without
-- rebalances needed.
prop_ClusterAlloc_sane node inst =
- forAll (choose (5, 20)) $ \count ->
- not (Node.offline node)
- && not (Node.failN1 node)
- && Node.availDisk node > 0
- && Node.availMem node > 0
- ==>
- let nl = makeSmallCluster node count
- il = Container.empty
- inst' = setInstanceSmallerThanNode node inst
- in case Cluster.genAllocNodes defGroupList nl 2 True >>=
- Cluster.tryAlloc nl il inst' of
- Types.Bad _ -> False
- Types.Ok as ->
- case Cluster.asSolutions as of
- [] -> False
- (xnl, xi, _, cv):[] ->
- let il' = Container.add (Instance.idx xi) xi il
- tbl = Cluster.Table xnl il' cv []
- in not (canBalance tbl True True False)
- _ -> False
+ forAll (choose (5, 20)) $ \count ->
+ not (Node.offline node)
+ && not (Node.failN1 node)
+ && Node.availDisk node > 0
+ && Node.availMem node > 0
+ ==>
+ let (nl, il, inst') = makeSmallEmptyCluster node count inst
+ in case Cluster.genAllocNodes defGroupList nl 2 True >>=
+ Cluster.tryAlloc nl il inst' of
+ Types.Bad _ -> False
+ Types.Ok as ->
+ case Cluster.asSolution as of
+ Nothing -> False
+ Just (xnl, xi, _, cv) ->
+ let il' = Container.add (Instance.idx xi) xi il
+ tbl = Cluster.Table xnl il' cv []
+ in not (canBalance tbl True True False)
-- | Checks that on a 2-5 node cluster, we can allocate a random
-- instance spec via tiered allocation (whatever the original instance
-- spec), on either one or two nodes.
prop_ClusterCanTieredAlloc node inst =
- forAll (choose (2, 5)) $ \count ->
- forAll (choose (1, 2)) $ \rqnodes ->
- not (Node.offline node)
- && not (Node.failN1 node)
- && isNodeBig node 4
- ==>
- let nl = makeSmallCluster node count
- il = Container.empty
- allocnodes = Cluster.genAllocNodes defGroupList nl rqnodes True
- in case allocnodes >>= \allocnodes' ->
- Cluster.tieredAlloc nl il (Just 1) inst allocnodes' [] [] of
- Types.Bad _ -> False
- Types.Ok (_, _, il', ixes, cstats) -> not (null ixes) &&
- IntMap.size il' == length ixes &&
- length ixes == length cstats
+ forAll (choose (2, 5)) $ \count ->
+ forAll (choose (1, 2)) $ \rqnodes ->
+ not (Node.offline node)
+ && not (Node.failN1 node)
+ && isNodeBig node 4
+ ==>
+ let nl = makeSmallCluster node count
+ il = Container.empty
+ allocnodes = Cluster.genAllocNodes defGroupList nl rqnodes True
+ in case allocnodes >>= \allocnodes' ->
+ Cluster.tieredAlloc nl il (Just 1) inst allocnodes' [] [] of
+ Types.Bad _ -> False
+ Types.Ok (_, _, il', ixes, cstats) -> not (null ixes) &&
+ IntMap.size il' == length ixes &&
+ length ixes == length cstats
-- | Checks that on a 4-8 node cluster, once we allocate an instance,
-- we can also evacuate it.
prop_ClusterAllocEvac node inst =
- forAll (choose (4, 8)) $ \count ->
- not (Node.offline node)
- && not (Node.failN1 node)
- && isNodeBig node 4
- ==>
- let nl = makeSmallCluster node count
- il = Container.empty
- inst' = setInstanceSmallerThanNode node inst
- in case Cluster.genAllocNodes defGroupList nl 2 True >>=
- Cluster.tryAlloc nl il inst' of
- Types.Bad _ -> False
- Types.Ok as ->
- case Cluster.asSolutions as of
- [] -> False
- (xnl, xi, _, _):[] ->
- let sdx = Instance.sNode xi
- il' = Container.add (Instance.idx xi) xi il
- in case IAlloc.processRelocate defGroupList xnl il'
- (Instance.idx xi) 1 [sdx] of
- Types.Ok _ -> True
- _ -> False
- _ -> False
+ forAll (choose (4, 8)) $ \count ->
+ not (Node.offline node)
+ && not (Node.failN1 node)
+ && isNodeBig node 4
+ ==>
+ let (nl, il, inst') = makeSmallEmptyCluster node count inst
+ in case Cluster.genAllocNodes defGroupList nl 2 True >>=
+ Cluster.tryAlloc nl il inst' of
+ Types.Bad _ -> False
+ Types.Ok as ->
+ case Cluster.asSolution as of
+ Nothing -> False
+ Just (xnl, xi, _, _) ->
+ let sdx = Instance.sNode xi
+ il' = Container.add (Instance.idx xi) xi il
+ in case IAlloc.processRelocate defGroupList xnl il'
+ (Instance.idx xi) 1 [sdx] of
+ Types.Ok _ -> True
+ _ -> False
-- | Check that allocating multiple instances on a cluster, then
-- adding an empty node, results in a valid rebalance.
prop_ClusterAllocBalance =
- forAll (genNode (Just 5) (Just 128)) $ \node ->
- forAll (choose (3, 5)) $ \count ->
- not (Node.offline node) && not (Node.failN1 node) ==>
- let nl = makeSmallCluster node count
- (hnode, nl') = IntMap.deleteFindMax nl
- il = Container.empty
- allocnodes = Cluster.genAllocNodes defGroupList nl' 2 True
- i_templ = createInstance Types.unitMem Types.unitDsk Types.unitCpu
- in case allocnodes >>= \allocnodes' ->
- Cluster.iterateAlloc nl' il (Just 5) i_templ allocnodes' [] [] of
- Types.Bad _ -> False
- Types.Ok (_, xnl, il', _, _) ->
- let ynl = Container.add (Node.idx hnode) hnode xnl
- cv = Cluster.compCV ynl
- tbl = Cluster.Table ynl il' cv []
- in canBalance tbl True True False
+ forAll (genNode (Just 5) (Just 128)) $ \node ->
+ forAll (choose (3, 5)) $ \count ->
+ not (Node.offline node) && not (Node.failN1 node) ==>
+ let nl = makeSmallCluster node count
+ (hnode, nl') = IntMap.deleteFindMax nl
+ il = Container.empty
+ allocnodes = Cluster.genAllocNodes defGroupList nl' 2 True
+ i_templ = createInstance Types.unitMem Types.unitDsk Types.unitCpu
+ in case allocnodes >>= \allocnodes' ->
+ Cluster.iterateAlloc nl' il (Just 5) i_templ allocnodes' [] [] of
+ Types.Bad _ -> False
+ Types.Ok (_, xnl, il', _, _) ->
+ let ynl = Container.add (Node.idx hnode) hnode xnl
+ cv = Cluster.compCV ynl
+ tbl = Cluster.Table ynl il' cv []
+ in canBalance tbl True True False
-- | Checks consistency.
prop_ClusterCheckConsistency node inst =
all (\(guuid, (nl'', _)) -> all ((== guuid) . Node.group)
(Container.elems nl'')) gni
-testCluster =
- [ run prop_Score_Zero
- , run prop_CStats_sane
- , run prop_ClusterAlloc_sane
- , run prop_ClusterCanTieredAlloc
- , run prop_ClusterAllocEvac
- , run prop_ClusterAllocBalance
- , run prop_ClusterCheckConsistency
- , run prop_ClusterSplitCluster
- ]
+testSuite "Cluster"
+ [ 'prop_Score_Zero
+ , 'prop_CStats_sane
+ , 'prop_ClusterAlloc_sane
+ , 'prop_ClusterCanTieredAlloc
+ , 'prop_ClusterAllocEvac
+ , 'prop_ClusterAllocBalance
+ , 'prop_ClusterCheckConsistency
+ , 'prop_ClusterSplitCluster
+ ]
-- ** OpCodes tests
-- | Check that opcode serialization is idempotent.
prop_OpCodes_serialization op =
case J.readJSON (J.showJSON op) of
- J.Error _ -> False
- J.Ok op' -> op == op'
+ J.Error e -> printTestCase ("Cannot deserialise: " ++ e) False
+ J.Ok op' -> op ==? op'
where _types = op::OpCodes.OpCode
-testOpCodes =
- [ run prop_OpCodes_serialization
- ]
+testSuite "OpCodes"
+ [ 'prop_OpCodes_serialization ]
-- ** Jobs tests
-- | Check that (queued) job\/opcode status serialization is idempotent.
prop_OpStatus_serialization os =
case J.readJSON (J.showJSON os) of
- J.Error _ -> False
- J.Ok os' -> os == os'
+ J.Error e -> printTestCase ("Cannot deserialise: " ++ e) False
+ J.Ok os' -> os ==? os'
where _types = os::Jobs.OpStatus
prop_JobStatus_serialization js =
case J.readJSON (J.showJSON js) of
- J.Error _ -> False
- J.Ok js' -> js == js'
+ J.Error e -> printTestCase ("Cannot deserialise: " ++ e) False
+ J.Ok js' -> js ==? js'
where _types = js::Jobs.JobStatus
-testJobs =
- [ run prop_OpStatus_serialization
- , run prop_JobStatus_serialization
- ]
+testSuite "Jobs"
+ [ 'prop_OpStatus_serialization
+ , 'prop_JobStatus_serialization
+ ]
-- ** Loader tests
prop_Loader_lookupNode ktn inst node =
- Loader.lookupNode nl inst node == Data.Map.lookup node nl
- where nl = Data.Map.fromList ktn
+ Loader.lookupNode nl inst node ==? Data.Map.lookup node nl
+ where nl = Data.Map.fromList ktn
prop_Loader_lookupInstance kti inst =
- Loader.lookupInstance il inst == Data.Map.lookup inst il
- where il = Data.Map.fromList kti
+ Loader.lookupInstance il inst ==? Data.Map.lookup inst il
+ where il = Data.Map.fromList kti
prop_Loader_assignIndices nodes =
Data.Map.size nassoc == length nodes &&
(if not (null nodes)
then maximum (IntMap.keys kt) == length nodes - 1
else True)
- where (nassoc, kt) = Loader.assignIndices (map (\n -> (Node.name n, n)) nodes)
+ where (nassoc, kt) =
+ Loader.assignIndices (map (\n -> (Node.name n, n)) nodes)
-- | Checks that the number of primary instances recorded on the nodes
-- is zero.
Loader.compareNameComponent (s1 ++ "." ++ s2) s1 ==
Loader.LookupResult Loader.PartialMatch s1
-testLoader =
- [ run prop_Loader_lookupNode
- , run prop_Loader_lookupInstance
- , run prop_Loader_assignIndices
- , run prop_Loader_mergeData
- , run prop_Loader_compareNameComponent_equal
- , run prop_Loader_compareNameComponent_prefix
- ]
+testSuite "Loader"
+ [ 'prop_Loader_lookupNode
+ , 'prop_Loader_lookupInstance
+ , 'prop_Loader_assignIndices
+ , 'prop_Loader_mergeData
+ , 'prop_Loader_compareNameComponent_equal
+ , 'prop_Loader_compareNameComponent_prefix
+ ]
-- ** Types tests
prop_Types_AllocPolicy_serialisation apol =
- case J.readJSON (J.showJSON apol) of
- J.Ok p -> printTestCase ("invalid deserialisation " ++ show p) $
- p == apol
- J.Error s -> printTestCase ("failed to deserialise: " ++ s) False
- where _types = apol::Types.AllocPolicy
+ case J.readJSON (J.showJSON apol) of
+ J.Ok p -> printTestCase ("invalid deserialisation " ++ show p) $
+ p == apol
+ J.Error s -> printTestCase ("failed to deserialise: " ++ s) False
+ where _types = apol::Types.AllocPolicy
prop_Types_DiskTemplate_serialisation dt =
- case J.readJSON (J.showJSON dt) of
- J.Ok p -> printTestCase ("invalid deserialisation " ++ show p) $
- p == dt
- J.Error s -> printTestCase ("failed to deserialise: " ++ s)
- False
- where _types = dt::Types.DiskTemplate
+ case J.readJSON (J.showJSON dt) of
+ J.Ok p -> printTestCase ("invalid deserialisation " ++ show p) $
+ p == dt
+ J.Error s -> printTestCase ("failed to deserialise: " ++ s)
+ False
+ where _types = dt::Types.DiskTemplate
prop_Types_opToResult op =
- case op of
- Types.OpFail _ -> Types.isBad r
- Types.OpGood v -> case r of
- Types.Bad _ -> False
- Types.Ok v' -> v == v'
- where r = Types.opToResult op
- _types = op::Types.OpResult Int
+ case op of
+ Types.OpFail _ -> Types.isBad r
+ Types.OpGood v -> case r of
+ Types.Bad _ -> False
+ Types.Ok v' -> v == v'
+ where r = Types.opToResult op
+ _types = op::Types.OpResult Int
prop_Types_eitherToResult ei =
- case ei of
- Left _ -> Types.isBad r
- Right v -> case r of
- Types.Bad _ -> False
- Types.Ok v' -> v == v'
+ case ei of
+ Left _ -> Types.isBad r
+ Right v -> case r of
+ Types.Bad _ -> False
+ Types.Ok v' -> v == v'
where r = Types.eitherToResult ei
_types = ei::Either String Int
-testTypes =
- [ run prop_Types_AllocPolicy_serialisation
- , run prop_Types_DiskTemplate_serialisation
- , run prop_Types_opToResult
- , run prop_Types_eitherToResult
- ]
+testSuite "Types"
+ [ 'prop_Types_AllocPolicy_serialisation
+ , 'prop_Types_DiskTemplate_serialisation
+ , 'prop_Types_opToResult
+ , 'prop_Types_eitherToResult
+ ]
--- /dev/null
+{-# LANGUAGE TemplateHaskell #-}
+
+{-| Unittest helpers for ganeti-htools
+
+-}
+
+{-
+
+Copyright (C) 2011 Google Inc.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA.
+
+-}
+
+module Ganeti.HTools.QCHelper
+ ( testSuite
+ ) where
+
+import Test.QuickCheck
+import Language.Haskell.TH
+
+run :: Testable prop => prop -> Args -> IO Result
+run = flip quickCheckWithResult
+
+testSuite :: String -> [Name] -> Q [Dec]
+testSuite tsname tdef = do
+ let fullname = mkName $ "test" ++ tsname
+ tests <- mapM (\n -> [| (run $(varE n), $(litE . StringL . nameBase $ n)) |])
+ tdef
+ sigtype <- [t| (String, [(Args -> IO Result, String)]) |]
+ return [ SigD fullname sigtype
+ , ValD (VarP fullname) (NormalB (TupE [LitE (StringL tsname),
+ ListE tests])) []
+ ]
{-# LANGUAGE BangPatterns, CPP #-}
module Ganeti.HTools.Rapi
- (
- loadData
- , parseData
- ) where
+ ( loadData
+ , parseData
+ ) where
import Data.Maybe (fromMaybe)
#ifndef NO_CURL
import qualified Ganeti.HTools.Instance as Instance
import qualified Ganeti.Constants as C
+{-# ANN module "HLint: ignore Eta reduce" #-}
+
-- | Read an URL via curl and return the body if successful.
getUrl :: (Monad m) => String -> IO (m String)
-- | Append the default port if not passed in.
formatHost :: String -> String
formatHost master =
- if ':' `elem` master then master
+ if ':' `elem` master
+ then master
else "https://" ++ master ++ ":" ++ show C.defaultRapiPort
-- | Parse a instance list in JSON format.
-> String
-> Result [(String, Instance.Instance)]
getInstances ktn body =
- loadJSArray "Parsing instance data" body >>=
- mapM (parseInstance ktn . fromJSObject)
+ loadJSArray "Parsing instance data" body >>=
+ mapM (parseInstance ktn . fromJSObject)
-- | Parse a node list in JSON format.
getNodes :: NameAssoc -> String -> Result [(String, Node.Node)]
getNodes ktg body = loadJSArray "Parsing node data" body >>=
- mapM (parseNode ktg . fromJSObject)
+ mapM (parseNode ktg . fromJSObject)
-- | Parse a group list in JSON format.
getGroups :: String -> Result [(String, Group.Group)]
getGroups body = loadJSArray "Parsing group data" body >>=
- mapM (parseGroup . fromJSObject)
+ mapM (parseGroup . fromJSObject)
-- | Construct an instance from a JSON object.
parseInstance :: NameAssoc
disk <- extract "disk_usage" a
beparams <- liftM fromJSObject (extract "beparams" a)
omem <- extract "oper_ram" a
- mem <- (case omem of
- JSRational _ _ -> annotateResult owner_name (fromJVal omem)
- _ -> extract "memory" beparams)
+ mem <- case omem of
+ JSRational _ _ -> annotateResult owner_name (fromJVal omem)
+ _ -> extract "memory" beparams
vcpus <- extract "vcpus" beparams
pnode <- extract "pnode" a >>= lookupNode ktn name
snodes <- extract "snodes" a
- snode <- (if null snodes then return Node.noSecondary
- else readEitherString (head snodes) >>= lookupNode ktn name)
+ snode <- if null snodes
+ then return Node.noSecondary
+ else readEitherString (head snodes) >>= lookupNode ktn name
running <- extract "status" a
tags <- extract "tags" a
auto_balance <- extract "auto_balance" beparams
let vm_cap' = fromMaybe True vm_cap
guuid <- annotateResult desc $ maybeFromObj a "group.uuid"
guuid' <- lookupGroup ktg name (fromMaybe defaultGroupID guuid)
- node <- (if offline || drained || not vm_cap'
- then return $ Node.create name 0 0 0 0 0 0 True guuid'
- else do
- mtotal <- extract "mtotal"
- mnode <- extract "mnode"
- mfree <- extract "mfree"
- dtotal <- extract "dtotal"
- dfree <- extract "dfree"
- ctotal <- extract "ctotal"
- return $ Node.create name mtotal mnode mfree
- dtotal dfree ctotal False guuid')
+ node <- if offline || drained || not vm_cap'
+ then return $ Node.create name 0 0 0 0 0 0 True guuid'
+ else do
+ mtotal <- extract "mtotal"
+ mnode <- extract "mnode"
+ mfree <- extract "mfree"
+ dtotal <- extract "dtotal"
+ dfree <- extract "dfree"
+ ctotal <- extract "ctotal"
+ return $ Node.create name mtotal mnode mfree
+ dtotal dfree ctotal False guuid'
return (name, node)
-- | Construct a group from a JSON object.
-}
module Ganeti.HTools.Simu
- (
- loadData
- , parseData
- ) where
+ ( loadData
+ , parseData
+ ) where
+import Control.Monad (mplus)
import Text.Printf (printf)
import Ganeti.HTools.Utils
import qualified Ganeti.HTools.Group as Group
import qualified Ganeti.HTools.Node as Node
+-- | Parse a shortened policy string (for command line usage).
+apolAbbrev :: String -> Result AllocPolicy
+apolAbbrev c | c == "p" = return AllocPreferred
+ | c == "a" = return AllocLastResort
+ | c == "u" = return AllocUnallocable
+ | otherwise = fail $ "Cannot parse AllocPolicy abbreviation '"
+ ++ c ++ "'"
+
-- | Parse the string description into nodes.
parseDesc :: String -> Result (AllocPolicy, Int, Int, Int, Int)
parseDesc desc =
- case sepSplit ',' desc of
- [a, n, d, m, c] -> do
- apol <- apolFromString a
- ncount <- tryRead "node count" n
- disk <- annotateResult "disk size" (parseUnit d)
- mem <- annotateResult "memory size" (parseUnit m)
- cpu <- tryRead "cpu count" c
- return (apol, ncount, disk, mem, cpu)
- es -> fail $ printf
- "Invalid cluster specification, expected 5 comma-separated\
- \ sections (allocation policy, node count, disk size,\
- \ memory size, number of CPUs) but got %d: '%s'" (length es) desc
+ case sepSplit ',' desc of
+ [a, n, d, m, c] -> do
+ apol <- allocPolicyFromRaw a `mplus` apolAbbrev a
+ ncount <- tryRead "node count" n
+ disk <- annotateResult "disk size" (parseUnit d)
+ mem <- annotateResult "memory size" (parseUnit m)
+ cpu <- tryRead "cpu count" c
+ return (apol, ncount, disk, mem, cpu)
+ es -> fail $ printf
+ "Invalid cluster specification, expected 5 comma-separated\
+ \ sections (allocation policy, node count, disk size,\
+ \ memory size, number of CPUs) but got %d: '%s'" (length es) desc
-- | Creates a node group with the given specifications.
createGroup :: Int -- ^ The group index
createGroup grpIndex spec = do
(apol, ncount, disk, mem, cpu) <- parseDesc spec
let nodes = map (\idx ->
- Node.create (printf "node-%02d-%03d" grpIndex idx)
- (fromIntegral mem) 0 mem
- (fromIntegral disk) disk
- (fromIntegral cpu) False grpIndex
+ Node.create (printf "node-%02d-%03d" grpIndex idx)
+ (fromIntegral mem) 0 mem
+ (fromIntegral disk) disk
+ (fromIntegral cpu) False grpIndex
) [1..ncount]
grp = Group.create (printf "group-%02d" grpIndex)
(printf "fake-uuid-%02d" grpIndex) apol
-}
module Ganeti.HTools.Text
- (
- loadData
- , parseData
- , loadInst
- , loadNode
- , serializeInstances
- , serializeNode
- , serializeNodes
- , serializeCluster
- ) where
+ ( loadData
+ , parseData
+ , loadInst
+ , loadNode
+ , serializeInstances
+ , serializeNode
+ , serializeNodes
+ , serializeCluster
+ ) where
import Control.Monad
import Data.List
-- | Serialize a single group.
serializeGroup :: Group.Group -> String
serializeGroup grp =
- printf "%s|%s|%s" (Group.name grp) (Group.uuid grp)
- (apolToString (Group.allocPolicy grp))
+ printf "%s|%s|%s" (Group.name grp) (Group.uuid grp)
+ (allocPolicyToRaw (Group.allocPolicy grp))
-- | Generate group file data from a group list.
serializeGroups :: Group.List -> String
-> Node.Node -- ^ The node to be serialised
-> String
serializeNode gl node =
- printf "%s|%.0f|%d|%d|%.0f|%d|%.0f|%c|%s" (Node.name node)
- (Node.tMem node) (Node.nMem node) (Node.fMem node)
- (Node.tDsk node) (Node.fDsk node) (Node.tCpu node)
- (if Node.offline node then 'Y' else 'N')
- (Group.uuid grp)
+ printf "%s|%.0f|%d|%d|%.0f|%d|%.0f|%c|%s" (Node.name node)
+ (Node.tMem node) (Node.nMem node) (Node.fMem node)
+ (Node.tDsk node) (Node.fDsk node) (Node.tCpu node)
+ (if Node.offline node then 'Y' else 'N')
+ (Group.uuid grp)
where grp = Container.find (Node.group node) gl
-- | Generate node file data from node objects.
-> Instance.Instance -- ^ The instance to be serialised
-> String
serializeInstance nl inst =
- let
- iname = Instance.name inst
- pnode = Container.nameOf nl (Instance.pNode inst)
- sidx = Instance.sNode inst
- snode = (if sidx == Node.noSecondary
- then ""
- else Container.nameOf nl sidx)
- in
- printf "%s|%d|%d|%d|%s|%s|%s|%s|%s|%s"
- iname (Instance.mem inst) (Instance.dsk inst)
- (Instance.vcpus inst) (Instance.runSt inst)
- (if Instance.autoBalance inst then "Y" else "N")
- pnode snode (dtToString (Instance.diskTemplate inst))
- (intercalate "," (Instance.tags inst))
+ let iname = Instance.name inst
+ pnode = Container.nameOf nl (Instance.pNode inst)
+ sidx = Instance.sNode inst
+ snode = (if sidx == Node.noSecondary
+ then ""
+ else Container.nameOf nl sidx)
+ in printf "%s|%d|%d|%d|%s|%s|%s|%s|%s|%s"
+ iname (Instance.mem inst) (Instance.dsk inst)
+ (Instance.vcpus inst) (instanceStatusToRaw (Instance.runSt inst))
+ (if Instance.autoBalance inst then "Y" else "N")
+ pnode snode (diskTemplateToRaw (Instance.diskTemplate inst))
+ (intercalate "," (Instance.tags inst))
-- | Generate instance file data from instance objects.
serializeInstances :: Node.List -> Instance.List -> String
serializeInstances nl =
- unlines . map (serializeInstance nl) . Container.elems
+ unlines . map (serializeInstance nl) . Container.elems
-- | Generate complete cluster data from node and instance lists.
serializeCluster :: ClusterData -> String
-> m (String, Group.Group) -- ^ The result, a tuple of group
-- UUID and group object
loadGroup [name, gid, apol] = do
- xapol <- apolFromString apol
+ xapol <- allocPolicyFromRaw apol
return (gid, Group.create name gid xapol)
loadGroup s = fail $ "Invalid/incomplete group data: '" ++ show s ++ "'"
loadInst ktn [ name, mem, dsk, vcpus, status, auto_bal, pnode, snode
, dt, tags ] = do
pidx <- lookupNode ktn name pnode
- sidx <- (if null snode then return Node.noSecondary
- else lookupNode ktn name snode)
+ sidx <- if null snode
+ then return Node.noSecondary
+ else lookupNode ktn name snode
vmem <- tryRead name mem
vdsk <- tryRead name dsk
vvcpus <- tryRead name vcpus
+ vstatus <- instanceStatusFromRaw status
auto_balance <- case auto_bal of
"Y" -> return True
"N" -> return False
_ -> fail $ "Invalid auto_balance value '" ++ auto_bal ++
"' for instance " ++ name
- disk_template <- annotateResult ("Instance " ++ name) (dtFromString dt)
+ disk_template <- annotateResult ("Instance " ++ name)
+ (diskTemplateFromRaw dt)
when (sidx == pidx) $ fail $ "Instance " ++ name ++
" has same primary and secondary node - " ++ pnode
let vtags = sepSplit ',' tags
- newinst = Instance.create name vmem vdsk vvcpus status vtags
+ newinst = Instance.create name vmem vdsk vvcpus vstatus vtags
auto_balance pidx sidx disk_template
return (name, newinst)
loadInst _ s = fail $ "Invalid/incomplete instance data: '" ++ show s ++ "'"
+{-# LANGUAGE TemplateHaskell #-}
+
{-| Some common types.
-}
-}
module Ganeti.HTools.Types
- ( Idx
- , Ndx
- , Gdx
- , NameAssoc
- , Score
- , Weight
- , GroupID
- , AllocPolicy(..)
- , apolFromString
- , apolToString
- , RSpec(..)
- , DynUtil(..)
- , zeroUtil
- , baseUtil
- , addUtil
- , subUtil
- , defVcpuRatio
- , defReservedDiskRatio
- , unitMem
- , unitCpu
- , unitDsk
- , unknownField
- , Placement
- , IMove(..)
- , DiskTemplate(..)
- , dtToString
- , dtFromString
- , MoveJob
- , JobSet
- , Result(..)
- , isOk
- , isBad
- , eitherToResult
- , Element(..)
- , FailMode(..)
- , FailStats
- , OpResult(..)
- , opToResult
- , connTimeout
- , queryTimeout
- , EvacMode(..)
- ) where
-
+ ( Idx
+ , Ndx
+ , Gdx
+ , NameAssoc
+ , Score
+ , Weight
+ , GroupID
+ , AllocPolicy(..)
+ , allocPolicyFromRaw
+ , allocPolicyToRaw
+ , InstanceStatus(..)
+ , instanceStatusFromRaw
+ , instanceStatusToRaw
+ , RSpec(..)
+ , DynUtil(..)
+ , zeroUtil
+ , baseUtil
+ , addUtil
+ , subUtil
+ , defVcpuRatio
+ , defReservedDiskRatio
+ , unitMem
+ , unitCpu
+ , unitDsk
+ , unknownField
+ , Placement
+ , IMove(..)
+ , DiskTemplate(..)
+ , diskTemplateToRaw
+ , diskTemplateFromRaw
+ , MoveJob
+ , JobSet
+ , Result(..)
+ , isOk
+ , isBad
+ , eitherToResult
+ , Element(..)
+ , FailMode(..)
+ , FailStats
+ , OpResult(..)
+ , opToResult
+ , connTimeout
+ , queryTimeout
+ , EvacMode(..)
+ ) where
+
+import Control.Monad
import qualified Data.Map as M
import qualified Text.JSON as JSON
import qualified Ganeti.Constants as C
+import qualified Ganeti.THH as THH
-- | The instance index type.
type Idx = Int
-- Ord instance will order them in the order they are defined, so when
-- changing this data type be careful about the interaction with the
-- desired sorting order.
-data AllocPolicy
- = AllocPreferred -- ^ This is the normal status, the group
- -- should be used normally during allocations
- | AllocLastResort -- ^ This group should be used only as
- -- last-resort, after the preferred groups
- | AllocUnallocable -- ^ This group must not be used for new
- -- allocations
- deriving (Show, Read, Eq, Ord, Enum, Bounded)
-
--- | Convert a string to an alloc policy.
-apolFromString :: (Monad m) => String -> m AllocPolicy
-apolFromString s =
- case () of
- _ | s == C.allocPolicyPreferred -> return AllocPreferred
- | s == C.allocPolicyLastResort -> return AllocLastResort
- | s == C.allocPolicyUnallocable -> return AllocUnallocable
- | otherwise -> fail $ "Invalid alloc policy mode: " ++ s
-
--- | Convert an alloc policy to the Ganeti string equivalent.
-apolToString :: AllocPolicy -> String
-apolToString AllocPreferred = C.allocPolicyPreferred
-apolToString AllocLastResort = C.allocPolicyLastResort
-apolToString AllocUnallocable = C.allocPolicyUnallocable
-
-instance JSON.JSON AllocPolicy where
- showJSON = JSON.showJSON . apolToString
- readJSON s = case JSON.readJSON s of
- JSON.Ok s' -> apolFromString s'
- JSON.Error e -> JSON.Error $
- "Can't parse alloc_policy: " ++ e
+$(THH.declareSADT "AllocPolicy"
+ [ ("AllocPreferred", 'C.allocPolicyPreferred)
+ , ("AllocLastResort", 'C.allocPolicyLastResort)
+ , ("AllocUnallocable", 'C.allocPolicyUnallocable)
+ ])
+$(THH.makeJSONInstance ''AllocPolicy)
+
+-- | The Instance real state type.
+$(THH.declareSADT "InstanceStatus"
+ [ ("AdminDown", 'C.inststAdmindown)
+ , ("AdminOffline", 'C.inststAdminoffline)
+ , ("ErrorDown", 'C.inststErrordown)
+ , ("ErrorUp", 'C.inststErrorup)
+ , ("NodeDown", 'C.inststNodedown)
+ , ("NodeOffline", 'C.inststNodeoffline)
+ , ("Running", 'C.inststRunning)
+ , ("WrongNode", 'C.inststWrongnode)
+ ])
+$(THH.makeJSONInstance ''InstanceStatus)
-- | The resource spec type.
data RSpec = RSpec
- { rspecCpu :: Int -- ^ Requested VCPUs
- , rspecMem :: Int -- ^ Requested memory
- , rspecDsk :: Int -- ^ Requested disk
- } deriving (Show, Read, Eq)
+ { rspecCpu :: Int -- ^ Requested VCPUs
+ , rspecMem :: Int -- ^ Requested memory
+ , rspecDsk :: Int -- ^ Requested disk
+ } deriving (Show, Read, Eq)
-- | The dynamic resource specs of a machine (i.e. load or load
-- capacity, as opposed to size).
data DynUtil = DynUtil
- { cpuWeight :: Weight -- ^ Standardised CPU usage
- , memWeight :: Weight -- ^ Standardised memory load
- , dskWeight :: Weight -- ^ Standardised disk I\/O usage
- , netWeight :: Weight -- ^ Standardised network usage
- } deriving (Show, Read, Eq)
+ { cpuWeight :: Weight -- ^ Standardised CPU usage
+ , memWeight :: Weight -- ^ Standardised memory load
+ , dskWeight :: Weight -- ^ Standardised disk I\/O usage
+ , netWeight :: Weight -- ^ Standardised network usage
+ } deriving (Show, Read, Eq)
-- | Initial empty utilisation.
zeroUtil :: DynUtil
-- | Sum two utilisation records.
addUtil :: DynUtil -> DynUtil -> DynUtil
addUtil (DynUtil a1 a2 a3 a4) (DynUtil b1 b2 b3 b4) =
- DynUtil (a1+b1) (a2+b2) (a3+b3) (a4+b4)
+ DynUtil (a1+b1) (a2+b2) (a3+b3) (a4+b4)
-- | Substracts one utilisation record from another.
subUtil :: DynUtil -> DynUtil -> DynUtil
subUtil (DynUtil a1 a2 a3 a4) (DynUtil b1 b2 b3 b4) =
- DynUtil (a1-b1) (a2-b2) (a3-b3) (a4-b4)
+ DynUtil (a1-b1) (a2-b2) (a3-b3) (a4-b4)
-- | The description of an instance placement. It contains the
-- instance index, the new primary and secondary node, the move being
deriving (Show, Read)
-- | Instance disk template type.
-data DiskTemplate = DTDiskless
- | DTFile
- | DTSharedFile
- | DTPlain
- | DTBlock
- | DTDrbd8
- deriving (Show, Read, Eq, Enum, Bounded)
-
--- | Converts a DiskTemplate to String.
-dtToString :: DiskTemplate -> String
-dtToString DTDiskless = C.dtDiskless
-dtToString DTFile = C.dtFile
-dtToString DTSharedFile = C.dtSharedFile
-dtToString DTPlain = C.dtPlain
-dtToString DTBlock = C.dtBlock
-dtToString DTDrbd8 = C.dtDrbd8
-
--- | Converts a DiskTemplate from String.
-dtFromString :: (Monad m) => String -> m DiskTemplate
-dtFromString s =
- case () of
- _ | s == C.dtDiskless -> return DTDiskless
- | s == C.dtFile -> return DTFile
- | s == C.dtSharedFile -> return DTSharedFile
- | s == C.dtPlain -> return DTPlain
- | s == C.dtBlock -> return DTBlock
- | s == C.dtDrbd8 -> return DTDrbd8
- | otherwise -> fail $ "Invalid disk template: " ++ s
-
-instance JSON.JSON DiskTemplate where
- showJSON = JSON.showJSON . dtToString
- readJSON s = case JSON.readJSON s of
- JSON.Ok s' -> dtFromString s'
- JSON.Error e -> JSON.Error $
- "Can't parse disk_template as string: " ++ e
+$(THH.declareSADT "DiskTemplate"
+ [ ("DTDiskless", 'C.dtDiskless)
+ , ("DTFile", 'C.dtFile)
+ , ("DTSharedFile", 'C.dtSharedFile)
+ , ("DTPlain", 'C.dtPlain)
+ , ("DTBlock", 'C.dtBlock)
+ , ("DTDrbd8", 'C.dtDrbd8)
+ ])
+$(THH.makeJSONInstance ''DiskTemplate)
-- | Formatted solution output for one move (involved nodes and
-- commands.
deriving (Show, Read, Eq)
instance Monad Result where
- (>>=) (Bad x) _ = Bad x
- (>>=) (Ok x) fn = fn x
- return = Ok
- fail = Bad
+ (>>=) (Bad x) _ = Bad x
+ (>>=) (Ok x) fn = fn x
+ return = Ok
+ fail = Bad
+
+instance MonadPlus Result where
+ mzero = Bad "zero Result when used as MonadPlus"
+ -- for mplus, when we 'add' two Bad values, we concatenate their
+ -- error descriptions
+ (Bad x) `mplus` (Bad y) = Bad (x ++ "; " ++ y)
+ (Bad _) `mplus` x = x
+ x@(Ok _) `mplus` _ = x
-- | Simple checker for whether a 'Result' is OK.
isOk :: Result a -> Bool
deriving (Show, Read)
instance Monad OpResult where
- (OpGood x) >>= fn = fn x
- (OpFail y) >>= _ = OpFail y
- return = OpGood
+ (OpGood x) >>= fn = fn x
+ (OpFail y) >>= _ = OpFail y
+ return = OpGood
-- | Conversion from 'OpResult' to 'Result'.
opToResult :: OpResult a -> Result a
-- | A generic class for items that have updateable names and indices.
class Element a where
- -- | Returns the name of the element
- nameOf :: a -> String
- -- | Returns all the known names of the element
- allNames :: a -> [String]
- -- | Returns the index of the element
- idxOf :: a -> Int
- -- | Updates the alias of the element
- setAlias :: a -> String -> a
- -- | Compute the alias by stripping a given suffix (domain) from
- -- the name
- computeAlias :: String -> a -> a
- computeAlias dom e = setAlias e alias
- where alias = take (length name - length dom) name
- name = nameOf e
- -- | Updates the index of the element
- setIdx :: a -> Int -> a
+ -- | Returns the name of the element
+ nameOf :: a -> String
+ -- | Returns all the known names of the element
+ allNames :: a -> [String]
+ -- | Returns the index of the element
+ idxOf :: a -> Int
+ -- | Updates the alias of the element
+ setAlias :: a -> String -> a
+ -- | Compute the alias by stripping a given suffix (domain) from
+ -- the name
+ computeAlias :: String -> a -> a
+ computeAlias dom e = setAlias e alias
+ where alias = take (length name - length dom) name
+ name = nameOf e
+ -- | Updates the index of the element
+ setIdx :: a -> Int -> a
-- | The iallocator node-evacuate evac_mode type.
-data EvacMode = ChangePrimary
- | ChangeSecondary
- | ChangeAll
- deriving (Show, Read)
-
-instance JSON.JSON EvacMode where
- showJSON mode = case mode of
- ChangeAll -> JSON.showJSON C.iallocatorNevacAll
- ChangePrimary -> JSON.showJSON C.iallocatorNevacPri
- ChangeSecondary -> JSON.showJSON C.iallocatorNevacSec
- readJSON v =
- case JSON.readJSON v of
- JSON.Ok s | s == C.iallocatorNevacAll -> return ChangeAll
- | s == C.iallocatorNevacPri -> return ChangePrimary
- | s == C.iallocatorNevacSec -> return ChangeSecondary
- | otherwise -> fail $ "Invalid evacuate mode " ++ s
- JSON.Error e -> JSON.Error $
- "Can't parse evacuate mode as string: " ++ e
+$(THH.declareSADT "EvacMode"
+ [ ("ChangePrimary", 'C.iallocatorNevacPri)
+ , ("ChangeSecondary", 'C.iallocatorNevacSec)
+ , ("ChangeAll", 'C.iallocatorNevacAll)
+ ])
+$(THH.makeJSONInstance ''EvacMode)
-}
module Ganeti.HTools.Utils
- (
- debug
- , debugFn
- , debugXy
- , sepSplit
- , stdDev
- , if'
- , select
- , commaJoin
- , readEitherString
- , JSRecord
- , loadJSArray
- , fromObj
- , fromObjWithDefault
- , maybeFromObj
- , tryFromObj
- , fromJVal
- , asJSObject
- , asObjectList
- , fromJResult
- , tryRead
- , formatTable
- , annotateResult
- , defaultGroupID
- , parseUnit
- ) where
-
-import Control.Monad (liftM)
+ ( debug
+ , debugFn
+ , debugXy
+ , sepSplit
+ , stdDev
+ , if'
+ , select
+ , applyIf
+ , commaJoin
+ , readEitherString
+ , JSRecord
+ , loadJSArray
+ , fromObj
+ , fromObjWithDefault
+ , maybeFromObj
+ , tryFromObj
+ , fromJVal
+ , asJSObject
+ , asObjectList
+ , fromJResult
+ , tryRead
+ , formatTable
+ , annotateResult
+ , defaultGroupID
+ , parseUnit
+ ) where
+
import Data.Char (toUpper)
import Data.List
-import Data.Maybe (fromMaybe)
import qualified Text.JSON as J
-import Text.Printf (printf)
import Debug.Trace
import Ganeti.HTools.Types
+-- we will re-export these for our existing users
+import Ganeti.HTools.JSON
-- * Debug functions
-- | Show the first parameter before returning the second one.
debugXy :: Show a => a -> b -> b
-debugXy a b = debug a `seq` b
+debugXy = seq . debug
-- * Miscellaneous
+-- | Apply the function if the condition holds, otherwise return the value unchanged.
+applyIf :: Bool -> (a -> a) -> a -> a
+applyIf b f x = if b then f x else x
+
-- | Comma-join a string list.
commaJoin :: [String] -> String
commaJoin = intercalate ","
-- | Split a list on a separator and return an array.
sepSplit :: Eq a => a -> [a] -> [[a]]
sepSplit sep s
- | null s = []
- | null xs = [x]
- | null ys = [x,[]]
- | otherwise = x:sepSplit sep ys
- where (x, xs) = break (== sep) s
- ys = drop 1 xs
+ | null s = []
+ | null xs = [x]
+ | null ys = [x,[]]
+ | otherwise = x:sepSplit sep ys
+ where (x, xs) = break (== sep) s
+ ys = drop 1 xs
-- * Mathematical functions
-> a -- ^ first result which has a True condition, or default
select def = maybe def snd . find fst
--- * JSON-related functions
-
--- | A type alias for the list-based representation of J.JSObject.
-type JSRecord = [(String, J.JSValue)]
-
--- | Converts a JSON Result into a monadic value.
-fromJResult :: Monad m => String -> J.Result a -> m a
-fromJResult s (J.Error x) = fail (s ++ ": " ++ x)
-fromJResult _ (J.Ok x) = return x
-
--- | Tries to read a string from a JSON value.
---
--- In case the value was not a string, we fail the read (in the
--- context of the current monad.
-readEitherString :: (Monad m) => J.JSValue -> m String
-readEitherString v =
- case v of
- J.JSString s -> return $ J.fromJSString s
- _ -> fail "Wrong JSON type"
-
--- | Converts a JSON message into an array of JSON objects.
-loadJSArray :: (Monad m)
- => String -- ^ Operation description (for error reporting)
- -> String -- ^ Input message
- -> m [J.JSObject J.JSValue]
-loadJSArray s = fromJResult s . J.decodeStrict
-
--- | Reads the value of a key in a JSON object.
-fromObj :: (J.JSON a, Monad m) => JSRecord -> String -> m a
-fromObj o k =
- case lookup k o of
- Nothing -> fail $ printf "key '%s' not found, object contains only %s"
- k (show (map fst o))
- Just val -> fromKeyValue k val
-
--- | Reads the value of an optional key in a JSON object.
-maybeFromObj :: (J.JSON a, Monad m) =>
- JSRecord -> String -> m (Maybe a)
-maybeFromObj o k =
- case lookup k o of
- Nothing -> return Nothing
- Just val -> liftM Just (fromKeyValue k val)
-
--- | Reads the value of a key in a JSON object with a default if missing.
-fromObjWithDefault :: (J.JSON a, Monad m) =>
- JSRecord -> String -> a -> m a
-fromObjWithDefault o k d = liftM (fromMaybe d) $ maybeFromObj o k
-
--- | Reads a JValue, that originated from an object key.
-fromKeyValue :: (J.JSON a, Monad m)
- => String -- ^ The key name
- -> J.JSValue -- ^ The value to read
- -> m a
-fromKeyValue k val =
- fromJResult (printf "key '%s', value '%s'" k (show val)) (J.readJSON val)
-
-- | Annotate a Result with an ownership information.
annotateResult :: String -> Result a -> Result a
annotateResult owner (Bad s) = Bad $ owner ++ ": " ++ s
-> Result a
tryFromObj t o = annotateResult t . fromObj o
--- | Small wrapper over readJSON.
-fromJVal :: (Monad m, J.JSON a) => J.JSValue -> m a
-fromJVal v =
- case J.readJSON v of
- J.Error s -> fail ("Cannot convert value '" ++ show v ++
- "', error: " ++ s)
- J.Ok x -> return x
-
--- | Converts a JSON value into a JSON object.
-asJSObject :: (Monad m) => J.JSValue -> m (J.JSObject J.JSValue)
-asJSObject (J.JSObject a) = return a
-asJSObject _ = fail "not an object"
-
--- | Coneverts a list of JSON values into a list of JSON objects.
-asObjectList :: (Monad m) => [J.JSValue] -> m [J.JSObject J.JSValue]
-asObjectList = mapM asJSObject
-- * Parsing utility functions
-- value in MiB.
parseUnit :: (Monad m, Integral a, Read a) => String -> m a
parseUnit str =
- -- TODO: enhance this by splitting the unit parsing code out and
- -- accepting floating-point numbers
- case reads str of
- [(v, suffix)] ->
- let unit = dropWhile (== ' ') suffix
- upper = map toUpper unit
- siConvert x = x * 1000000 `div` 1048576
- in case () of
- _ | null unit -> return v
- | unit == "m" || upper == "MIB" -> return v
- | unit == "M" || upper == "MB" -> return $ siConvert v
- | unit == "g" || upper == "GIB" -> return $ v * 1024
- | unit == "G" || upper == "GB" -> return $ siConvert
- (v * 1000)
- | unit == "t" || upper == "TIB" -> return $ v * 1048576
- | unit == "T" || upper == "TB" -> return $
- siConvert (v * 1000000)
- | otherwise -> fail $ "Unknown unit '" ++ unit ++ "'"
- _ -> fail $ "Can't parse string '" ++ str ++ "'"
+ -- TODO: enhance this by splitting the unit parsing code out and
+ -- accepting floating-point numbers
+ case reads str of
+ [(v, suffix)] ->
+ let unit = dropWhile (== ' ') suffix
+ upper = map toUpper unit
+ siConvert x = x * 1000000 `div` 1048576
+ in case () of
+ _ | null unit -> return v
+ | unit == "m" || upper == "MIB" -> return v
+ | unit == "M" || upper == "MB" -> return $ siConvert v
+ | unit == "g" || upper == "GIB" -> return $ v * 1024
+ | unit == "G" || upper == "GB" -> return $ siConvert
+ (v * 1000)
+ | unit == "t" || upper == "TIB" -> return $ v * 1048576
+ | unit == "T" || upper == "TB" -> return $
+ siConvert (v * 1000000)
+ | otherwise -> fail $ "Unknown unit '" ++ unit ++ "'"
+ _ -> fail $ "Can't parse string '" ++ str ++ "'"
+{-# LANGUAGE TemplateHaskell #-}
+
{-| Implementation of the job information.
-}
-}
module Ganeti.Jobs
- ( OpStatus(..)
- , JobStatus(..)
- ) where
+ ( OpStatus(..)
+ , JobStatus(..)
+ ) where
import Text.JSON (readJSON, showJSON, JSON)
import qualified Text.JSON as J
import qualified Ganeti.Constants as C
+import qualified Ganeti.THH as THH
-- | Our ADT for the OpCode status at runtime (while in a job).
-data OpStatus = OP_STATUS_QUEUED
- | OP_STATUS_WAITING
- | OP_STATUS_CANCELING
- | OP_STATUS_RUNNING
- | OP_STATUS_CANCELED
- | OP_STATUS_SUCCESS
- | OP_STATUS_ERROR
- deriving (Eq, Enum, Bounded, Show, Read)
-
-instance JSON OpStatus where
- showJSON os = showJSON w
- where w = case os of
- OP_STATUS_QUEUED -> C.opStatusQueued
- OP_STATUS_WAITING -> C.opStatusWaiting
- OP_STATUS_CANCELING -> C.opStatusCanceling
- OP_STATUS_RUNNING -> C.opStatusRunning
- OP_STATUS_CANCELED -> C.opStatusCanceled
- OP_STATUS_SUCCESS -> C.opStatusSuccess
- OP_STATUS_ERROR -> C.opStatusError
- readJSON s = case readJSON s of
- J.Ok v | v == C.opStatusQueued -> J.Ok OP_STATUS_QUEUED
- | v == C.opStatusWaiting -> J.Ok OP_STATUS_WAITING
- | v == C.opStatusCanceling -> J.Ok OP_STATUS_CANCELING
- | v == C.opStatusRunning -> J.Ok OP_STATUS_RUNNING
- | v == C.opStatusCanceled -> J.Ok OP_STATUS_CANCELED
- | v == C.opStatusSuccess -> J.Ok OP_STATUS_SUCCESS
- | v == C.opStatusError -> J.Ok OP_STATUS_ERROR
- | otherwise -> J.Error ("Unknown opcode status " ++ v)
- _ -> J.Error ("Cannot parse opcode status " ++ show s)
+$(THH.declareSADT "OpStatus"
+ [ ("OP_STATUS_QUEUED", 'C.opStatusQueued)
+ , ("OP_STATUS_WAITING", 'C.opStatusWaiting)
+ , ("OP_STATUS_CANCELING", 'C.opStatusCanceling)
+ , ("OP_STATUS_RUNNING", 'C.opStatusRunning)
+ , ("OP_STATUS_CANCELED", 'C.opStatusCanceled)
+ , ("OP_STATUS_SUCCESS", 'C.opStatusSuccess)
+ , ("OP_STATUS_ERROR", 'C.opStatusError)
+ ])
+$(THH.makeJSONInstance ''OpStatus)
-- | The JobStatus data type. Note that this is ordered especially
-- such that greater\/lesser comparison on values of this type makes
-- sense.
-data JobStatus = JOB_STATUS_QUEUED
- | JOB_STATUS_WAITING
- | JOB_STATUS_RUNNING
- | JOB_STATUS_SUCCESS
- | JOB_STATUS_CANCELING
- | JOB_STATUS_CANCELED
- | JOB_STATUS_ERROR
- deriving (Eq, Enum, Ord, Bounded, Show, Read)
-
-instance JSON JobStatus where
- showJSON js = showJSON w
- where w = case js of
- JOB_STATUS_QUEUED -> C.jobStatusQueued
- JOB_STATUS_WAITING -> C.jobStatusWaiting
- JOB_STATUS_CANCELING -> C.jobStatusCanceling
- JOB_STATUS_RUNNING -> C.jobStatusRunning
- JOB_STATUS_CANCELED -> C.jobStatusCanceled
- JOB_STATUS_SUCCESS -> C.jobStatusSuccess
- JOB_STATUS_ERROR -> C.jobStatusError
- readJSON s = case readJSON s of
- J.Ok v | v == C.jobStatusQueued -> J.Ok JOB_STATUS_QUEUED
- | v == C.jobStatusWaiting -> J.Ok JOB_STATUS_WAITING
- | v == C.jobStatusCanceling -> J.Ok JOB_STATUS_CANCELING
- | v == C.jobStatusRunning -> J.Ok JOB_STATUS_RUNNING
- | v == C.jobStatusSuccess -> J.Ok JOB_STATUS_SUCCESS
- | v == C.jobStatusCanceled -> J.Ok JOB_STATUS_CANCELED
- | v == C.jobStatusError -> J.Ok JOB_STATUS_ERROR
- | otherwise -> J.Error ("Unknown job status " ++ v)
- _ -> J.Error ("Unknown job status " ++ show s)
+$(THH.declareSADT "JobStatus"
+ [ ("JOB_STATUS_QUEUED", 'C.jobStatusQueued)
+ , ("JOB_STATUS_WAITING", 'C.jobStatusWaiting)
+ , ("JOB_STATUS_CANCELING", 'C.jobStatusCanceling)
+ , ("JOB_STATUS_RUNNING", 'C.jobStatusRunning)
+ , ("JOB_STATUS_CANCELED", 'C.jobStatusCanceled)
+ , ("JOB_STATUS_SUCCESS", 'C.jobStatusSuccess)
+ , ("JOB_STATUS_ERROR", 'C.jobStatusError)
+ ])
+$(THH.makeJSONInstance ''JobStatus)
+{-# LANGUAGE TemplateHaskell #-}
+
{-| Implementation of the Ganeti LUXI interface.
-}
-}
module Ganeti.Luxi
- ( LuxiOp(..)
- , Client
- , getClient
- , closeClient
- , callMethod
- , submitManyJobs
- , queryJobsStatus
- ) where
+ ( LuxiOp(..)
+ , QrViaLuxi(..)
+ , ResultStatus(..)
+ , Client
+ , checkRS
+ , getClient
+ , closeClient
+ , callMethod
+ , submitManyJobs
+ , queryJobsStatus
+ ) where
import Data.IORef
import Control.Monad
import Ganeti.HTools.Utils
import Ganeti.HTools.Types
+import Ganeti.Constants
import Ganeti.Jobs (JobStatus)
import Ganeti.OpCodes (OpCode)
+import Ganeti.THH
-- * Utility functions
-- | Wrapper over System.Timeout.timeout that fails in the IO monad.
withTimeout :: Int -> String -> IO a -> IO a
withTimeout secs descr action = do
- result <- timeout (secs * 1000000) action
- (case result of
- Nothing -> fail $ "Timeout in " ++ descr
- Just v -> return v)
+ result <- timeout (secs * 1000000) action
+ case result of
+ Nothing -> fail $ "Timeout in " ++ descr
+ Just v -> return v
-- * Generic protocol functionality
--- | Currently supported Luxi operations.
-data LuxiOp = QueryInstances [String] [String] Bool
- | QueryNodes [String] [String] Bool
- | QueryGroups [String] [String] Bool
- | QueryJobs [Int] [String]
- | QueryExports [String] Bool
- | QueryConfigValues [String]
- | QueryClusterInfo
- | QueryTags String String
- | SubmitJob [OpCode]
- | SubmitManyJobs [[OpCode]]
- | WaitForJobChange Int [String] JSValue JSValue Int
- | ArchiveJob Int
- | AutoArchiveJobs Int Int
- | CancelJob Int
- | SetDrainFlag Bool
- | SetWatcherPause Double
- deriving (Show, Read)
+$(declareSADT "QrViaLuxi"
+ [ ("QRLock", 'qrLock)
+ , ("QRInstance", 'qrInstance)
+ , ("QRNode", 'qrNode)
+ , ("QRGroup", 'qrGroup)
+ , ("QROs", 'qrOs)
+ ])
+$(makeJSONInstance ''QrViaLuxi)
+
+-- | Currently supported Luxi operations and JSON serialization.
+$(genLuxiOp "LuxiOp"
+ [("Query" ,
+ [ ("what", [t| QrViaLuxi |], [| id |])
+ , ("fields", [t| [String] |], [| id |])
+ , ("qfilter", [t| () |], [| const JSNull |])
+ ])
+ , ("QueryNodes",
+ [ ("names", [t| [String] |], [| id |])
+ , ("fields", [t| [String] |], [| id |])
+ , ("lock", [t| Bool |], [| id |])
+ ])
+ , ("QueryGroups",
+ [ ("names", [t| [String] |], [| id |])
+ , ("fields", [t| [String] |], [| id |])
+ , ("lock", [t| Bool |], [| id |])
+ ])
+ , ("QueryInstances",
+ [ ("names", [t| [String] |], [| id |])
+ , ("fields", [t| [String] |], [| id |])
+ , ("lock", [t| Bool |], [| id |])
+ ])
+ , ("QueryJobs",
+ [ ("ids", [t| [Int] |], [| map show |])
+ , ("fields", [t| [String] |], [| id |])
+ ])
+ , ("QueryExports",
+ [ ("nodes", [t| [String] |], [| id |])
+ , ("lock", [t| Bool |], [| id |])
+ ])
+ , ("QueryConfigValues",
+ [ ("fields", [t| [String] |], [| id |]) ]
+ )
+ , ("QueryClusterInfo", [])
+ , ("QueryTags",
+ [ ("kind", [t| String |], [| id |])
+ , ("name", [t| String |], [| id |])
+ ])
+ , ("SubmitJob",
+ [ ("job", [t| [OpCode] |], [| id |]) ]
+ )
+ , ("SubmitManyJobs",
+ [ ("ops", [t| [[OpCode]] |], [| id |]) ]
+ )
+ , ("WaitForJobChange",
+ [ ("job", [t| Int |], [| id |])
+ , ("fields", [t| [String]|], [| id |])
+ , ("prev_job", [t| JSValue |], [| id |])
+ , ("prev_log", [t| JSValue |], [| id |])
+ , ("tmout", [t| Int |], [| id |])
+ ])
+ , ("ArchiveJob",
+ [ ("job", [t| Int |], [| show |]) ]
+ )
+ , ("AutoArchiveJobs",
+ [ ("age", [t| Int |], [| id |])
+ , ("tmout", [t| Int |], [| id |])
+ ])
+ , ("CancelJob",
+ [ ("job", [t| Int |], [| show |]) ]
+ )
+ , ("SetDrainFlag",
+ [ ("flag", [t| Bool |], [| id |]) ]
+ )
+ , ("SetWatcherPause",
+ [ ("duration", [t| Double |], [| id |]) ]
+ )
+ ])
-- | The serialisation of LuxiOps into strings in messages.
-strOfOp :: LuxiOp -> String
-strOfOp QueryNodes {} = "QueryNodes"
-strOfOp QueryGroups {} = "QueryGroups"
-strOfOp QueryInstances {} = "QueryInstances"
-strOfOp QueryJobs {} = "QueryJobs"
-strOfOp QueryExports {} = "QueryExports"
-strOfOp QueryConfigValues {} = "QueryConfigValues"
-strOfOp QueryClusterInfo {} = "QueryClusterInfo"
-strOfOp QueryTags {} = "QueryTags"
-strOfOp SubmitManyJobs {} = "SubmitManyJobs"
-strOfOp WaitForJobChange {} = "WaitForJobChange"
-strOfOp SubmitJob {} = "SubmitJob"
-strOfOp ArchiveJob {} = "ArchiveJob"
-strOfOp AutoArchiveJobs {} = "AutoArchiveJobs"
-strOfOp CancelJob {} = "CancelJob"
-strOfOp SetDrainFlag {} = "SetDrainFlag"
-strOfOp SetWatcherPause {} = "SetWatcherPause"
+$(genStrOfOp ''LuxiOp "strOfOp")
+
+$(declareIADT "ResultStatus"
+ [ ("RSNormal", 'rsNormal)
+ , ("RSUnknown", 'rsUnknown)
+ , ("RSNoData", 'rsNodata)
+ , ("RSUnavailable", 'rsUnavail)
+ , ("RSOffline", 'rsOffline)
+ ])
+
+$(makeJSONInstance ''ResultStatus)
+
+-- | Check that a ResultStatus is a success; otherwise fail with a descriptive message.
+checkRS :: (Monad m) => ResultStatus -> a -> m a
+checkRS RSNormal val = return val
+checkRS RSUnknown _ = fail "Unknown field"
+checkRS RSNoData _ = fail "No data for a field"
+checkRS RSUnavailable _ = fail "Ganeti reports unavailable data"
+checkRS RSOffline _ = fail "Ganeti reports resource as offline"
-- | The end-of-message separator.
eOM :: Char
| Result
-- | The serialisation of MsgKeys into strings in messages.
-strOfKey :: MsgKeys -> String
-strOfKey Method = "method"
-strOfKey Args = "args"
-strOfKey Success = "success"
-strOfKey Result = "result"
+$(genStrOfKey ''MsgKeys "strOfKey")
-- | Luxi client encapsulation.
data Client = Client { socket :: S.Socket -- ^ The socket of the client
-- | Connects to the master daemon and returns a luxi Client.
getClient :: String -> IO Client
getClient path = do
- s <- S.socket S.AF_UNIX S.Stream S.defaultProtocol
- withTimeout connTimeout "creating luxi connection" $
- S.connect s (S.SockAddrUnix path)
- rf <- newIORef ""
- return Client { socket=s, rbuf=rf}
+ s <- S.socket S.AF_UNIX S.Stream S.defaultProtocol
+ withTimeout connTimeout "creating luxi connection" $
+ S.connect s (S.SockAddrUnix path)
+ rf <- newIORef ""
+ return Client { socket=s, rbuf=rf}
-- | Closes the client socket.
closeClient :: Client -> IO ()
-- | Sends a message over a luxi transport.
sendMsg :: Client -> String -> IO ()
sendMsg s buf =
- let _send obuf = do
- sbytes <- withTimeout queryTimeout
- "sending luxi message" $
- S.send (socket s) obuf
- unless (sbytes == length obuf) $ _send (drop sbytes obuf)
- in _send (buf ++ [eOM])
+ let _send obuf = do
+ sbytes <- withTimeout queryTimeout
+ "sending luxi message" $
+ S.send (socket s) obuf
+ unless (sbytes == length obuf) $ _send (drop sbytes obuf)
+ in _send (buf ++ [eOM])
-- | Waits for a message over a luxi transport.
recvMsg :: Client -> IO String
nbuf <- withTimeout queryTimeout "reading luxi response" $
S.recv (socket s) 4096
let (msg, remaining) = break (eOM ==) nbuf
- (if null remaining
- then _recv (obuf ++ msg)
- else return (obuf ++ msg, tail remaining))
+ if null remaining
+ then _recv (obuf ++ msg)
+ else return (obuf ++ msg, tail remaining)
cbuf <- readIORef $ rbuf s
let (imsg, ibuf) = break (eOM ==) cbuf
(msg, nbuf) <-
- (if null ibuf -- if old buffer didn't contain a full message
- then _recv cbuf -- then we read from network
- else return (imsg, tail ibuf)) -- else we return data from our buffer
+ if null ibuf -- if old buffer didn't contain a full message
+ then _recv cbuf -- then we read from network
+ else return (imsg, tail ibuf) -- else we return data from our buffer
writeIORef (rbuf s) nbuf
return msg
--- | Compute the serialized form of a Luxi operation.
-opToArgs :: LuxiOp -> JSValue
-opToArgs (QueryNodes names fields lock) = J.showJSON (names, fields, lock)
-opToArgs (QueryGroups names fields lock) = J.showJSON (names, fields, lock)
-opToArgs (QueryInstances names fields lock) = J.showJSON (names, fields, lock)
-opToArgs (QueryJobs ids fields) = J.showJSON (map show ids, fields)
-opToArgs (QueryExports nodes lock) = J.showJSON (nodes, lock)
-opToArgs (QueryConfigValues fields) = J.showJSON fields
-opToArgs (QueryClusterInfo) = J.showJSON ()
-opToArgs (QueryTags kind name) = J.showJSON (kind, name)
-opToArgs (SubmitJob j) = J.showJSON j
-opToArgs (SubmitManyJobs ops) = J.showJSON ops
--- This is special, since the JSON library doesn't export an instance
--- of a 5-tuple
-opToArgs (WaitForJobChange a b c d e) =
- JSArray [ J.showJSON a, J.showJSON b, J.showJSON c
- , J.showJSON d, J.showJSON e]
-opToArgs (ArchiveJob a) = J.showJSON (show a)
-opToArgs (AutoArchiveJobs a b) = J.showJSON (a, b)
-opToArgs (CancelJob a) = J.showJSON (show a)
-opToArgs (SetDrainFlag flag) = J.showJSON flag
-opToArgs (SetWatcherPause duration) = J.showJSON [duration]
-
-- | Serialize a request to String.
buildCall :: LuxiOp -- ^ The method
-> String -- ^ The serialized form
buildCall lo =
- let ja = [ (strOfKey Method, JSString $ toJSString $ strOfOp lo::JSValue)
- , (strOfKey Args, opToArgs lo::JSValue)
- ]
- jo = toJSObject ja
- in encodeStrict jo
+ let ja = [ (strOfKey Method, JSString $ toJSString $ strOfOp lo::JSValue)
+ , (strOfKey Args, opToArgs lo::JSValue)
+ ]
+ jo = toJSObject ja
+ in encodeStrict jo
-- | Check that luxi responses contain the required keys and that the
-- call was successful.
let arr = J.fromJSObject oarr
status <- fromObj arr (strOfKey Success)::Result Bool
let rkey = strOfKey Result
- (if status
- then fromObj arr rkey
- else fromObj arr rkey >>= fail)
+ if status
+ then fromObj arr rkey
+ else fromObj arr rkey >>= fail
-- | Generic luxi method call.
callMethod :: LuxiOp -> Client -> IO (Result JSValue)
+{-# LANGUAGE TemplateHaskell #-}
+
{-| Implementation of the opcodes.
-}
-}
module Ganeti.OpCodes
- ( OpCode(..)
- , ReplaceDisksMode(..)
- , opID
- ) where
+ ( OpCode(..)
+ , ReplaceDisksMode(..)
+ , opID
+ ) where
-import Control.Monad
import Text.JSON (readJSON, showJSON, makeObj, JSON)
import qualified Text.JSON as J
-import Text.JSON.Types
+
+import qualified Ganeti.Constants as C
+import Ganeti.THH
import Ganeti.HTools.Utils
-- | Replace disks type.
-data ReplaceDisksMode = ReplaceOnPrimary
- | ReplaceOnSecondary
- | ReplaceNewSecondary
- | ReplaceAuto
- deriving (Show, Read, Eq)
-
-instance JSON ReplaceDisksMode where
- showJSON m = case m of
- ReplaceOnPrimary -> showJSON "replace_on_primary"
- ReplaceOnSecondary -> showJSON "replace_on_secondary"
- ReplaceNewSecondary -> showJSON "replace_new_secondary"
- ReplaceAuto -> showJSON "replace_auto"
- readJSON s = case readJSON s of
- J.Ok "replace_on_primary" -> J.Ok ReplaceOnPrimary
- J.Ok "replace_on_secondary" -> J.Ok ReplaceOnSecondary
- J.Ok "replace_new_secondary" -> J.Ok ReplaceNewSecondary
- J.Ok "replace_auto" -> J.Ok ReplaceAuto
- _ -> J.Error "Can't parse a valid ReplaceDisksMode"
+$(declareSADT "ReplaceDisksMode"
+ [ ("ReplaceOnPrimary", 'C.replaceDiskPri)
+ , ("ReplaceOnSecondary", 'C.replaceDiskSec)
+ , ("ReplaceNewSecondary", 'C.replaceDiskChg)
+ , ("ReplaceAuto", 'C.replaceDiskAuto)
+ ])
+$(makeJSONInstance ''ReplaceDisksMode)
-- | OpCode representation.
--
-- We only implement a subset of Ganeti opcodes, but only what we
-- actually use in the htools codebase.
-data OpCode = OpTestDelay Double Bool [String]
- | OpInstanceReplaceDisks String (Maybe String) ReplaceDisksMode
- [Int] (Maybe String)
- | OpInstanceFailover String Bool (Maybe String)
- | OpInstanceMigrate String Bool Bool Bool (Maybe String)
- deriving (Show, Read, Eq)
-
-
--- | Computes the OP_ID for an OpCode.
-opID :: OpCode -> String
-opID (OpTestDelay _ _ _) = "OP_TEST_DELAY"
-opID (OpInstanceReplaceDisks _ _ _ _ _) = "OP_INSTANCE_REPLACE_DISKS"
-opID (OpInstanceFailover {}) = "OP_INSTANCE_FAILOVER"
-opID (OpInstanceMigrate {}) = "OP_INSTANCE_MIGRATE"
-
--- | Loads an OpCode from the JSON serialised form.
-loadOpCode :: JSValue -> J.Result OpCode
-loadOpCode v = do
- o <- liftM J.fromJSObject (readJSON v)
- let extract x = fromObj o x
- op_id <- extract "OP_ID"
- case op_id of
- "OP_TEST_DELAY" -> do
- on_nodes <- extract "on_nodes"
- on_master <- extract "on_master"
- duration <- extract "duration"
- return $ OpTestDelay duration on_master on_nodes
- "OP_INSTANCE_REPLACE_DISKS" -> do
- inst <- extract "instance_name"
- node <- maybeFromObj o "remote_node"
- mode <- extract "mode"
- disks <- extract "disks"
- ialloc <- maybeFromObj o "iallocator"
- return $ OpInstanceReplaceDisks inst node mode disks ialloc
- "OP_INSTANCE_FAILOVER" -> do
- inst <- extract "instance_name"
- consist <- extract "ignore_consistency"
- tnode <- maybeFromObj o "target_node"
- return $ OpInstanceFailover inst consist tnode
- "OP_INSTANCE_MIGRATE" -> do
- inst <- extract "instance_name"
- live <- extract "live"
- cleanup <- extract "cleanup"
- allow_failover <- fromObjWithDefault o "allow_failover" False
- tnode <- maybeFromObj o "target_node"
- return $ OpInstanceMigrate inst live cleanup
- allow_failover tnode
- _ -> J.Error $ "Unknown opcode " ++ op_id
-
--- | Serialises an opcode to JSON.
-saveOpCode :: OpCode -> JSValue
-saveOpCode op@(OpTestDelay duration on_master on_nodes) =
- let ol = [ ("OP_ID", showJSON $ opID op)
- , ("duration", showJSON duration)
- , ("on_master", showJSON on_master)
- , ("on_nodes", showJSON on_nodes) ]
- in makeObj ol
-
-saveOpCode op@(OpInstanceReplaceDisks inst node mode disks iallocator) =
- let ol = [ ("OP_ID", showJSON $ opID op)
- , ("instance_name", showJSON inst)
- , ("mode", showJSON mode)
- , ("disks", showJSON disks)]
- ol2 = case node of
- Just n -> ("remote_node", showJSON n):ol
- Nothing -> ol
- ol3 = case iallocator of
- Just i -> ("iallocator", showJSON i):ol2
- Nothing -> ol2
- in makeObj ol3
-
-saveOpCode op@(OpInstanceFailover inst consist tnode) =
- let ol = [ ("OP_ID", showJSON $ opID op)
- , ("instance_name", showJSON inst)
- , ("ignore_consistency", showJSON consist) ]
- ol' = case tnode of
- Nothing -> ol
- Just node -> ("target_node", showJSON node):ol
- in makeObj ol'
-
-saveOpCode op@(OpInstanceMigrate inst live cleanup allow_failover tnode) =
- let ol = [ ("OP_ID", showJSON $ opID op)
- , ("instance_name", showJSON inst)
- , ("live", showJSON live)
- , ("cleanup", showJSON cleanup)
- , ("allow_failover", showJSON allow_failover) ]
- ol' = case tnode of
- Nothing -> ol
- Just node -> ("target_node", showJSON node):ol
- in makeObj ol'
+$(genOpCode "OpCode"
+ [ ("OpTestDelay",
+ [ simpleField "duration" [t| Double |]
+ , simpleField "on_master" [t| Bool |]
+ , simpleField "on_nodes" [t| [String] |]
+ ])
+ , ("OpInstanceReplaceDisks",
+ [ simpleField "instance_name" [t| String |]
+ , optionalField $ simpleField "remote_node" [t| String |]
+ , simpleField "mode" [t| ReplaceDisksMode |]
+ , simpleField "disks" [t| [Int] |]
+ , optionalField $ simpleField "iallocator" [t| String |]
+ ])
+ , ("OpInstanceFailover",
+ [ simpleField "instance_name" [t| String |]
+ , simpleField "ignore_consistency" [t| Bool |]
+ , optionalField $ simpleField "target_node" [t| String |]
+ ])
+ , ("OpInstanceMigrate",
+ [ simpleField "instance_name" [t| String |]
+ , simpleField "live" [t| Bool |]
+ , simpleField "cleanup" [t| Bool |]
+ , defaultField [| False |] $ simpleField "allow_failover" [t| Bool |]
+ , optionalField $ simpleField "target_node" [t| String |]
+ ])
+ ])
+
+$(genOpID ''OpCode "opID")
instance JSON OpCode where
- readJSON = loadOpCode
- showJSON = saveOpCode
+ readJSON = loadOpCode
+ showJSON = saveOpCode
--- /dev/null
+{-# LANGUAGE TemplateHaskell #-}
+
+{-| TemplateHaskell helper for HTools.
+
+As TemplateHaskell requires that splices be defined in a separate
+module, we combine all the TemplateHaskell functionality that HTools
+needs in this module (except the one for unittests).
+
+-}
+
+{-
+
+Copyright (C) 2011 Google Inc.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA.
+
+-}
+
+module Ganeti.THH ( declareSADT
+ , declareIADT
+ , makeJSONInstance
+ , genOpID
+ , genOpCode
+ , genStrOfOp
+ , genStrOfKey
+ , genLuxiOp
+ , Field
+ , simpleField
+ , defaultField
+ , optionalField
+ , renameField
+ , containerField
+ , customField
+ , timeStampFields
+ , uuidFields
+ , serialFields
+ , buildObject
+ , buildObjectSerialisation
+ , buildParam
+ , Container
+ ) where
+
+import Control.Arrow
+import Control.Monad (liftM, liftM2)
+import Data.Char
+import Data.List
+import qualified Data.Map as M
+import Language.Haskell.TH
+
+import qualified Text.JSON as JSON
+
+import Ganeti.HTools.JSON
+
+-- * Exported types
+
+type Container = M.Map String
+
+-- | Serialised field data type.
+data Field = Field { fieldName :: String
+ , fieldType :: Q Type
+ , fieldRead :: Maybe (Q Exp)
+ , fieldShow :: Maybe (Q Exp)
+ , fieldDefault :: Maybe (Q Exp)
+ , fieldConstr :: Maybe String
+ , fieldIsContainer :: Bool
+ , fieldIsOptional :: Bool
+ }
+
+-- | Generates a simple field.
+simpleField :: String -> Q Type -> Field
+simpleField fname ftype =
+ Field { fieldName = fname
+ , fieldType = ftype
+ , fieldRead = Nothing
+ , fieldShow = Nothing
+ , fieldDefault = Nothing
+ , fieldConstr = Nothing
+ , fieldIsContainer = False
+ , fieldIsOptional = False
+ }
+
+-- | Sets the renamed constructor field.
+renameField :: String -> Field -> Field
+renameField constrName field = field { fieldConstr = Just constrName }
+
+-- | Sets the default value on a field (makes it optional with a
+-- default value).
+defaultField :: Q Exp -> Field -> Field
+defaultField defval field = field { fieldDefault = Just defval }
+
+-- | Marks a field optional (turning its base type into a Maybe).
+optionalField :: Field -> Field
+optionalField field = field { fieldIsOptional = True }
+
+-- | Marks a field as a container.
+containerField :: Field -> Field
+containerField field = field { fieldIsContainer = True }
+
+-- | Sets custom functions on a field.
+customField :: Q Exp -> Q Exp -> Field -> Field
+customField readfn showfn field =
+ field { fieldRead = Just readfn, fieldShow = Just showfn }
+
+fieldRecordName :: Field -> String
+fieldRecordName (Field { fieldName = name, fieldConstr = alias }) =
+ maybe (camelCase name) id alias
+
+-- | Computes the preferred variable name to use for the value of this
+-- field. If the field has a specific constructor name, then we use a
+-- first-letter-lowercased version of that; otherwise, we simply use
+-- the field name. See also 'fieldRecordName'.
+fieldVariable :: Field -> String
+fieldVariable f =
+ case (fieldConstr f) of
+ Just name -> ensureLower name
+ _ -> fieldName f
+
+actualFieldType :: Field -> Q Type
+actualFieldType f | fieldIsContainer f = [t| Container $t |]
+ | fieldIsOptional f = [t| Maybe $t |]
+ | otherwise = t
+ where t = fieldType f
+
+checkNonOptDef :: (Monad m) => Field -> m ()
+checkNonOptDef (Field { fieldIsOptional = True, fieldName = name }) =
+ fail $ "Optional field " ++ name ++ " used in parameter declaration"
+checkNonOptDef (Field { fieldDefault = (Just _), fieldName = name }) =
+ fail $ "Default field " ++ name ++ " used in parameter declaration"
+checkNonOptDef _ = return ()
+
+loadFn :: Field -> Q Exp -> Q Exp
+loadFn (Field { fieldIsContainer = True }) expr = [| $expr >>= readContainer |]
+loadFn (Field { fieldRead = Just readfn }) expr = [| $expr >>= $readfn |]
+loadFn _ expr = expr
+
+saveFn :: Field -> Q Exp -> Q Exp
+saveFn (Field { fieldIsContainer = True }) expr = [| showContainer $expr |]
+saveFn (Field { fieldRead = Just readfn }) expr = [| $readfn $expr |]
+saveFn _ expr = expr
+
+-- * Common field declarations
+
+timeStampFields :: [Field]
+timeStampFields =
+ [ defaultField [| 0::Double |] $ simpleField "ctime" [t| Double |]
+ , defaultField [| 0::Double |] $ simpleField "mtime" [t| Double |]
+ ]
+
+serialFields :: [Field]
+serialFields =
+ [ renameField "Serial" $ simpleField "serial_no" [t| Int |] ]
+
+uuidFields :: [Field]
+uuidFields = [ simpleField "uuid" [t| String |] ]
+
+-- * Helper functions
+
+-- | Ensure first letter is lowercase.
+--
+-- Used to convert type name to function prefix, e.g. in @data Aa ->
+-- aaToRaw@.
+ensureLower :: String -> String
+ensureLower [] = []
+ensureLower (x:xs) = toLower x:xs
+
+-- | Ensure first letter is uppercase.
+--
+-- Used to convert constructor name to component
+ensureUpper :: String -> String
+ensureUpper [] = []
+ensureUpper (x:xs) = toUpper x:xs
+
+-- | Helper for quoted expressions.
+varNameE :: String -> Q Exp
+varNameE = varE . mkName
+
+-- | showJSON as an expression, for reuse.
+showJSONE :: Q Exp
+showJSONE = varNameE "showJSON"
+
+-- | ToRaw function name.
+toRawName :: String -> Name
+toRawName = mkName . (++ "ToRaw") . ensureLower
+
+-- | FromRaw function name.
+fromRawName :: String -> Name
+fromRawName = mkName . (++ "FromRaw") . ensureLower
+
+-- | Converts a name to its varE/litE representations.
+--
+reprE :: Either String Name -> Q Exp
+reprE = either stringE varE
+
+-- | Smarter function application.
+--
+-- This does simply f x, except that if is 'id', it will skip it, in
+-- order to generate more readable code when using -ddump-splices.
+appFn :: Exp -> Exp -> Exp
+appFn f x | f == VarE 'id = x
+ | otherwise = AppE f x
+
+-- | Container loader
+readContainer :: (Monad m, JSON.JSON a) =>
+ JSON.JSObject JSON.JSValue -> m (Container a)
+readContainer obj = do
+ let kjvlist = JSON.fromJSObject obj
+ kalist <- mapM (\(k, v) -> fromKeyValue k v >>= \a -> return (k, a)) kjvlist
+ return $ M.fromList kalist
+
+-- | Container dumper
+showContainer :: (JSON.JSON a) => Container a -> JSON.JSValue
+showContainer = JSON.makeObj . map (second JSON.showJSON) . M.toList
+
+-- * Template code for simple raw type-equivalent ADTs
+
+-- | Generates a data type declaration.
+--
+-- The type will have a fixed list of instances.
+strADTDecl :: Name -> [String] -> Dec
+strADTDecl name constructors =
+ DataD [] name []
+ (map (flip NormalC [] . mkName) constructors)
+ [''Show, ''Read, ''Eq, ''Enum, ''Bounded, ''Ord]
+
+-- | Generates a toRaw function.
+--
+-- This generates a simple function of the form:
+--
+-- @
+-- nameToRaw :: Name -> /traw/
+-- nameToRaw Cons1 = var1
+-- nameToRaw Cons2 = \"value2\"
+-- @
+genToRaw :: Name -> Name -> Name -> [(String, Either String Name)] -> Q [Dec]
+genToRaw traw fname tname constructors = do
+ sigt <- [t| $(conT tname) -> $(conT traw) |]
+ -- the body clauses, matching on the constructor and returning the
+ -- raw value
+ clauses <- mapM (\(c, v) -> clause [recP (mkName c) []]
+ (normalB (reprE v)) []) constructors
+ return [SigD fname sigt, FunD fname clauses]
+
+-- | Generates a fromRaw function.
+--
+-- The function generated is monadic and can fail parsing the
+-- raw value. It is of the form:
+--
+-- @
+-- nameFromRaw :: (Monad m) => /traw/ -> m Name
+-- nameFromRaw s | s == var1 = Cons1
+-- | s == \"value2\" = Cons2
+-- | otherwise = fail /.../
+-- @
+genFromRaw :: Name -> Name -> Name -> [(String, Name)] -> Q [Dec]
+genFromRaw traw fname tname constructors = do
+ -- signature of form (Monad m) => String -> m $name
+ sigt <- [t| (Monad m) => $(conT traw) -> m $(conT tname) |]
+ -- clauses for a guarded pattern
+ let varp = mkName "s"
+ varpe = varE varp
+ clauses <- mapM (\(c, v) -> do
+ -- the clause match condition
+ g <- normalG [| $varpe == $(varE v) |]
+ -- the clause result
+ r <- [| return $(conE (mkName c)) |]
+ return (g, r)) constructors
+ -- the otherwise clause (fallback)
+ oth_clause <- do
+ g <- normalG [| otherwise |]
+ r <- [|fail ("Invalid string value for type " ++
+ $(litE (stringL (nameBase tname))) ++ ": " ++ show $varpe) |]
+ return (g, r)
+ let fun = FunD fname [Clause [VarP varp]
+ (GuardedB (clauses++[oth_clause])) []]
+ return [SigD fname sigt, fun]
+
+-- | Generates a data type from a given raw format.
+--
+-- The format is expected to be multiline. The first line contains the
+-- type name, and the rest of the lines must contain two words: the
+-- constructor name and then the string representation of the
+-- respective constructor.
+--
+-- The function will generate the data type declaration, and then two
+-- functions:
+--
+-- * /name/ToRaw, which converts the type to a raw type
+--
+-- * /name/FromRaw, which (monadically) converts from a raw type to the type
+--
+-- Note that this is basically just a custom show/read instance,
+-- nothing else.
+declareADT :: Name -> String -> [(String, Name)] -> Q [Dec]
+declareADT traw sname cons = do
+ let name = mkName sname
+ ddecl = strADTDecl name (map fst cons)
+ -- process cons in the format expected by genToRaw
+ cons' = map (\(a, b) -> (a, Right b)) cons
+ toraw <- genToRaw traw (toRawName sname) name cons'
+ fromraw <- genFromRaw traw (fromRawName sname) name cons
+ return $ ddecl:toraw ++ fromraw
+
+declareIADT :: String -> [(String, Name)] -> Q [Dec]
+declareIADT = declareADT ''Int
+
+declareSADT :: String -> [(String, Name)] -> Q [Dec]
+declareSADT = declareADT ''String
+
+-- | Creates the showJSON member of a JSON instance declaration.
+--
+-- This will create what is the equivalent of:
+--
+-- @
+-- showJSON = showJSON . /name/ToRaw
+-- @
+--
+-- in an instance JSON /name/ declaration
+genShowJSON :: String -> Q [Dec]
+genShowJSON name = [d| showJSON = JSON.showJSON . $(varE (toRawName name)) |]
+
+-- | Creates the readJSON member of a JSON instance declaration.
+--
+-- This will create what is the equivalent of:
+--
+-- @
+-- readJSON s = case readJSON s of
+-- Ok s' -> /name/FromRaw s'
+-- Error e -> Error /description/
+-- @
+--
+-- in an instance JSON /name/ declaration
+genReadJSON :: String -> Q Dec
+genReadJSON name = do
+ let s = mkName "s"
+ body <- [| case JSON.readJSON $(varE s) of
+ JSON.Ok s' -> $(varE (fromRawName name)) s'
+ JSON.Error e ->
+ JSON.Error $ "Can't parse raw value for type " ++
+ $(stringE name) ++ ": " ++ e ++ " from " ++
+ show $(varE s)
+ |]
+ return $ FunD (mkName "readJSON") [Clause [VarP s] (NormalB body) []]
+
+-- | Generates a JSON instance for a given type.
+--
+-- This assumes that the /name/ToRaw and /name/FromRaw functions
+-- have been defined as by the 'declareSADT' function.
+makeJSONInstance :: Name -> Q [Dec]
+makeJSONInstance name = do
+ let base = nameBase name
+ showJ <- genShowJSON base
+ readJ <- genReadJSON base
+ return [InstanceD [] (AppT (ConT ''JSON.JSON) (ConT name)) (readJ:showJ)]
+
+-- * Template code for opcodes
+
+-- | Transforms a CamelCase string into an_underscore_based_one.
+deCamelCase :: String -> String
+deCamelCase =
+ intercalate "_" . map (map toUpper) . groupBy (\_ b -> not $ isUpper b)
+
+-- | Transform an underscore_name into a CamelCase one.
+camelCase :: String -> String
+camelCase = concatMap (ensureUpper . drop 1) .
+ groupBy (\_ b -> b /= '_') . ('_':)
+
+-- | Computes the name of a given constructor.
+constructorName :: Con -> Q Name
+constructorName (NormalC name _) = return name
+constructorName (RecC name _) = return name
+constructorName x = fail $ "Unhandled constructor " ++ show x
+
+-- | Builds the generic constructor-to-string function.
+--
+-- This generates a simple function of the following form:
+--
+-- @
+-- fname (ConStructorOne {}) = trans_fun("ConStructorOne")
+-- fname (ConStructorTwo {}) = trans_fun("ConStructorTwo")
+-- @
+--
+-- This builds a custom list of name/string pairs and then uses
+-- 'genToRaw' to actually generate the function
+genConstrToStr :: (String -> String) -> Name -> String -> Q [Dec]
+genConstrToStr trans_fun name fname = do
+ TyConI (DataD _ _ _ cons _) <- reify name
+ cnames <- mapM (liftM nameBase . constructorName) cons
+ let svalues = map (Left . trans_fun) cnames
+ genToRaw ''String (mkName fname) name $ zip cnames svalues
+
+-- | Constructor-to-string for OpCode.
+genOpID :: Name -> String -> Q [Dec]
+genOpID = genConstrToStr deCamelCase
+
+-- | OpCode parameter (field) type.
+type OpParam = (String, Q Type, Q Exp)
+
+-- | Generates the OpCode data type.
+--
+-- This takes an opcode logical definition, and builds both the
+-- datatype and the JSON serialisation out of it. We can't use a
+-- generic serialisation since we need to be compatible with Ganeti's
+-- own, so we have a few quirks to work around.
+genOpCode :: String -- ^ Type name to use
+ -> [(String, [Field])] -- ^ Constructor name and parameters
+ -> Q [Dec]
+genOpCode name cons = do
+ decl_d <- mapM (\(cname, fields) -> do
+ -- we only need the type of the field, without Q
+ fields' <- mapM actualFieldType fields
+ let fields'' = zip (repeat NotStrict) fields'
+ return $ NormalC (mkName cname) fields'')
+ cons
+ let declD = DataD [] (mkName name) [] decl_d [''Show, ''Read, ''Eq]
+
+ (savesig, savefn) <- genSaveOpCode cons
+ (loadsig, loadfn) <- genLoadOpCode cons
+ return [declD, loadsig, loadfn, savesig, savefn]
+
+-- | Checks whether a given parameter is optional.
+--
+-- This requires that it's a 'Maybe'.
+isOptional :: Type -> Bool
+isOptional (AppT (ConT dt) _) | dt == ''Maybe = True
+isOptional _ = False
+
+-- | Generates the \"save\" clause for an entire opcode constructor.
+--
+-- This matches the opcode with variables named the same as the
+-- constructor fields (just so that the spliced in code looks nicer),
+-- and passes those name plus the parameter definition to 'saveObjectField'.
+saveConstructor :: String -- ^ The constructor name
+ -> [Field] -- ^ The parameter definitions for this
+ -- constructor
+ -> Q Clause -- ^ Resulting clause
+saveConstructor sname fields = do
+ let cname = mkName sname
+ let fnames = map (mkName . fieldVariable) fields
+ let pat = conP cname (map varP fnames)
+ let felems = map (uncurry saveObjectField) (zip fnames fields)
+ -- now build the OP_ID serialisation
+ opid = [| [( $(stringE "OP_ID"),
+ $showJSONE $(stringE . deCamelCase $ sname) )] |]
+ flist = listE (opid:felems)
+ -- and finally convert all this to a json object
+ flist' = [| $(varNameE "makeObj") (concat $flist) |]
+ clause [pat] (normalB flist') []
+
+-- | Generates the main save opcode function.
+--
+-- This builds a per-constructor match clause that contains the
+-- respective constructor-serialisation code.
+genSaveOpCode :: [(String, [Field])] -> Q (Dec, Dec)
+genSaveOpCode opdefs = do
+ cclauses <- mapM (uncurry saveConstructor) opdefs
+ let fname = mkName "saveOpCode"
+ sigt <- [t| $(conT (mkName "OpCode")) -> JSON.JSValue |]
+ return $ (SigD fname sigt, FunD fname cclauses)
+
+loadConstructor :: String -> [Field] -> Q Exp
+loadConstructor sname fields = do
+ let name = mkName sname
+ fbinds <- mapM loadObjectField fields
+ let (fnames, fstmts) = unzip fbinds
+ let cval = foldl (\accu fn -> AppE accu (VarE fn)) (ConE name) fnames
+ fstmts' = fstmts ++ [NoBindS (AppE (VarE 'return) cval)]
+ return $ DoE fstmts'
+
+genLoadOpCode :: [(String, [Field])] -> Q (Dec, Dec)
+genLoadOpCode opdefs = do
+ let fname = mkName "loadOpCode"
+ arg1 = mkName "v"
+ objname = mkName "o"
+ opid = mkName "op_id"
+ st1 <- bindS (varP objname) [| liftM JSON.fromJSObject
+ (JSON.readJSON $(varE arg1)) |]
+ st2 <- bindS (varP opid) [| $(varNameE "fromObj")
+ $(varE objname) $(stringE "OP_ID") |]
+ -- the match results (per-constructor blocks)
+ mexps <- mapM (uncurry loadConstructor) opdefs
+ fails <- [| fail $ "Unknown opcode " ++ $(varE opid) |]
+ let mpats = map (\(me, c) ->
+ let mp = LitP . StringL . deCamelCase . fst $ c
+ in Match mp (NormalB me) []
+ ) $ zip mexps opdefs
+ defmatch = Match WildP (NormalB fails) []
+ cst = NoBindS $ CaseE (VarE opid) $ mpats++[defmatch]
+ body = DoE [st1, st2, cst]
+ sigt <- [t| JSON.JSValue -> JSON.Result $(conT (mkName "OpCode")) |]
+ return $ (SigD fname sigt, FunD fname [Clause [VarP arg1] (NormalB body) []])
+
+-- * Template code for luxi
+
+-- | Constructor-to-string for LuxiOp.
+genStrOfOp :: Name -> String -> Q [Dec]
+genStrOfOp = genConstrToStr id
+
+-- | Constructor-to-string for MsgKeys.
+genStrOfKey :: Name -> String -> Q [Dec]
+genStrOfKey = genConstrToStr ensureLower
+
+-- | LuxiOp parameter type.
+type LuxiParam = (String, Q Type, Q Exp)
+
+-- | Generates the LuxiOp data type.
+--
+-- This takes a Luxi operation definition and builds both the
+-- datatype and the function transforming the arguments to JSON.
+-- We can't use anything less generic, because the way different
+-- operations are serialized differs on both parameter- and top-level.
+--
+-- There are three things to be defined for each parameter:
+--
+-- * name
+--
+-- * type
+--
+-- * operation; this is the operation performed on the parameter before
+-- serialization
+--
+genLuxiOp :: String -> [(String, [LuxiParam])] -> Q [Dec]
+genLuxiOp name cons = do
+ decl_d <- mapM (\(cname, fields) -> do
+ fields' <- mapM (\(_, qt, _) ->
+ qt >>= \t -> return (NotStrict, t))
+ fields
+ return $ NormalC (mkName cname) fields')
+ cons
+ let declD = DataD [] (mkName name) [] decl_d [''Show, ''Read]
+ (savesig, savefn) <- genSaveLuxiOp cons
+ return [declD, savesig, savefn]
+
+-- | Generates the \"save\" expression for a single luxi parameter.
+saveLuxiField :: Name -> LuxiParam -> Q Exp
+saveLuxiField fvar (_, qt, fn) =
+ [| JSON.showJSON ( $(liftM2 appFn fn $ varE fvar) ) |]
+
+-- | Generates the \"save\" clause for entire LuxiOp constructor.
+saveLuxiConstructor :: (String, [LuxiParam]) -> Q Clause
+saveLuxiConstructor (sname, fields) = do
+ let cname = mkName sname
+ fnames = map (\(nm, _, _) -> mkName nm) fields
+ pat = conP cname (map varP fnames)
+ flist = map (uncurry saveLuxiField) (zip fnames fields)
+ finval = if null flist
+ then [| JSON.showJSON () |]
+ else [| JSON.showJSON $(listE flist) |]
+ clause [pat] (normalB finval) []
+
+-- | Generates the main save LuxiOp function.
+genSaveLuxiOp :: [(String, [LuxiParam])]-> Q (Dec, Dec)
+genSaveLuxiOp opdefs = do
+ sigt <- [t| $(conT (mkName "LuxiOp")) -> JSON.JSValue |]
+ let fname = mkName "opToArgs"
+ cclauses <- mapM saveLuxiConstructor opdefs
+ return $ (SigD fname sigt, FunD fname cclauses)
+
+-- * "Objects" functionality
+
+-- | Extract the field's declaration from a Field structure.
+fieldTypeInfo :: String -> Field -> Q (Name, Strict, Type)
+fieldTypeInfo field_pfx fd = do
+ t <- actualFieldType fd
+ let n = mkName . (field_pfx ++) . fieldRecordName $ fd
+ return (n, NotStrict, t)
+
+-- | Build an object declaration.
+buildObject :: String -> String -> [Field] -> Q [Dec]
+buildObject sname field_pfx fields = do
+ let name = mkName sname
+ fields_d <- mapM (fieldTypeInfo field_pfx) fields
+ let decl_d = RecC name fields_d
+ let declD = DataD [] name [] [decl_d] [''Show, ''Read]
+ ser_decls <- buildObjectSerialisation sname fields
+ return $ declD:ser_decls
+
+buildObjectSerialisation :: String -> [Field] -> Q [Dec]
+buildObjectSerialisation sname fields = do
+ let name = mkName sname
+ savedecls <- genSaveObject saveObjectField sname fields
+ (loadsig, loadfn) <- genLoadObject loadObjectField sname fields
+ shjson <- objectShowJSON sname
+ rdjson <- objectReadJSON sname
+ let instdecl = InstanceD [] (AppT (ConT ''JSON.JSON) (ConT name))
+ (rdjson:shjson)
+ return $ savedecls ++ [loadsig, loadfn, instdecl]
+
+genSaveObject :: (Name -> Field -> Q Exp)
+ -> String -> [Field] -> Q [Dec]
+genSaveObject save_fn sname fields = do
+ let name = mkName sname
+ let fnames = map (mkName . fieldVariable) fields
+ let pat = conP name (map varP fnames)
+ let tdname = mkName ("toDict" ++ sname)
+ tdsigt <- [t| $(conT name) -> [(String, JSON.JSValue)] |]
+
+ let felems = map (uncurry save_fn) (zip fnames fields)
+ flist = listE felems
+ -- and finally convert all this to a json object
+ tdlist = [| concat $flist |]
+ iname = mkName "i"
+ tclause <- clause [pat] (normalB tdlist) []
+ cclause <- [| $(varNameE "makeObj") . $(varE tdname) |]
+ let fname = mkName ("save" ++ sname)
+ sigt <- [t| $(conT name) -> JSON.JSValue |]
+ return [SigD tdname tdsigt, FunD tdname [tclause],
+ SigD fname sigt, ValD (VarP fname) (NormalB cclause) []]
+
+saveObjectField :: Name -> Field -> Q Exp
+saveObjectField fvar field
+ | isContainer = [| [( $nameE , $showJSONE . showContainer $ $fvarE)] |]
+ | fisOptional = [| case $(varE fvar) of
+ Nothing -> []
+ Just v -> [( $nameE, $showJSONE v)]
+ |]
+ | otherwise = case fieldShow field of
+ Nothing -> [| [( $nameE, $showJSONE $fvarE)] |]
+ Just fn -> [| [( $nameE, $showJSONE . $fn $ $fvarE)] |]
+ where isContainer = fieldIsContainer field
+ fisOptional = fieldIsOptional field
+ nameE = stringE (fieldName field)
+ fvarE = varE fvar
+
+objectShowJSON :: String -> Q [Dec]
+objectShowJSON name =
+ [d| showJSON = JSON.showJSON . $(varE . mkName $ "save" ++ name) |]
+
+genLoadObject :: (Field -> Q (Name, Stmt))
+ -> String -> [Field] -> Q (Dec, Dec)
+genLoadObject load_fn sname fields = do
+ let name = mkName sname
+ funname = mkName $ "load" ++ sname
+ arg1 = mkName "v"
+ objname = mkName "o"
+ opid = mkName "op_id"
+ st1 <- bindS (varP objname) [| liftM JSON.fromJSObject
+ (JSON.readJSON $(varE arg1)) |]
+ fbinds <- mapM load_fn fields
+ let (fnames, fstmts) = unzip fbinds
+ let cval = foldl (\accu fn -> AppE accu (VarE fn)) (ConE name) fnames
+ fstmts' = st1:fstmts ++ [NoBindS (AppE (VarE 'return) cval)]
+ sigt <- [t| JSON.JSValue -> JSON.Result $(conT name) |]
+ return $ (SigD funname sigt,
+ FunD funname [Clause [VarP arg1] (NormalB (DoE fstmts')) []])
+
+loadObjectField :: Field -> Q (Name, Stmt)
+loadObjectField field = do
+ let name = fieldVariable field
+ fvar = mkName name
+ -- these are used in all patterns below
+ let objvar = varNameE "o"
+ objfield = stringE (fieldName field)
+ loadexp =
+ if fieldIsOptional field
+ then [| $(varNameE "maybeFromObj") $objvar $objfield |]
+ else case fieldDefault field of
+ Just defv ->
+ [| $(varNameE "fromObjWithDefault") $objvar
+ $objfield $defv |]
+ Nothing -> [| $(varNameE "fromObj") $objvar $objfield |]
+ bexp <- loadFn field loadexp
+
+ return (fvar, BindS (VarP fvar) bexp)
+
+objectReadJSON :: String -> Q Dec
+objectReadJSON name = do
+ let s = mkName "s"
+ body <- [| case JSON.readJSON $(varE s) of
+ JSON.Ok s' -> $(varE .mkName $ "load" ++ name) s'
+ JSON.Error e ->
+ JSON.Error $ "Can't parse value for type " ++
+ $(stringE name) ++ ": " ++ e
+ |]
+ return $ FunD (mkName "readJSON") [Clause [VarP s] (NormalB body) []]
+
+-- * Inheritable parameter tables implementation
+
+-- | Compute parameter type names.
+paramTypeNames :: String -> (String, String)
+paramTypeNames root = ("Filled" ++ root ++ "Params",
+ "Partial" ++ root ++ "Params")
+
+-- | Compute information about the type of a parameter field.
+paramFieldTypeInfo :: String -> Field -> Q (Name, Strict, Type)
+paramFieldTypeInfo field_pfx fd = do
+ t <- actualFieldType fd
+ let n = mkName . (++ "P") . (field_pfx ++) .
+ fieldRecordName $ fd
+ return (n, NotStrict, AppT (ConT ''Maybe) t)
+
+-- | Build a parameter declaration.
+--
+-- This function builds two different data structures: a /filled/ one,
+-- in which all fields are required, and a /partial/ one, in which all
+-- fields are optional. Due to the current record syntax issues, the
+-- fields need to be named differently for the two structures, so the
+-- partial ones get a /P/ suffix.
+buildParam :: String -> String -> [Field] -> Q [Dec]
+buildParam sname field_pfx fields = do
+ let (sname_f, sname_p) = paramTypeNames sname
+ name_f = mkName sname_f
+ name_p = mkName sname_p
+ fields_f <- mapM (fieldTypeInfo field_pfx) fields
+ fields_p <- mapM (paramFieldTypeInfo field_pfx) fields
+ let decl_f = RecC name_f fields_f
+ decl_p = RecC name_p fields_p
+ let declF = DataD [] name_f [] [decl_f] [''Show, ''Read]
+ declP = DataD [] name_p [] [decl_p] [''Show, ''Read]
+ ser_decls_f <- buildObjectSerialisation sname_f fields
+ ser_decls_p <- buildPParamSerialisation sname_p fields
+ fill_decls <- fillParam sname field_pfx fields
+ return $ [declF, declP] ++ ser_decls_f ++ ser_decls_p ++ fill_decls
+
+buildPParamSerialisation :: String -> [Field] -> Q [Dec]
+buildPParamSerialisation sname fields = do
+ let name = mkName sname
+ savedecls <- genSaveObject savePParamField sname fields
+ (loadsig, loadfn) <- genLoadObject loadPParamField sname fields
+ shjson <- objectShowJSON sname
+ rdjson <- objectReadJSON sname
+ let instdecl = InstanceD [] (AppT (ConT ''JSON.JSON) (ConT name))
+ (rdjson:shjson)
+ return $ savedecls ++ [loadsig, loadfn, instdecl]
+
+savePParamField :: Name -> Field -> Q Exp
+savePParamField fvar field = do
+ checkNonOptDef field
+ let actualVal = mkName "v"
+ normalexpr <- saveObjectField actualVal field
+ -- we have to construct the block here manually, because we can't
+ -- splice-in-splice
+ return $ CaseE (VarE fvar) [ Match (ConP 'Nothing [])
+ (NormalB (ConE '[])) []
+ , Match (ConP 'Just [VarP actualVal])
+ (NormalB normalexpr) []
+ ]
+loadPParamField :: Field -> Q (Name, Stmt)
+loadPParamField field = do
+ checkNonOptDef field
+ let name = fieldName field
+ fvar = mkName name
+ -- these are used in all patterns below
+ let objvar = varNameE "o"
+ objfield = stringE name
+ loadexp = [| $(varNameE "maybeFromObj") $objvar $objfield |]
+ bexp <- loadFn field loadexp
+ return (fvar, BindS (VarP fvar) bexp)
+
+-- | Builds a simple declaration of type @n_x = fromMaybe f_x p_x@.
+buildFromMaybe :: String -> Q Dec
+buildFromMaybe fname =
+ valD (varP (mkName $ "n_" ++ fname))
+ (normalB [| $(varNameE "fromMaybe")
+ $(varNameE $ "f_" ++ fname)
+ $(varNameE $ "p_" ++ fname) |]) []
+
+fillParam :: String -> String -> [Field] -> Q [Dec]
+fillParam sname field_pfx fields = do
+ let fnames = map (\fd -> field_pfx ++ fieldRecordName fd) fields
+ (sname_f, sname_p) = paramTypeNames sname
+ oname_f = "fobj"
+ oname_p = "pobj"
+ name_f = mkName sname_f
+ name_p = mkName sname_p
+ fun_name = mkName $ "fill" ++ sname ++ "Params"
+ le_full = ValD (ConP name_f (map (VarP . mkName . ("f_" ++)) fnames))
+ (NormalB . VarE . mkName $ oname_f) []
+ le_part = ValD (ConP name_p (map (VarP . mkName . ("p_" ++)) fnames))
+ (NormalB . VarE . mkName $ oname_p) []
+ obj_new = foldl (\accu vname -> AppE accu (VarE vname)) (ConE name_f)
+ $ map (mkName . ("n_" ++)) fnames
+ le_new <- mapM buildFromMaybe fnames
+ funt <- [t| $(conT name_f) -> $(conT name_p) -> $(conT name_f) |]
+ let sig = SigD fun_name funt
+ fclause = Clause [VarP (mkName oname_f), VarP (mkName oname_p)]
+ (NormalB $ LetE (le_full:le_part:le_new) obj_new) []
+ fun = FunD fun_name [fclause]
+ return [sig, fun]
module Main (main) where
import Data.Char (toLower)
-import System
+import System.Environment
+import System.Exit
import System.IO
import Ganeti.HTools.Utils
main :: IO ()
main = do
- binary <- getEnv "HTOOLS" `catch` (\_ -> getProgName)
+ binary <- getEnv "HTOOLS" `catch` const getProgName
let name = map toLower binary
boolnames = map (\(x, y) -> (x == name, y)) personalities
select (usage name) boolnames
--- /dev/null
+-- The following two hints warn to simplify e.g. "map (\v -> (v,
+-- True)) lst" to "zip lst (repeat True)", which is more abstract
+warn = map (\v -> (v, x)) y ==> zip y (repeat x)
+ where _ = notIn v x
+warn = map (\v -> (x, v)) ==> zip (repeat x)
+ where _ = notIn v x
module Main(main) where
+import Data.Char
import Data.IORef
-import Test.QuickCheck
-import System.Console.GetOpt
-import System.IO
+import Data.List
+import System.Console.GetOpt ()
+import System.Environment (getArgs)
import System.Exit
-import System (getArgs)
+import System.IO
+import Test.QuickCheck
import Text.Printf
import Ganeti.HTools.QC
-- | Options list and functions.
options :: [OptType]
options =
- [ oReplay
- , oVerbose
- , oShowVer
- , oShowHelp
- ]
+ [ oReplay
+ , oVerbose
+ , oShowVer
+ , oShowHelp
+ ]
fast :: Args
fast = stdArgs
-- | Wrapper over a test runner with error counting.
wrapTest :: IORef Int
- -> (Args -> IO Result)
+ -> (Args -> IO Result, String)
-> Args
- -> IO (Result, Char)
-wrapTest ir test opts = do
+ -> IO (Result, Char, String)
+wrapTest ir (test, desc) opts = do
r <- test opts
c <- case r of
Success {} -> return '.'
GaveUp {} -> return '?'
Failure {} -> incIORef ir >> return '#'
NoExpectedFailure {} -> incIORef ir >> return '*'
- return (r, c)
+ return (r, c, desc)
+
+runTests :: String
+ -> Args
+ -> [Args -> IO (Result, Char, String)]
+ -> Int
+ -> IO [(Result, String)]
runTests name opts tests max_count = do
_ <- printf "%25s : " name
hFlush stdout
results <- mapM (\t -> do
- (r, c) <- t opts
+ (r, c, desc) <- t opts
putChar c
hFlush stdout
- return r
+ return (r, desc)
) tests
- let alldone = sum . map numTests $ results
+ let alldone = sum . map (numTests . fst) $ results
_ <- printf "%*s(%d)\n" (max_count - length tests + 1) " " alldone
- mapM_ (\(idx, r) ->
+ mapM_ (\(r, desc) ->
case r of
Failure { output = o, usedSeed = u, usedSize = size } ->
- printf "Test %d failed (seed was %s, test size %d): %s\n"
- idx (show u) size o
+ printf "Test %s failed (seed was %s, test size %d): %s\n"
+ desc (show u) size o
GaveUp { numTests = passed } ->
- printf "Test %d incomplete: gave up with only %d\
+ printf "Test %s incomplete: gave up with only %d\
\ passes after discarding %d tests\n"
- idx passed (maxDiscard opts)
+ desc passed (maxDiscard opts)
_ -> return ()
- ) $ zip ([1..]::[Int]) results
+ ) results
return results
-allTests :: [(String, Args, [Args -> IO Result])]
+allTests :: [(Args, (String, [(Args -> IO Result, String)]))]
allTests =
- [ ("Utils", fast, testUtils)
- , ("PeerMap", fast, testPeerMap)
- , ("Container", fast, testContainer)
- , ("Instance", fast, testInstance)
- , ("Node", fast, testNode)
- , ("Text", fast, testText)
- , ("OpCodes", fast, testOpCodes)
- , ("Jobs", fast, testJobs)
- , ("Loader", fast, testLoader)
- , ("Types", fast, testTypes)
- , ("Cluster", slow, testCluster)
+ [ (fast, testUtils)
+ , (fast, testPeerMap)
+ , (fast, testContainer)
+ , (fast, testInstance)
+ , (fast, testNode)
+ , (fast, testText)
+ , (fast, testOpCodes)
+ , (fast, testJobs)
+ , (fast, testLoader)
+ , (fast, testTypes)
+ , (slow, testCluster)
]
+-- | Extracts the name of a test group.
+extractName :: (Args, (String, [(Args -> IO Result, String)])) -> String
+extractName (_, (name, _)) = name
+
+-- | Lowercase a string.
+lower :: String -> String
+lower = map toLower
+
transformTestOpts :: Args -> Options -> IO Args
transformTestOpts args opts = do
r <- case optReplay opts of
Nothing -> return Nothing
Just str -> do
let vs = sepSplit ',' str
- (case vs of
- [rng, size] -> return $ Just (read rng, read size)
- _ -> fail "Invalid state given")
+ case vs of
+ [rng, size] -> return $ Just (read rng, read size)
+ _ -> fail "Invalid state given"
return args { chatty = optVerbose opts > 1,
replay = r
}
main = do
errs <- newIORef 0
let wrap = map (wrapTest errs)
- cmd_args <- System.getArgs
+ cmd_args <- getArgs
(opts, args) <- parseOpts cmd_args "test" options
- let tests = if null args
- then allTests
- else filter (\(name, _, _) -> name `elem` args) allTests
- max_count = maximum $ map (\(_, _, t) -> length t) tests
- mapM_ (\(name, targs, tl) ->
- transformTestOpts targs opts >>= \newargs ->
- runTests name newargs (wrap tl) max_count) tests
+ tests <- if null args
+ then return allTests
+ else let args' = map lower args
+ selected = filter ((`elem` args') . lower .
+ extractName) allTests
+ in if null selected
+ then do
+ hPutStrLn stderr $ "No tests matching '"
+ ++ unwords args ++ "', available tests: "
+ ++ intercalate ", " (map extractName allTests)
+ exitWith $ ExitFailure 1
+ else return selected
+
+ let max_count = maximum $ map (\(_, (_, t)) -> length t) tests
+ mapM_ (\(targs, (name, tl)) ->
+ transformTestOpts targs opts >>= \newargs ->
+ runTests name newargs (wrap tl) max_count) tests
terr <- readIORef errs
- (if terr > 0
- then do
- hPutStrLn stderr $ "A total of " ++ show terr ++ " tests failed."
- exitWith $ ExitFailure 1
- else putStrLn "All tests succeeded.")
+ if terr > 0
+ then do
+ hPutStrLn stderr $ "A total of " ++ show terr ++ " tests failed."
+ exitWith $ ExitFailure 1
+ else putStrLn "All tests succeeded."
from ganeti import netutils
from ganeti import runtime
from ganeti import mcpu
+from ganeti import compat
_BOOT_ID_PATH = "/proc/sys/kernel/random/boot_id"
#: Valid LVS output line regex
_LVSLINE_REGEX = re.compile("^ *([^|]+)\|([^|]+)\|([0-9.]+)\|([^|]{6})\|?$")
+# Actions for the master setup script
+_MASTER_START = "start"
+_MASTER_STOP = "stop"
+
class RPCFail(Exception):
"""Class denoting RPC failure.
constants.SSH_KNOWN_HOSTS_FILE,
constants.VNC_PASSWORD_FILE,
constants.RAPI_CERT_FILE,
+ constants.SPICE_CERT_FILE,
+ constants.SPICE_CACERT_FILE,
constants.RAPI_USERS_FILE,
constants.CONFD_HMAC_KEY,
constants.CLUSTER_DOMAIN_SECRET_FILE,
for consumption here or from the node daemon.
@rtype: tuple
- @return: master_netdev, master_ip, master_name, primary_ip_family
+ @return: master_netdev, master_ip, master_name, primary_ip_family,
+ master_netmask
@raise RPCFail: in case of errors
"""
cfg = _GetConfig()
master_netdev = cfg.GetMasterNetdev()
master_ip = cfg.GetMasterIP()
+ master_netmask = cfg.GetMasterNetmask()
master_node = cfg.GetMasterNode()
primary_ip_family = cfg.GetPrimaryIPFamily()
except errors.ConfigurationError, err:
_Fail("Cluster configuration incomplete: %s", err, exc=True)
- return (master_netdev, master_ip, master_node, primary_ip_family)
+ return (master_netdev, master_ip, master_node, primary_ip_family,
+ master_netmask)
def RunLocalHooks(hook_opcode, hooks_path, env_builder_fn):
@param hooks_path: path of the hooks
@type env_builder_fn: function
@param env_builder_fn: function that returns a dictionary containing the
- environment variables for the hooks.
+ environment variables for the hooks. Will get all the parameters of the
+ decorated function.
@raise RPCFail: in case of pre-hook failure
"""
_, myself = ssconf.GetMasterAndMyself()
nodes = ([myself], [myself]) # these hooks run locally
+ env_fn = compat.partial(env_builder_fn, *args, **kwargs)
+
cfg = _GetConfig()
hr = HooksRunner()
hm = mcpu.HooksMaster(hook_opcode, hooks_path, nodes, hr.RunLocalHooks,
- None, env_builder_fn, logging.warning,
- cfg.GetClusterName(), cfg.GetMasterNode())
+ None, env_fn, logging.warning, cfg.GetClusterName(),
+ cfg.GetMasterNode())
hm.RunPhase(constants.HOOKS_PHASE_PRE)
result = fn(*args, **kwargs)
return decorator
-def _BuildMasterIpHookEnv():
+def _BuildMasterIpEnv(master_params, use_external_mip_script=None):
"""Builds environment variables for master IP hooks.
+ @type master_params: L{objects.MasterNetworkParameters}
+ @param master_params: network parameters of the master
+ @type use_external_mip_script: boolean
+  @param use_external_mip_script: whether to use an external master IP
+    address setup script (unused, but necessary per the implementation of the
+    RunLocalHooks decorator)
+
"""
- cfg = _GetConfig()
+ # pylint: disable=W0613
+ ver = netutils.IPAddress.GetVersionFromAddressFamily(master_params.ip_family)
env = {
- "MASTER_NETDEV": cfg.GetMasterNetdev(),
- "MASTER_IP": cfg.GetMasterIP(),
+ "MASTER_NETDEV": master_params.netdev,
+ "MASTER_IP": master_params.ip,
+ "MASTER_NETMASK": str(master_params.netmask),
+ "CLUSTER_IP_VERSION": str(ver),
}
return env
-@RunLocalHooks(constants.FAKE_OP_MASTER_TURNUP, "master-ip-turnup",
- _BuildMasterIpHookEnv)
-def ActivateMasterIp():
- """Activate the IP address of the master daemon.
+def _RunMasterSetupScript(master_params, action, use_external_mip_script):
+ """Execute the master IP address setup script.
+
+ @type master_params: L{objects.MasterNetworkParameters}
+ @param master_params: network parameters of the master
+ @type action: string
+ @param action: action to pass to the script. Must be one of
+ L{backend._MASTER_START} or L{backend._MASTER_STOP}
+ @type use_external_mip_script: boolean
+ @param use_external_mip_script: whether to use an external master IP
+ address setup script
+ @raise backend.RPCFail: if there are errors during the execution of the
+ script
"""
- # GetMasterInfo will raise an exception if not able to return data
- master_netdev, master_ip, _, family = GetMasterInfo()
+ env = _BuildMasterIpEnv(master_params)
- err_msg = None
- if netutils.TcpPing(master_ip, constants.DEFAULT_NODED_PORT):
- if netutils.IPAddress.Own(master_ip):
- # we already have the ip:
- logging.debug("Master IP already configured, doing nothing")
- else:
- err_msg = "Someone else has the master ip, not activating"
- logging.error(err_msg)
+ if use_external_mip_script:
+ setup_script = constants.EXTERNAL_MASTER_SETUP_SCRIPT
else:
- ipcls = netutils.IP4Address
- if family == netutils.IP6Address.family:
- ipcls = netutils.IP6Address
-
- result = utils.RunCmd([constants.IP_COMMAND_PATH, "address", "add",
- "%s/%d" % (master_ip, ipcls.iplen),
- "dev", master_netdev, "label",
- "%s:0" % master_netdev])
- if result.failed:
- err_msg = "Can't activate master IP: %s" % result.output
- logging.error(err_msg)
-
- # we ignore the exit code of the following cmds
- if ipcls == netutils.IP4Address:
- utils.RunCmd(["arping", "-q", "-U", "-c 3", "-I", master_netdev, "-s",
- master_ip, master_ip])
- elif ipcls == netutils.IP6Address:
- try:
- utils.RunCmd(["ndisc6", "-q", "-r 3", master_ip, master_netdev])
- except errors.OpExecError:
- # TODO: Better error reporting
- logging.warning("Can't execute ndisc6, please install if missing")
+ setup_script = constants.DEFAULT_MASTER_SETUP_SCRIPT
+
+ result = utils.RunCmd([setup_script, action], env=env, reset_env=True)
- if err_msg:
- _Fail(err_msg)
+ if result.failed:
+ _Fail("Failed to %s the master IP. Script return value: %s" %
+ (action, result.exit_code), log=True)
+
+
+@RunLocalHooks(constants.FAKE_OP_MASTER_TURNUP, "master-ip-turnup",
+ _BuildMasterIpEnv)
+def ActivateMasterIp(master_params, use_external_mip_script):
+ """Activate the IP address of the master daemon.
+
+ @type master_params: L{objects.MasterNetworkParameters}
+ @param master_params: network parameters of the master
+ @type use_external_mip_script: boolean
+ @param use_external_mip_script: whether to use an external master IP
+ address setup script
+ @raise RPCFail: in case of errors during the IP startup
+
+ """
+ _RunMasterSetupScript(master_params, _MASTER_START,
+ use_external_mip_script)
def StartMasterDaemons(no_voting):
@RunLocalHooks(constants.FAKE_OP_MASTER_TURNDOWN, "master-ip-turndown",
- _BuildMasterIpHookEnv)
-def DeactivateMasterIp():
+ _BuildMasterIpEnv)
+def DeactivateMasterIp(master_params, use_external_mip_script):
"""Deactivate the master IP on this node.
- """
- # TODO: log and report back to the caller the error failures; we
- # need to decide in which case we fail the RPC for this
+ @type master_params: L{objects.MasterNetworkParameters}
+ @param master_params: network parameters of the master
+ @type use_external_mip_script: boolean
+ @param use_external_mip_script: whether to use an external master IP
+ address setup script
+ @raise RPCFail: in case of errors during the IP turndown
- # GetMasterInfo will raise an exception if not able to return data
- master_netdev, master_ip, _, family = GetMasterInfo()
-
- ipcls = netutils.IP4Address
- if family == netutils.IP6Address.family:
- ipcls = netutils.IP6Address
-
- result = utils.RunCmd([constants.IP_COMMAND_PATH, "address", "del",
- "%s/%d" % (master_ip, ipcls.iplen),
- "dev", master_netdev])
- if result.failed:
- logging.error("Can't remove the master IP, error: %s", result.output)
- # but otherwise ignore the failure
+ """
+ _RunMasterSetupScript(master_params, _MASTER_STOP,
+ use_external_mip_script)
def StopMasterDaemons():
result.cmd, result.exit_code, result.output)
+def ChangeMasterNetmask(old_netmask, netmask, master_ip, master_netdev):
+ """Change the netmask of the master IP.
+
+ @param old_netmask: the old value of the netmask
+ @param netmask: the new value of the netmask
+ @param master_ip: the master IP
+ @param master_netdev: the master network device
+
+ """
+ if old_netmask == netmask:
+ return
+
+ if not netutils.IPAddress.Own(master_ip):
+ _Fail("The master IP address is not up, not attempting to change its"
+ " netmask")
+
+ result = utils.RunCmd([constants.IP_COMMAND_PATH, "address", "add",
+ "%s/%s" % (master_ip, netmask),
+ "dev", master_netdev, "label",
+ "%s:0" % master_netdev])
+ if result.failed:
+ _Fail("Could not set the new netmask on the master IP address")
+
+ result = utils.RunCmd([constants.IP_COMMAND_PATH, "address", "del",
+ "%s/%s" % (master_ip, old_netmask),
+ "dev", master_netdev, "label",
+ "%s:0" % master_netdev])
+ if result.failed:
+ _Fail("Could not bring down the master IP address with the old netmask")
+
+
def EtcHostsModify(mode, host, ip):
"""Modify a host entry in /etc/hosts.
try:
utils.RemoveFile(constants.CONFD_HMAC_KEY)
utils.RemoveFile(constants.RAPI_CERT_FILE)
+ utils.RemoveFile(constants.SPICE_CERT_FILE)
+ utils.RemoveFile(constants.SPICE_CACERT_FILE)
utils.RemoveFile(constants.NODED_CERT_FILE)
except: # pylint: disable=W0702
logging.exception("Error while removing cluster secrets")
raise errors.QuitGanetiException(True, "Shutdown scheduled")
-def GetNodeInfo(vgname, hypervisor_type):
- """Gives back a hash with different information about the node.
+def _GetVgInfo(name):
+ """Retrieves information about a LVM volume group.
- @type vgname: C{string}
- @param vgname: the name of the volume group to ask for disk space information
- @type hypervisor_type: C{str}
- @param hypervisor_type: the name of the hypervisor to ask for
- memory information
- @rtype: C{dict}
- @return: dictionary with the following keys:
- - vg_size is the size of the configured volume group in MiB
- - vg_free is the free size of the volume group in MiB
- - memory_dom0 is the memory allocated for domain0 in MiB
- - memory_free is the currently available (free) ram in MiB
- - memory_total is the total number of ram in MiB
- - hv_version: the hypervisor version, if available
+ """
+ # TODO: GetVGInfo supports returning information for multiple VGs at once
+ vginfo = bdev.LogicalVolume.GetVGInfo([name])
+ if vginfo:
+ vg_free = int(round(vginfo[0][0], 0))
+ vg_size = int(round(vginfo[0][1], 0))
+ else:
+ vg_free = None
+ vg_size = None
+
+ return {
+ "name": name,
+ "vg_free": vg_free,
+ "vg_size": vg_size,
+ }
+
+
+def _GetHvInfo(name):
+ """Retrieves node information from a hypervisor.
+
+ The information returned depends on the hypervisor. Common items:
+
+ - vg_size is the size of the configured volume group in MiB
+ - vg_free is the free size of the volume group in MiB
+ - memory_dom0 is the memory allocated for domain0 in MiB
+ - memory_free is the currently available (free) ram in MiB
+ - memory_total is the total number of ram in MiB
+ - hv_version: the hypervisor version, if available
"""
- outputarray = {}
+ return hypervisor.GetHypervisor(name).GetNodeInfo()
- if vgname is not None:
- vginfo = bdev.LogicalVolume.GetVGInfo([vgname])
- vg_free = vg_size = None
- if vginfo:
- vg_free = int(round(vginfo[0][0], 0))
- vg_size = int(round(vginfo[0][1], 0))
- outputarray["vg_size"] = vg_size
- outputarray["vg_free"] = vg_free
- if hypervisor_type is not None:
- hyper = hypervisor.GetHypervisor(hypervisor_type)
- hyp_info = hyper.GetNodeInfo()
- if hyp_info is not None:
- outputarray.update(hyp_info)
+def _GetNamedNodeInfo(names, fn):
+ """Calls C{fn} for all names in C{names} and returns a dictionary.
- outputarray["bootid"] = utils.ReadFile(_BOOT_ID_PATH, size=128).rstrip("\n")
+  @rtype: None or list
- return outputarray
+ """
+ if names is None:
+ return None
+ else:
+ return map(fn, names)
+
+
+def GetNodeInfo(vg_names, hv_names):
+ """Gives back a hash with different information about the node.
+
+ @type vg_names: list of string
+ @param vg_names: Names of the volume groups to ask for disk space information
+ @type hv_names: list of string
+ @param hv_names: Names of the hypervisors to ask for node information
+ @rtype: tuple; (string, None/dict, None/dict)
+ @return: Tuple containing boot ID, volume group information and hypervisor
+ information
+
+ """
+ bootid = utils.ReadFile(_BOOT_ID_PATH, size=128).rstrip("\n")
+ vg_info = _GetNamedNodeInfo(vg_names, _GetVgInfo)
+ hv_info = _GetNamedNodeInfo(hv_names, _GetHvInfo)
+
+ return (bootid, vg_info, hv_info)
def VerifyNode(what, cluster_name):
result[constants.NV_MASTERIP] = netutils.TcpPing(master_ip, port,
source=source)
+ if constants.NV_USERSCRIPTS in what:
+ result[constants.NV_USERSCRIPTS] = \
+ [script for script in what[constants.NV_USERSCRIPTS]
+ if not (os.path.exists(script) and os.access(script, os.X_OK))]
+
if constants.NV_OOB_PATHS in what:
result[constants.NV_OOB_PATHS] = tmp = []
for path in what[constants.NV_OOB_PATHS]:
_Fail("Failed to accept instance: %s", err, exc=True)
-def FinalizeMigration(instance, info, success):
+def FinalizeMigrationDst(instance, info, success):
"""Finalize any preparation to accept an instance.
@type instance: L{objects.Instance}
"""
hyper = hypervisor.GetHypervisor(instance.hypervisor)
try:
- hyper.FinalizeMigration(instance, info, success)
+ hyper.FinalizeMigrationDst(instance, info, success)
except errors.HypervisorError, err:
- _Fail("Failed to finalize migration: %s", err, exc=True)
+ _Fail("Failed to finalize migration on the target node: %s", err, exc=True)
def MigrateInstance(instance, target, live):
@type live: boolean
@param live: whether the migration should be done live or not (the
interpretation of this parameter is left to the hypervisor)
- @rtype: tuple
- @return: a tuple of (success, msg) where:
- - succes is a boolean denoting the success/failure of the operation
- - msg is a string with details in case of failure
+ @raise RPCFail: if migration fails for some reason
"""
hyper = hypervisor.GetHypervisor(instance.hypervisor)
_Fail("Failed to migrate instance: %s", err, exc=True)
+def FinalizeMigrationSource(instance, success, live):
+ """Finalize the instance migration on the source node.
+
+ @type instance: L{objects.Instance}
+ @param instance: the instance definition of the migrated instance
+ @type success: bool
+ @param success: whether the migration succeeded or not
+ @type live: bool
+ @param live: whether the user requested a live migration or not
+ @raise RPCFail: If the execution fails for some reason
+
+ """
+ hyper = hypervisor.GetHypervisor(instance.hypervisor)
+
+ try:
+ hyper.FinalizeMigrationSource(instance, success, live)
+ except Exception, err: # pylint: disable=W0703
+ _Fail("Failed to finalize the migration on the source node: %s", err,
+ exc=True)
+
+
+def GetMigrationStatus(instance):
+ """Get the migration status
+
+ @type instance: L{objects.Instance}
+ @param instance: the instance that is being migrated
+ @rtype: L{objects.MigrationStatus}
+ @return: the status of the current migration (one of
+ L{constants.HV_MIGRATION_VALID_STATUSES}), plus any additional
+ progress info that can be retrieved from the hypervisor
+ @raise RPCFail: If the migration status cannot be retrieved
+
+ """
+ hyper = hypervisor.GetHypervisor(instance.hypervisor)
+ try:
+ return hyper.GetMigrationStatus(instance)
+ except Exception, err: # pylint: disable=W0703
+ _Fail("Failed to get migration status: %s", err, exc=True)
+
+
def BlockdevCreate(disk, size, owner, on_primary, info):
"""Creates a block device for an instance.
clist.append(crdev)
try:
- device = bdev.Create(disk.dev_type, disk.physical_id, clist, disk.size)
+ device = bdev.Create(disk, clist)
except errors.BlockDeviceError, err:
_Fail("Can't create block device: %s", err)
device.Assemble()
except errors.BlockDeviceError, err:
_Fail("Can't assemble device after creation, unusual event: %s", err)
- device.SetSyncSpeed(constants.SYNC_SPEED)
if on_primary or disk.OpenOnSecondary():
try:
device.Open(force=True)
children.append(cdev)
if as_primary or disk.AssembleOnSecondary():
- r_dev = bdev.Assemble(disk.dev_type, disk.physical_id, children, disk.size)
- r_dev.SetSyncSpeed(constants.SYNC_SPEED)
+ r_dev = bdev.Assemble(disk, children)
result = r_dev
if as_primary or disk.OpenOnSecondary():
r_dev.Open()
for chdisk in disk.children:
children.append(_RecursiveFindBD(chdisk))
- return bdev.FindDevice(disk.dev_type, disk.physical_id, children, disk.size)
+ return bdev.FindDevice(disk, children)
def _OpenRealBD(disk):
ssconf.SimpleStore().WriteFiles(values)
-def _ErrnoOrStr(err):
- """Format an EnvironmentError exception.
-
- If the L{err} argument has an errno attribute, it will be looked up
- and converted into a textual C{E...} description. Otherwise the
- string representation of the error will be returned.
-
- @type err: L{EnvironmentError}
- @param err: the exception to format
-
- """
- if hasattr(err, "errno"):
- detail = errno.errorcode[err.errno]
- else:
- detail = str(err)
- return detail
-
-
def _OSOndiskAPIVersion(os_dir):
"""Compute and return the API version of a given OS.
st = os.stat(api_file)
except EnvironmentError, err:
return False, ("Required file '%s' not found under path %s: %s" %
- (constants.OS_API_FILE, os_dir, _ErrnoOrStr(err)))
+ (constants.OS_API_FILE, os_dir, utils.ErrnoOrStr(err)))
if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
return False, ("File '%s' in %s is not a regular file" %
api_versions = utils.ReadFile(api_file).splitlines()
except EnvironmentError, err:
return False, ("Error while reading the API version file at %s: %s" %
- (api_file, _ErrnoOrStr(err)))
+ (api_file, utils.ErrnoOrStr(err)))
try:
api_versions = [int(version.strip()) for version in api_versions]
del os_files[filename]
continue
return False, ("File '%s' under path '%s' is missing (%s)" %
- (filename, os_dir, _ErrnoOrStr(err)))
+ (filename, os_dir, utils.ErrnoOrStr(err)))
if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
return False, ("File '%s' under path '%s' is not a regular file" %
# we accept missing files, but not other errors
if err.errno != errno.ENOENT:
return False, ("Error while reading the OS variants file at %s: %s" %
- (variants_file, _ErrnoOrStr(err)))
+ (variants_file, utils.ErrnoOrStr(err)))
parameters = []
if constants.OS_PARAMETERS_FILE in os_files:
parameters = utils.ReadFile(parameters_file).splitlines()
except EnvironmentError, err:
return False, ("Error while reading the OS parameters file at %s: %s" %
- (parameters_file, _ErrnoOrStr(err)))
+ (parameters_file, utils.ErrnoOrStr(err)))
parameters = [v.split(None, 1) for v in parameters]
os_obj = objects.OS(name=name, path=os_dir,
config.add_section(constants.INISECT_INS)
config.set(constants.INISECT_INS, "name", instance.name)
+ config.set(constants.INISECT_INS, "maxmem", "%d" %
+ instance.beparams[constants.BE_MAXMEM])
+ config.set(constants.INISECT_INS, "minmem", "%d" %
+ instance.beparams[constants.BE_MINMEM])
+ # "memory" is deprecated, but useful for exporting to old ganeti versions
config.set(constants.INISECT_INS, "memory", "%d" %
- instance.beparams[constants.BE_MEMORY])
+ instance.beparams[constants.BE_MAXMEM])
config.set(constants.INISECT_INS, "vcpus", "%d" %
instance.beparams[constants.BE_VCPUS])
config.set(constants.INISECT_INS, "disk_template", instance.disk_template)
import re
import time
import errno
+import shlex
import stat
import pyparsing as pyp
import os
after assembly we'll have our correct major/minor.
"""
- def __init__(self, unique_id, children, size):
+ def __init__(self, unique_id, children, size, params):
self._children = children
self.dev_path = None
self.unique_id = unique_id
self.minor = None
self.attached = False
self.size = size
+ self.params = params
def Assemble(self):
"""Assemble the device from its components.
raise NotImplementedError
@classmethod
- def Create(cls, unique_id, children, size):
+ def Create(cls, unique_id, children, size, params):
"""Create the device.
If the device cannot be created, it will return None
"""
raise NotImplementedError
- def SetSyncSpeed(self, speed):
- """Adjust the sync speed of the mirror.
+ def SetSyncParams(self, params):
+ """Adjust the synchronization parameters of the mirror.
In case this is not a mirroring device, this is no-op.
+ @param params: dictionary of LD level disk parameters related to the
+ synchronization.
+ @rtype: list
+ @return: a list of error messages, emitted both by the current node and by
+ children. An empty list means no errors.
+
"""
- result = True
+ result = []
if self._children:
for child in self._children:
- result = result and child.SetSyncSpeed(speed)
+ result.extend(child.SetSyncParams(params))
return result
def PauseResumeSync(self, pause):
In case this is not a mirroring device, this is no-op.
- @param pause: Wheater to pause or resume
+ @param pause: Whether to pause or resume
"""
result = True
_INVALID_NAMES = frozenset([".", "..", "snapshot", "pvmove"])
_INVALID_SUBSTRINGS = frozenset(["_mlog", "_mimage"])
- def __init__(self, unique_id, children, size):
+ def __init__(self, unique_id, children, size, params):
"""Attaches to a LV device.
The unique_id is a tuple (vg_name, lv_name)
"""
- super(LogicalVolume, self).__init__(unique_id, children, size)
+ super(LogicalVolume, self).__init__(unique_id, children, size, params)
if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
raise ValueError("Invalid configuration data %s" % str(unique_id))
self._vg_name, self._lv_name = unique_id
self.Attach()
@classmethod
- def Create(cls, unique_id, children, size):
+ def Create(cls, unique_id, children, size, params):
"""Create a new logical volume.
"""
" in lvm.conf using either 'filter' or 'preferred_names'")
free_size = sum([pv[0] for pv in pvs_info])
current_pvs = len(pvlist)
- stripes = min(current_pvs, constants.LVM_STRIPECOUNT)
+ desired_stripes = params[constants.LDP_STRIPES]
+ stripes = min(current_pvs, desired_stripes)
+ if stripes < desired_stripes:
+ logging.warning("Could not use %d stripes for VG %s, as only %d PVs are"
+ " available.", desired_stripes, vg_name, current_pvs)
# The size constraint should have been checked from the master before
# calling the create function.
if result.failed:
_ThrowError("LV create failed (%s): %s",
result.fail_reason, result.output)
- return LogicalVolume(unique_id, children, size)
+ return LogicalVolume(unique_id, children, size, params)
@staticmethod
def _GetVolumeInfo(lvm_cmd, fields):
snap_name = self._lv_name + ".snap"
# remove existing snapshot if found
- snap = LogicalVolume((self._vg_name, snap_name), None, size)
+ snap = LogicalVolume((self._vg_name, snap_name), None, size, self.params)
_IgnoreError(snap.Remove)
vg_info = self.GetVGInfo([self._vg_name])
# timeout constants
_NET_RECONFIG_TIMEOUT = 60
- def __init__(self, unique_id, children, size):
+ # command line options for barriers
+ _DISABLE_DISK_OPTION = "--no-disk-barrier" # -a
+ _DISABLE_DRAIN_OPTION = "--no-disk-drain" # -D
+ _DISABLE_FLUSH_OPTION = "--no-disk-flushes" # -i
+ _DISABLE_META_FLUSH_OPTION = "--no-md-flushes" # -m
+
+ def __init__(self, unique_id, children, size, params):
if children and children.count(None) > 0:
children = []
if len(children) not in (0, 2):
if not _CanReadDevice(children[1].dev_path):
logging.info("drbd%s: Ignoring unreadable meta device", self._aminor)
children = []
- super(DRBD8, self).__init__(unique_id, children, size)
+ super(DRBD8, self).__init__(unique_id, children, size, params)
self.major = self._DRBD_MAJOR
version = self._GetVersion(self._GetProcData())
if version["k_major"] != 8:
info["remote_addr"] == (self._rhost, self._rport))
return retval
- @classmethod
- def _AssembleLocal(cls, minor, backend, meta, size):
+ def _AssembleLocal(self, minor, backend, meta, size):
"""Configure the local part of a DRBD device.
"""
- args = ["drbdsetup", cls._DevPath(minor), "disk",
+ args = ["drbdsetup", self._DevPath(minor), "disk",
backend, meta, "0",
"-e", "detach",
"--create-device"]
if size:
args.extend(["-d", "%sm" % size])
- if not constants.DRBD_BARRIERS: # disable barriers, if configured so
- version = cls._GetVersion(cls._GetProcData())
- # various DRBD versions support different disk barrier options;
- # what we aim here is to revert back to the 'drain' method of
- # disk flushes and to disable metadata barriers, in effect going
- # back to pre-8.0.7 behaviour
- vmaj = version["k_major"]
- vmin = version["k_minor"]
- vrel = version["k_point"]
- assert vmaj == 8
- if vmin == 0: # 8.0.x
- if vrel >= 12:
- args.extend(["-i", "-m"])
- elif vmin == 2: # 8.2.x
- if vrel >= 7:
- args.extend(["-i", "-m"])
- elif vmaj >= 3: # 8.3.x or newer
- args.extend(["-i", "-a", "m"])
+
+ version = self._GetVersion(self._GetProcData())
+ vmaj = version["k_major"]
+ vmin = version["k_minor"]
+ vrel = version["k_point"]
+
+ barrier_args = \
+ self._ComputeDiskBarrierArgs(vmaj, vmin, vrel,
+ self.params[constants.LDP_BARRIERS],
+ self.params[constants.LDP_NO_META_FLUSH])
+ args.extend(barrier_args)
+
+ if self.params[constants.LDP_DISK_CUSTOM]:
+ args.extend(shlex.split(self.params[constants.LDP_DISK_CUSTOM]))
+
result = utils.RunCmd(args)
if result.failed:
_ThrowError("drbd%d: can't attach local disk: %s", minor, result.output)
@classmethod
- def _AssembleNet(cls, minor, net_info, protocol,
+ def _ComputeDiskBarrierArgs(cls, vmaj, vmin, vrel, disabled_barriers,
+ disable_meta_flush):
+ """Compute the DRBD command line parameters for disk barriers
+
+ Returns a list of the disk barrier parameters as requested via the
+ disabled_barriers and disable_meta_flush arguments, and according to the
+ supported ones in the DRBD version vmaj.vmin.vrel
+
+ If the desired option is unsupported, raises errors.BlockDeviceError.
+
+ """
+ disabled_barriers_set = frozenset(disabled_barriers)
+ if disabled_barriers_set not in constants.DRBD_VALID_BARRIER_OPT:
+ raise errors.BlockDeviceError("%s is not a valid option set for DRBD"
+ " barriers" % disabled_barriers)
+
+ args = []
+
+ # The following code assumes DRBD 8.x, with x < 4 and x != 1 (DRBD 8.1.x
+ # does not exist); note the parentheses: anything that is not exactly
+ # 8.0/8.2/8.3 must be rejected here
+ if not (vmaj == 8 and vmin in (0, 2, 3)):
+ raise errors.BlockDeviceError("Unsupported DRBD version: %d.%d.%d" %
+ (vmaj, vmin, vrel))
+
+ def _AppendOrRaise(option, min_version):
+ """Helper for DRBD options"""
+ if min_version is not None and vrel >= min_version:
+ args.append(option)
+ else:
+ raise errors.BlockDeviceError("Could not use the option %s as the"
+ " DRBD version %d.%d.%d does not support"
+ " it." % (option, vmaj, vmin, vrel))
+
+ # the minimum version for each feature is encoded via pairs of (minor
+ # version -> x) where x is version in which support for the option was
+ # introduced.
+ meta_flush_supported = disk_flush_supported = {
+ 0: 12,
+ 2: 7,
+ 3: 0,
+ }
+
+ disk_drain_supported = {
+ 2: 7,
+ 3: 0,
+ }
+
+ disk_barriers_supported = {
+ 3: 0,
+ }
+
+ # meta flushes
+ if disable_meta_flush:
+ _AppendOrRaise(cls._DISABLE_META_FLUSH_OPTION,
+ meta_flush_supported.get(vmin, None))
+
+ # disk flushes
+ if constants.DRBD_B_DISK_FLUSH in disabled_barriers_set:
+ _AppendOrRaise(cls._DISABLE_FLUSH_OPTION,
+ disk_flush_supported.get(vmin, None))
+
+ # disk drain
+ if constants.DRBD_B_DISK_DRAIN in disabled_barriers_set:
+ _AppendOrRaise(cls._DISABLE_DRAIN_OPTION,
+ disk_drain_supported.get(vmin, None))
+
+ # disk barriers
+ if constants.DRBD_B_DISK_BARRIERS in disabled_barriers_set:
+ _AppendOrRaise(cls._DISABLE_DISK_OPTION,
+ disk_barriers_supported.get(vmin, None))
+
+ return args
+
+
+ def _AssembleNet(self, minor, net_info, protocol,
dual_pri=False, hmac=None, secret=None):
"""Configure the network part of the device.
if None in net_info:
# we don't want network connection and actually want to make
# sure its shutdown
- cls._ShutdownNet(minor)
+ self._ShutdownNet(minor)
return
# Workaround for a race condition. When DRBD is doing its dance to
# sync speed only after setting up both sides can race with DRBD
# connecting, hence we set it here before telling DRBD anything
# about its peer.
- cls._SetMinorSyncSpeed(minor, constants.SYNC_SPEED)
+ sync_errors = self._SetMinorSyncParams(minor, self.params)
+ if sync_errors:
+ _ThrowError("drbd%d: can't set the synchronization parameters: %s" %
+ (minor, utils.CommaJoin(sync_errors)))
if netutils.IP6Address.IsValid(lhost):
if not netutils.IP6Address.IsValid(rhost):
else:
_ThrowError("drbd%d: Invalid ip %s" % (minor, lhost))
- args = ["drbdsetup", cls._DevPath(minor), "net",
+ args = ["drbdsetup", self._DevPath(minor), "net",
"%s:%s:%s" % (family, lhost, lport),
"%s:%s:%s" % (family, rhost, rport), protocol,
"-A", "discard-zero-changes",
args.append("-m")
if hmac and secret:
args.extend(["-a", hmac, "-x", secret])
+
+ if self.params[constants.LDP_NET_CUSTOM]:
+ args.extend(shlex.split(self.params[constants.LDP_NET_CUSTOM]))
+
result = utils.RunCmd(args)
if result.failed:
_ThrowError("drbd%d: can't setup network: %s - %s",
minor, result.fail_reason, result.output)
def _CheckNetworkConfig():
- info = cls._GetDevInfo(cls._GetShowData(minor))
+ info = self._GetDevInfo(self._GetShowData(minor))
if not "local_addr" in info or not "remote_addr" in info:
raise utils.RetryAgain()
self._children = []
@classmethod
- def _SetMinorSyncSpeed(cls, minor, kbytes):
- """Set the speed of the DRBD syncer.
+ def _SetMinorSyncParams(cls, minor, params):
+ """Set the parameters of the DRBD syncer.
This is the low-level implementation.
@type minor: int
@param minor: the drbd minor whose settings we change
- @type kbytes: int
- @param kbytes: the speed in kbytes/second
- @rtype: boolean
- @return: the success of the operation
+ @type params: dict
+ @param params: LD level disk parameters related to the synchronization
+ @rtype: list
+ @return: a list of error messages
"""
- result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "syncer",
- "-r", "%d" % kbytes, "--create-device"])
+
+ args = ["drbdsetup", cls._DevPath(minor), "syncer"]
+ if params[constants.LDP_DYNAMIC_RESYNC]:
+ version = cls._GetVersion(cls._GetProcData())
+ vmin = version["k_minor"]
+ vrel = version["k_point"]
+
+ # By definition we are using 8.x, so just check the rest of the version
+ # number
+ if vmin != 3 or vrel < 9:
+ msg = ("The current DRBD version (8.%d.%d) does not support the "
+ "dynamic resync speed controller" % (vmin, vrel))
+ logging.error(msg)
+ return [msg]
+
+ if params[constants.LDP_PLAN_AHEAD] == 0:
+ msg = ("A value of 0 for c-plan-ahead disables the dynamic sync speed"
+ " controller at DRBD level. If you want to disable it, please"
+ " set the dynamic-resync disk parameter to False.")
+ logging.error(msg)
+ return [msg]
+
+ # add the c-* parameters to args
+ args.extend(["--c-plan-ahead", params[constants.LDP_PLAN_AHEAD],
+ "--c-fill-target", params[constants.LDP_FILL_TARGET],
+ "--c-delay-target", params[constants.LDP_DELAY_TARGET],
+ "--c-max-rate", params[constants.LDP_MAX_RATE],
+ "--c-min-rate", params[constants.LDP_MIN_RATE],
+ ])
+
+ else:
+ args.extend(["-r", "%d" % params[constants.LDP_RESYNC_RATE]])
+
+ args.append("--create-device")
+ result = utils.RunCmd(args)
if result.failed:
- logging.error("Can't change syncer rate: %s - %s",
- result.fail_reason, result.output)
- return not result.failed
+ msg = ("Can't change syncer rate: %s - %s" %
+ (result.fail_reason, result.output))
+ logging.error(msg)
+ # callers extend() their error list with this return value, so it
+ # must be a list (a bare string would be spliced char-by-char)
+ return [msg]
- def SetSyncSpeed(self, kbytes):
- """Set the speed of the DRBD syncer.
+ return []
- @type kbytes: int
- @param kbytes: the speed in kbytes/second
- @rtype: boolean
- @return: the success of the operation
+ def SetSyncParams(self, params):
+ """Set the synchronization parameters of the DRBD syncer.
+
+ @type params: dict
+ @param params: LD level disk parameters related to the synchronization
+ @rtype: list
+ @return: a list of error messages, emitted both by the current node and by
+ children. An empty list means no errors
"""
if self.minor is None:
- logging.info("Not attached during SetSyncSpeed")
- return False
- children_result = super(DRBD8, self).SetSyncSpeed(kbytes)
- return self._SetMinorSyncSpeed(self.minor, kbytes) and children_result
+ err = "Not attached during SetSyncParams"
+ logging.info(err)
+ return [err]
+
+ children_result = super(DRBD8, self).SetSyncParams(params)
+ children_result.extend(self._SetMinorSyncParams(self.minor, params))
+ return children_result
def PauseResumeSync(self, pause):
"""Pauses or resumes the sync of a DRBD device.
- if we have a configured device, we try to ensure that it matches
our config
- if not, we create it from zero
+ - anyway, set the device parameters
"""
super(DRBD8, self).Assemble()
# the device
self._SlowAssemble()
+ sync_errors = self.SetSyncParams(self.params)
+ if sync_errors:
+ _ThrowError("drbd%d: can't set the synchronization parameters: %s" %
+ (self.minor, utils.CommaJoin(sync_errors)))
+
def _SlowAssemble(self):
"""Assembles the DRBD device from a (partially) configured device.
self.Shutdown()
@classmethod
- def Create(cls, unique_id, children, size):
+ def Create(cls, unique_id, children, size, params):
"""Create a new DRBD8 device.
Since DRBD devices are not created per se, just assembled, this
aminor, meta)
cls._CheckMetaSize(meta.dev_path)
cls._InitMeta(aminor, meta.dev_path)
- return cls(unique_id, children, size)
+ return cls(unique_id, children, size, params)
def Grow(self, amount, dryrun):
"""Resize the DRBD device and its backing storage.
The unique_id for the file device is a (file_driver, file_path) tuple.
"""
- def __init__(self, unique_id, children, size):
+ def __init__(self, unique_id, children, size, params):
"""Initalizes a file device backend.
"""
if children:
raise errors.BlockDeviceError("Invalid setup for file device")
- super(FileStorage, self).__init__(unique_id, children, size)
+ super(FileStorage, self).__init__(unique_id, children, size, params)
if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
raise ValueError("Invalid configuration data %s" % str(unique_id))
self.driver = unique_id[0]
_ThrowError("Can't stat %s: %s", self.dev_path, err)
@classmethod
- def Create(cls, unique_id, children, size):
+ def Create(cls, unique_id, children, size, params):
"""Create a new file.
@param size: the size of file in MiB
_ThrowError("File already existing: %s", dev_path)
_ThrowError("Error in file creation: %", str(err))
- return FileStorage(unique_id, children, size)
+ return FileStorage(unique_id, children, size, params)
class PersistentBlockDevice(BlockDev):
For the time being, pathnames are required to lie under /dev.
"""
- def __init__(self, unique_id, children, size):
+ def __init__(self, unique_id, children, size, params):
"""Attaches to a static block device.
The unique_id is a path under /dev.
"""
- super(PersistentBlockDevice, self).__init__(unique_id, children, size)
+ super(PersistentBlockDevice, self).__init__(unique_id, children, size,
+ params)
if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
raise ValueError("Invalid configuration data %s" % str(unique_id))
self.dev_path = unique_id[1]
self.Attach()
@classmethod
- def Create(cls, unique_id, children, size):
+ def Create(cls, unique_id, children, size, params):
"""Create a new device
This is a noop, we only return a PersistentBlockDevice instance
"""
- return PersistentBlockDevice(unique_id, children, 0)
+ return PersistentBlockDevice(unique_id, children, 0, params)
def Remove(self):
"""Remove a device
DEV_MAP[constants.LD_FILE] = FileStorage
-def FindDevice(dev_type, unique_id, children, size):
+def _VerifyDiskType(dev_type):
+ """Raise errors.ProgrammerError if dev_type is not a key of DEV_MAP."""
+ if dev_type not in DEV_MAP:
+ raise errors.ProgrammerError("Invalid block device type '%s'" % dev_type)
+
+
+def FindDevice(disk, children):
"""Search for an existing, assembled device.
This will succeed only if the device exists and is assembled, but it
does not do any actions in order to activate the device.
+ @type disk: L{objects.Disk}
+ @param disk: the disk object to find
+ @type children: list of L{bdev.BlockDev}
+ @param children: the list of block devices that are children of the device
+ represented by the disk parameter
+ @return: the device instance, or None if it is not attached
+
"""
- if dev_type not in DEV_MAP:
- raise errors.ProgrammerError("Invalid block device type '%s'" % dev_type)
- device = DEV_MAP[dev_type](unique_id, children, size)
+ _VerifyDiskType(disk.dev_type)
+ # overlay the disk's own parameters on top of the LD-level defaults
+ dev_params = objects.FillDict(constants.DISK_LD_DEFAULTS[disk.dev_type],
+ disk.params)
+ device = DEV_MAP[disk.dev_type](disk.physical_id, children, disk.size,
+ dev_params)
if not device.attached:
return None
return device
-def Assemble(dev_type, unique_id, children, size):
+def Assemble(disk, children):
"""Try to attach or assemble an existing device.
This will attach to assemble the device, as needed, to bring it
fully up. It must be safe to run on already-assembled devices.
+ @type disk: L{objects.Disk}
+ @param disk: the disk object to assemble
+ @type children: list of L{bdev.BlockDev}
+ @param children: the list of block devices that are children of the device
+ represented by the disk parameter
+ @return: the assembled device instance
+
"""
- if dev_type not in DEV_MAP:
- raise errors.ProgrammerError("Invalid block device type '%s'" % dev_type)
- device = DEV_MAP[dev_type](unique_id, children, size)
+ _VerifyDiskType(disk.dev_type)
+ # overlay the disk's own parameters on top of the LD-level defaults
+ dev_params = objects.FillDict(constants.DISK_LD_DEFAULTS[disk.dev_type],
+ disk.params)
+ device = DEV_MAP[disk.dev_type](disk.physical_id, children, disk.size,
+ dev_params)
device.Assemble()
return device
-def Create(dev_type, unique_id, children, size):
+def Create(disk, children):
"""Create a device.
+ @type disk: L{objects.Disk}
+ @param disk: the disk object to create
+ @type children: list of L{bdev.BlockDev}
+ @param children: the list of block devices that are children of the device
+ represented by the disk parameter
+ @return: the created device instance
+
"""
- if dev_type not in DEV_MAP:
- raise errors.ProgrammerError("Invalid block device type '%s'" % dev_type)
- device = DEV_MAP[dev_type].Create(unique_id, children, size)
+ _VerifyDiskType(disk.dev_type)
+ # overlay the disk's own parameters on top of the LD-level defaults
+ dev_params = objects.FillDict(constants.DISK_LD_DEFAULTS[disk.dev_type],
+ disk.params)
+ device = DEV_MAP[disk.dev_type].Create(disk.physical_id, children, disk.size,
+ dev_params)
return device
backup=True)
-def GenerateClusterCrypto(new_cluster_cert, new_rapi_cert, new_confd_hmac_key,
- new_cds, rapi_cert_pem=None, cds=None,
+def GenerateClusterCrypto(new_cluster_cert, new_rapi_cert, new_spice_cert,
+ new_confd_hmac_key, new_cds,
+ rapi_cert_pem=None, spice_cert_pem=None,
+ spice_cacert_pem=None, cds=None,
nodecert_file=constants.NODED_CERT_FILE,
rapicert_file=constants.RAPI_CERT_FILE,
+ spicecert_file=constants.SPICE_CERT_FILE,
+ spicecacert_file=constants.SPICE_CACERT_FILE,
hmackey_file=constants.CONFD_HMAC_KEY,
cds_file=constants.CLUSTER_DOMAIN_SECRET_FILE):
"""Updates the cluster certificates, keys and secrets.
@param new_cluster_cert: Whether to generate a new cluster certificate
@type new_rapi_cert: bool
@param new_rapi_cert: Whether to generate a new RAPI certificate
+ @type new_spice_cert: bool
+ @param new_spice_cert: Whether to generate a new SPICE certificate
@type new_confd_hmac_key: bool
@param new_confd_hmac_key: Whether to generate a new HMAC key
@type new_cds: bool
@param new_cds: Whether to generate a new cluster domain secret
@type rapi_cert_pem: string
@param rapi_cert_pem: New RAPI certificate in PEM format
+ @type spice_cert_pem: string
+ @param spice_cert_pem: New SPICE certificate in PEM format
+ @type spice_cacert_pem: string
+ @param spice_cacert_pem: Certificate of the CA that signed the SPICE
+ certificate, in PEM format
@type cds: string
@param cds: New cluster domain secret
@type nodecert_file: string
@param nodecert_file: optional override of the node cert file path
@type rapicert_file: string
@param rapicert_file: optional override of the rapi cert file path
+ @type spicecert_file: string
+ @param spicecert_file: optional override of the spice cert file path
+ @type spicecacert_file: string
+ @param spicecacert_file: optional override of the spice CA cert file path
@type hmackey_file: string
@param hmackey_file: optional override of the hmac key file path
logging.debug("Generating new RAPI certificate at %s", rapicert_file)
utils.GenerateSelfSignedSslCert(rapicert_file)
+ # SPICE
+ spice_cert_exists = os.path.exists(spicecert_file)
+ spice_cacert_exists = os.path.exists(spicecacert_file)
+ if spice_cert_pem:
+ # spice_cert_pem implies also spice_cacert_pem
+ logging.debug("Writing SPICE certificate at %s", spicecert_file)
+ utils.WriteFile(spicecert_file, data=spice_cert_pem, backup=True)
+ logging.debug("Writing SPICE CA certificate at %s", spicecacert_file)
+ utils.WriteFile(spicecacert_file, data=spice_cacert_pem, backup=True)
+ elif new_spice_cert or not spice_cert_exists:
+ if spice_cert_exists:
+ utils.CreateBackup(spicecert_file)
+ if spice_cacert_exists:
+ utils.CreateBackup(spicecacert_file)
+
+ logging.debug("Generating new self-signed SPICE certificate at %s",
+ spicecert_file)
+ (_, cert_pem) = utils.GenerateSelfSignedSslCert(spicecert_file)
+
+ # Self-signed certificate -> the public certificate is also the CA public
+ # certificate
+ logging.debug("Writing the public certificate to %s",
+ spicecert_file)
+ utils.io.WriteFile(spicecacert_file, mode=0400, data=cert_pem)
+
# Cluster domain secret
if cds:
logging.debug("Writing cluster domain secret to %s", cds_file)
"""
# Generate cluster secrets
- GenerateClusterCrypto(True, False, False, False)
+ GenerateClusterCrypto(True, False, False, False, False)
result = utils.RunCmd([constants.DAEMON_UTIL, "start", constants.NODED])
if result.failed:
"""
def _CheckNodeDaemon():
- result = rpc.RpcRunner.call_version([node_name])[node_name]
+ result = rpc.BootstrapRunner().call_version([node_name])[node_name]
if result.fail_msg:
raise utils.RetryAgain()
return file_storage_dir
-def InitCluster(cluster_name, mac_prefix, # pylint: disable=R0913
- master_netdev, file_storage_dir, shared_file_storage_dir,
- candidate_pool_size, secondary_ip=None, vg_name=None,
- beparams=None, nicparams=None, ndparams=None, hvparams=None,
- enabled_hypervisors=None, modify_etc_hosts=True,
- modify_ssh_setup=True, maintain_node_health=False,
- drbd_helper=None, uid_pool=None, default_iallocator=None,
- primary_ip_version=None, prealloc_wipe_disks=False):
+def InitCluster(cluster_name, mac_prefix, # pylint: disable=R0913, R0914
+ master_netmask, master_netdev, file_storage_dir,
+ shared_file_storage_dir, candidate_pool_size, secondary_ip=None,
+ vg_name=None, beparams=None, nicparams=None, ndparams=None,
+ hvparams=None, diskparams=None, enabled_hypervisors=None,
+ modify_etc_hosts=True, modify_ssh_setup=True,
+ maintain_node_health=False, drbd_helper=None, uid_pool=None,
+ default_iallocator=None, primary_ip_version=None, ipolicy=None,
+ prealloc_wipe_disks=False, use_external_mip_script=False,
+ hv_state=None, disk_state=None):
"""Initialise the cluster.
@type candidate_pool_size: int
" entries: %s" % invalid_hvs,
errors.ECODE_INVAL)
- ipcls = None
- if primary_ip_version == constants.IP4_VERSION:
- ipcls = netutils.IP4Address
- elif primary_ip_version == constants.IP6_VERSION:
- ipcls = netutils.IP6Address
- else:
+ try:
+ ipcls = netutils.IPAddress.GetClassFromIpVersion(primary_ip_version)
+ except errors.ProgrammerError:
raise errors.OpPrereqError("Invalid primary ip version: %d." %
primary_ip_version)
" but it does not belong to this host." %
secondary_ip, errors.ECODE_ENVIRON)
+ if master_netmask is not None:
+ if not ipcls.ValidateNetmask(master_netmask):
+ raise errors.OpPrereqError("CIDR netmask (%s) not valid for IPv%s " %
+ (master_netmask, primary_ip_version))
+ else:
+ master_netmask = ipcls.iplen
+
if vg_name is not None:
# Check if volume group is valid
vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
dirs = [(constants.RUN_GANETI_DIR, constants.RUN_DIRS_MODE)]
utils.EnsureDirs(dirs)
+ objects.UpgradeBeParams(beparams)
utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)
+ for key, val in ipolicy.items():
+ if key not in constants.IPOLICY_PARAMETERS:
+ raise errors.OpPrereqError("'%s' is not a valid key for instance policy"
+ " description", key)
+ utils.ForceDictType(val, constants.ISPECS_PARAMETER_TYPES)
+
objects.NIC.CheckParameterSyntax(nicparams)
+ full_ipolicy = objects.FillDictOfDicts(constants.IPOLICY_DEFAULTS,
+ ipolicy)
+ objects.InstancePolicy.CheckParameterSyntax(full_ipolicy)
if ndparams is not None:
utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
else:
ndparams = dict(constants.NDC_DEFAULTS)
+ # This is ugly, as we modify the dict itself
+ # FIXME: Make utils.ForceDictType pure functional or write a wrapper around it
+ if hv_state:
+ for hvname, hvs_data in hv_state.items():
+ utils.ForceDictType(hvs_data, constants.HVSTS_PARAMETER_TYPES)
+ hv_state[hvname] = objects.Cluster.SimpleFillHvState(hvs_data)
+ else:
+ hv_state = dict((hvname, constants.HVST_DEFAULTS)
+ for hvname in enabled_hypervisors)
+
+ # FIXME: disk_state has no default values yet
+ if disk_state:
+ for storage, ds_data in disk_state.items():
+ if storage not in constants.DS_VALID_TYPES:
+ raise errors.OpPrereqError("Invalid storage type in disk state: %s" %
+ storage, errors.ECODE_INVAL)
+ for ds_name, state in ds_data.items():
+ utils.ForceDictType(state, constants.DSS_PARAMETER_TYPES)
+ ds_data[ds_name] = objects.Cluster.SimpleFillDiskState(state)
+
# hvparams is a mapping of hypervisor->hvparams dict
for hv_name, hv_params in hvparams.iteritems():
utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
hv_class = hypervisor.GetHypervisor(hv_name)
hv_class.CheckParameterSyntax(hv_params)
+ # diskparams is a mapping of disk-template->diskparams dict
+ for template, dt_params in diskparams.items():
+ param_keys = set(dt_params.keys())
+ default_param_keys = set(constants.DISK_DT_DEFAULTS[template].keys())
+ if not (param_keys <= default_param_keys):
+ unknown_params = param_keys - default_param_keys
+ raise errors.OpPrereqError("Invalid parameters for disk template %s:"
+ " %s" % (template,
+ utils.CommaJoin(unknown_params)))
+ utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
+
# set up ssh config and /etc/hosts
sshline = utils.ReadFile(constants.SSH_HOST_RSA_PUB)
sshkey = sshline.split(" ")[1]
tcpudp_port_pool=set(),
master_node=hostname.name,
master_ip=clustername.ip,
+ master_netmask=master_netmask,
master_netdev=master_netdev,
cluster_name=clustername.name,
file_storage_dir=file_storage_dir,
nicparams={constants.PP_DEFAULT: nicparams},
ndparams=ndparams,
hvparams=hvparams,
+ diskparams=diskparams,
candidate_pool_size=candidate_pool_size,
modify_etc_hosts=modify_etc_hosts,
modify_ssh_setup=modify_ssh_setup,
default_iallocator=default_iallocator,
primary_ip_family=ipcls.family,
prealloc_wipe_disks=prealloc_wipe_disks,
+ use_external_mip_script=use_external_mip_script,
+ ipolicy=ipolicy,
+ hv_state_static=hv_state,
+ disk_state_static=disk_state,
)
master_node_config = objects.Node(name=hostname.name,
primary_ip=hostname.ip,
uuid=uuid_generator.Generate([], utils.NewUUID, _INITCONF_ECID),
name=constants.INITIAL_NODE_GROUP_NAME,
members=[master_node_config.name],
+ diskparams=cluster_config.diskparams,
)
nodegroups = {
default_nodegroup.uuid: default_nodegroup,
"""
cfg = config.ConfigWriter()
modify_ssh_setup = cfg.GetClusterInfo().modify_ssh_setup
- result = rpc.RpcRunner.call_node_stop_master(master)
+ runner = rpc.BootstrapRunner()
+
+ master_params = cfg.GetMasterNetworkParameters()
+ master_params.name = master
+ ems = cfg.GetUseExternalMipScript()
+ result = runner.call_node_deactivate_master_ip(master_params.name,
+ master_params, ems)
+
+ msg = result.fail_msg
+ if msg:
+ logging.warning("Could not disable the master IP: %s", msg)
+
+ result = runner.call_node_stop_master(master)
msg = result.fail_msg
if msg:
logging.warning("Could not disable the master role: %s", msg)
- result = rpc.RpcRunner.call_node_leave_cluster(master, modify_ssh_setup)
+
+ result = runner.call_node_leave_cluster(master, modify_ssh_setup)
msg = result.fail_msg
if msg:
logging.warning("Could not shutdown the node daemon and cleanup"
# either by being constants or by the checks above
sshrunner.CopyFileToNode(node, constants.NODED_CERT_FILE)
sshrunner.CopyFileToNode(node, constants.RAPI_CERT_FILE)
+ sshrunner.CopyFileToNode(node, constants.SPICE_CERT_FILE)
+ sshrunner.CopyFileToNode(node, constants.SPICE_CACERT_FILE)
sshrunner.CopyFileToNode(node, constants.CONFD_HMAC_KEY)
mycommand = ("%s stop-all; %s start %s -b %s" %
(constants.DAEMON_UTIL, constants.DAEMON_UTIL, constants.NODED,
logging.info("Stopping the master daemon on node %s", old_master)
- result = rpc.RpcRunner.call_node_stop_master(old_master)
+ runner = rpc.BootstrapRunner()
+ master_params = cfg.GetMasterNetworkParameters()
+ master_params.name = old_master
+ ems = cfg.GetUseExternalMipScript()
+ result = runner.call_node_deactivate_master_ip(master_params.name,
+ master_params, ems)
+
+ msg = result.fail_msg
+ if msg:
+ logging.warning("Could not disable the master IP: %s", msg)
+
+ result = runner.call_node_stop_master(old_master)
msg = result.fail_msg
if msg:
logging.error("Could not disable the master role on the old master"
logging.info("Starting the master daemons on the new master")
- result = rpc.RpcRunner.call_node_start_master_daemons(new_master, no_voting)
+ result = rpc.BootstrapRunner().call_node_start_master_daemons(new_master,
+ no_voting)
msg = result.fail_msg
if msg:
logging.error("Could not start the master role on the new master"
if not node_list:
# no nodes left (eventually after removing myself)
return []
- results = rpc.RpcRunner.call_master_info(node_list)
+ results = rpc.BootstrapRunner().call_master_info(node_list)
if not isinstance(results, dict):
# this should not happen (unless internal error in rpc)
logging.critical("Can't complete rpc call, aborting master startup")
if msg:
logging.warning("Error contacting node %s: %s", node, msg)
fail = True
- # for now we accept both length 3 and 4 (data[3] is primary ip version)
+ # for now we accept both length 3, 4 and 5 (data[3] is primary ip version
+ # and data[4] is the master netmask)
elif not isinstance(data, (tuple, list)) or len(data) < 3:
logging.warning("Invalid data received from node %s: %s", node, data)
fail = True
import ganeti.rapi.rlib2 # pylint: disable=W0611
-COMMON_PARAM_NAMES = map(compat.fst, opcodes.OpCode.OP_PARAMS)
+def _GetCommonParamNames():
+ """Builds the set of parameter names common to all opcodes.
+
+ """
+ names = set(map(compat.fst, opcodes.OpCode.OP_PARAMS))
+
+ # The "depends" attribute is removed from the common set here, presumably
+ # so that it gets documented explicitly rather than as a common parameter
+ # -- TODO confirm against the consumers of COMMON_PARAM_NAMES
+ names.remove(opcodes.DEPEND_ATTR)
+
+ return names
#: Namespace for evaluating expressions
EVAL_NS = dict(compat=compat, constants=constants, utils=utils, errors=errors,
rlib2=rapi.rlib2)
+# Constants documentation for man pages
+CV_ECODES_DOC = "ecodes"
+# We don't care about the leak of variables _, name and doc here.
+# pylint: disable=W0621
+CV_ECODES_DOC_LIST = [(name, doc) for (_, name, doc) in constants.CV_ALL_ECODES]
+DOCUMENTED_CONSTANTS = {
+ CV_ECODES_DOC: CV_ECODES_DOC_LIST,
+ }
+
class OpcodeError(sphinx.errors.SphinxError):
category = "Opcode error"
@type fields: dict (field name as key, field details as value)
"""
- for (_, (fdef, _, _, _)) in utils.NiceSort(fields.items(),
- key=compat.fst):
- assert len(fdef.doc.splitlines()) == 1
- yield "``%s``" % fdef.name
- yield " %s" % fdef.doc
+ defs = [(fdef.name, fdef.doc)
+ for (_, (fdef, _, _, _)) in utils.NiceSort(fields.items(),
+ key=compat.fst)]
+ yield BuildValuesDoc(defs)
+
+
+def BuildValuesDoc(values):
+ """Builds documentation for a list of values
+
+ @type values: list of tuples in the form (value, documentation)
+ @return: generator yielding, for each pair, the value in ``literal``
+ markup followed by an indented documentation line; each documentation
+ string must be a single line (enforced by the assert)
+
+ """
+ for name, doc in values:
+ assert len(doc.splitlines()) == 1
+ yield "``%s``" % name
+ yield " %s" % doc
# TODO: Implement Sphinx directive for query fields
import logging
import errno
import itertools
+import shlex
from cStringIO import StringIO
from ganeti import utils
"DEBUG_SIMERR_OPT",
"DISKIDX_OPT",
"DISK_OPT",
+ "DISK_PARAMS_OPT",
"DISK_TEMPLATE_OPT",
"DRAINED_OPT",
"DRY_RUN_OPT",
"DEFAULT_IALLOCATOR_OPT",
"IDENTIFY_DEFAULTS_OPT",
"IGNORE_CONSIST_OPT",
+ "IGNORE_ERRORS_OPT",
"IGNORE_FAILURES_OPT",
"IGNORE_OFFLINE_OPT",
"IGNORE_REMOVE_FAILURES_OPT",
"MAC_PREFIX_OPT",
"MAINTAIN_NODE_HEALTH_OPT",
"MASTER_NETDEV_OPT",
+ "MASTER_NETMASK_OPT",
"MC_OPT",
"MIGRATION_MODE_OPT",
"NET_OPT",
"NEW_CONFD_HMAC_KEY_OPT",
"NEW_RAPI_CERT_OPT",
"NEW_SECONDARY_OPT",
+ "NEW_SPICE_CERT_OPT",
"NIC_PARAMS_OPT",
"NODE_FORCE_JOIN_OPT",
"NODE_LIST_OPT",
"NOVOTING_OPT",
"NO_REMEMBER_OPT",
"NWSYNC_OPT",
+ "OFFLINE_INST_OPT",
+ "ONLINE_INST_OPT",
"ON_PRIMARY_OPT",
"ON_SECONDARY_OPT",
"OFFLINE_OPT",
"SHOWCMD_OPT",
"SHUTDOWN_TIMEOUT_OPT",
"SINGLE_NODE_OPT",
+ "SPECS_CPU_COUNT_OPT",
+ "SPECS_DISK_COUNT_OPT",
+ "SPECS_DISK_SIZE_OPT",
+ "SPECS_MEM_SIZE_OPT",
+ "SPECS_NIC_COUNT_OPT",
+ "SPICE_CACERT_OPT",
+ "SPICE_CERT_OPT",
"SRC_DIR_OPT",
"SRC_NODE_OPT",
"SUBMIT_OPT",
"TO_GROUP_OPT",
"UIDPOOL_OPT",
"USEUNITS_OPT",
+ "USE_EXTERNAL_MIP_SCRIPT",
"USE_REPL_NET_OPT",
"VERBOSE_OPT",
"VG_NAME_OPT",
"YES_DOIT_OPT",
+ "DISK_STATE_OPT",
+ "HV_STATE_OPT",
# Generic functions for CLI programs
"ConfirmOperation",
"GenericMain",
default=True, action="store_false",
help="Don't wait for sync (DANGEROUS!)")
+ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
+ action="store_true", default=False,
+ help="Enable offline instance")
+
+OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
+ action="store_true", default=False,
+ help="Disable down instance")
+
DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
help=("Custom disk setup (%s)" %
utils.CommaJoin(constants.DISK_TEMPLATES)),
default={}, dest="hvparams",
help="Hypervisor parameters")
+DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
+ help="Disk template parameters, in the format"
+ " template:option=value,option=value,...",
+ type="identkeyval", action="append", default=[])
+
+SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
+ type="keyval", default={},
+ help="Memory count specs: min, max, std"
+ " (in MB)")
+
+SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
+ type="keyval", default={},
+ help="CPU count specs: min, max, std")
+
+SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
+ dest="ispecs_disk_count",
+ type="keyval", default={},
+ help="Disk count specs: min, max, std")
+
+SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
+ type="keyval", default={},
+ help="Disk size specs: min, max, std (in MB)")
+
+SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
+ type="keyval", default={},
+ help="NIC count specs: min, max, std")
+
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
help="Hypervisor and hypervisor options, in the"
" format hypervisor:option=value,option=value,...",
metavar="NETDEV",
default=None)
+MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
+ help="Specify the netmask of the master IP",
+ metavar="NETMASK",
+ default=None)
+
+USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
+ dest="use_external_mip_script",
+ help="Specify whether to run a user-provided"
+ " script for the master IP address turnup and"
+ " turndown operations",
+ type="bool", metavar=_YORNO, default=None)
+
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
help="Specify the default directory (cluster-"
"wide) for storing the file-based disks [%s]" %
help=("Generate a new self-signed RAPI"
" certificate"))
+SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
+ default=None,
+ help="File containing new SPICE certificate")
+
+SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
+ default=None,
+ help="File containing the certificate of the CA"
+ " which signed the SPICE certificate")
+
+NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
+ dest="new_spice_cert", default=None,
+ action="store_true",
+ help=("Generate a new self-signed SPICE"
+ " certificate"))
+
NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
dest="new_confd_hmac_key",
default=False, action="store_true",
default=None, action="append",
completion_suggest=OPT_COMPL_ONE_NODEGROUP)
+IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
+ action="append", dest="ignore_errors",
+ choices=list(constants.CV_ALL_ECODES_STRINGS),
+ help="Error code to be ignored")
+
+DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
+ action="append",
+ help=("Specify disk state information in the format"
+ " storage_type/identifier:option=value,..."),
+ type="identkeyval")
+
+HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
+ action="append",
+ help=("Specify hypervisor state information in the"
+ " format hypervisor:option=value,..."),
+ type="identkeyval")
+
#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]
]
-def _ParseArgs(argv, commands, aliases):
+def _ParseArgs(argv, commands, aliases, env_override):
"""Parser for the command line arguments.
This function parses the arguments and returns the function which
@param commands: dictionary with special contents, see the design
doc for cmdline handling
@param aliases: dictionary with command aliases {'alias': 'target, ...}
+ @param env_override: list of env variables allowed for default args
"""
+ assert not (env_override - set(commands))
+
if len(argv) == 0:
binary = "<command>"
else:
cmd = aliases[cmd]
+ if cmd in env_override:
+ args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
+ env_args = os.environ.get(args_env_name)
+ if env_args:
+ argv = utils.InsertAtPos(argv, 1, shlex.split(env_args))
+
func, args_def, parser_opts, usage, description = commands[cmd]
parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
description=description,
formatter=TitledHelpFormatter(),
usage="%%prog %s %s" % (cmd, usage))
parser.disable_interspersed_args()
- options, args = parser.parse_args()
+ options, args = parser.parse_args(args=argv[1:])
if not _CheckArguments(cmd, args_def, args):
return None, None, None
return retcode, obuf.getvalue().rstrip("\n")
-def GenericMain(commands, override=None, aliases=None):
+def GenericMain(commands, override=None, aliases=None,
+ env_override=frozenset()):
"""Generic main function for all the gnt-* commands.
- Arguments:
- - commands: a dictionary with a special structure, see the design doc
- for command line handling.
- - override: if not None, we expect a dictionary with keys that will
- override command line options; this can be used to pass
- options from the scripts to generic functions
- - aliases: dictionary with command aliases {'alias': 'target, ...}
+ @param commands: a dictionary with a special structure, see the design doc
+ for command line handling.
+ @param override: if not None, we expect a dictionary with keys that will
+ override command line options; this can be used to pass
+ options from the scripts to generic functions
+ @param aliases: dictionary with command aliases {'alias': 'target, ...}
+ @param env_override: list of environment names which are allowed to submit
+ default args for commands
"""
# save the program name and the entire command line for later logging
aliases = {}
try:
- func, options, args = _ParseArgs(sys.argv, commands, aliases)
+ func, options, args = _ParseArgs(sys.argv, commands, aliases, env_override)
except errors.ParameterError, err:
result, err_msg = FormatError(err)
ToStderr(err_msg)
else:
tags = []
- utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
+ utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
if mode == constants.INSTANCE_CREATE:
@param verbose: whether to use verbose field descriptions or not
"""
- if cl is None:
- cl = GetClient()
-
if not names:
names = None
- filter_ = qlang.MakeFilter(names, force_filter)
+ qfilter = qlang.MakeFilter(names, force_filter)
+
+ if cl is None:
+ cl = GetClient()
- response = cl.Query(resource, fields, filter_)
+ response = cl.Query(resource, fields, qfilter)
found_unknown = _WarnUnknownFields(response.fields)
if cl is None:
cl = GetClient()
- filter_ = []
+ qfilter = []
if nodes:
- filter_.append(qlang.MakeSimpleFilter("name", nodes))
+ qfilter.append(qlang.MakeSimpleFilter("name", nodes))
if nodegroup is not None:
- filter_.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
+ qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
[qlang.OP_EQUAL, "group.uuid", nodegroup]])
if filter_master:
- filter_.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
+ qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
- if filter_:
- if len(filter_) > 1:
- final_filter = [qlang.OP_AND] + filter_
+ if qfilter:
+ if len(qfilter) > 1:
+ final_filter = [qlang.OP_AND] + qfilter
else:
- assert len(filter_) == 1
- final_filter = filter_[0]
+ assert len(qfilter) == 1
+ final_filter = qfilter[0]
else:
final_filter = None
for (_, _, ops) in self.queue:
# SubmitJob will remove the success status, but raise an exception if
# the submission fails, so we'll notice that anyway.
- results.append([True, self.cl.SubmitJob(ops)])
+ results.append([True, self.cl.SubmitJob(ops)[0]])
else:
results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
for ((status, data), (idx, name, _)) in zip(results, self.queue):
beparams = opts.beparams
nicparams = opts.nicparams
+ diskparams = dict(opts.diskparams)
+
+ # check the disk template types here, as we cannot rely on the type check done
+ # by the opcode parameter types
+ diskparams_keys = set(diskparams.keys())
+ if not (diskparams_keys <= constants.DISK_TEMPLATES):
+ unknown = utils.NiceSort(diskparams_keys - constants.DISK_TEMPLATES)
+ ToStderr("Disk templates unknown: %s" % utils.CommaJoin(unknown))
+ return 1
+
# prepare beparams dict
beparams = objects.FillDict(constants.BEC_DEFAULTS, beparams)
- utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
+ utils.ForceDictType(beparams, constants.BES_PARAMETER_COMPAT)
# prepare nicparams dict
nicparams = objects.FillDict(constants.NICC_DEFAULTS, nicparams)
hvparams[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], hvparams[hv])
utils.ForceDictType(hvparams[hv], constants.HVS_PARAMETER_TYPES)
+ # prepare diskparams dict
+ for templ in constants.DISK_TEMPLATES:
+ if templ not in diskparams:
+ diskparams[templ] = {}
+ diskparams[templ] = objects.FillDict(constants.DISK_DT_DEFAULTS[templ],
+ diskparams[templ])
+ utils.ForceDictType(diskparams[templ], constants.DISK_DT_TYPES)
+
+ # prepare ipolicy dict
+ ipolicy_raw = \
+ objects.CreateIPolicyFromOpts(ispecs_mem_size=opts.ispecs_mem_size,
+ ispecs_cpu_count=opts.ispecs_cpu_count,
+ ispecs_disk_count=opts.ispecs_disk_count,
+ ispecs_disk_size=opts.ispecs_disk_size,
+ ispecs_nic_count=opts.ispecs_nic_count)
+ ipolicy = objects.FillDictOfDicts(constants.IPOLICY_DEFAULTS, ipolicy_raw)
+ for value in ipolicy.values():
+ utils.ForceDictType(value, constants.ISPECS_PARAMETER_TYPES)
+
if opts.candidate_pool_size is None:
opts.candidate_pool_size = constants.MASTER_POOL_SIZE_DEFAULT
if opts.prealloc_wipe_disks is None:
opts.prealloc_wipe_disks = False
+ external_ip_setup_script = opts.use_external_mip_script
+ if external_ip_setup_script is None:
+ external_ip_setup_script = False
+
try:
primary_ip_version = int(opts.primary_ip_version)
except (ValueError, TypeError), err:
ToStderr("Invalid primary ip version value: %s" % str(err))
return 1
+ master_netmask = opts.master_netmask
+ try:
+ if master_netmask is not None:
+ master_netmask = int(master_netmask)
+ except (ValueError, TypeError), err:
+ ToStderr("Invalid master netmask value: %s" % str(err))
+ return 1
+
+ if opts.disk_state:
+ disk_state = utils.FlatToDict(opts.disk_state)
+ else:
+ disk_state = {}
+
+ hv_state = dict(opts.hv_state)
+
bootstrap.InitCluster(cluster_name=args[0],
secondary_ip=opts.secondary_ip,
vg_name=vg_name,
mac_prefix=opts.mac_prefix,
+ master_netmask=master_netmask,
master_netdev=master_netdev,
file_storage_dir=opts.file_storage_dir,
shared_file_storage_dir=opts.shared_file_storage_dir,
beparams=beparams,
nicparams=nicparams,
ndparams=ndparams,
+ diskparams=diskparams,
+ ipolicy=ipolicy,
candidate_pool_size=opts.candidate_pool_size,
modify_etc_hosts=opts.modify_etc_hosts,
modify_ssh_setup=opts.modify_ssh_setup,
default_iallocator=opts.default_iallocator,
primary_ip_version=primary_ip_version,
prealloc_wipe_disks=opts.prealloc_wipe_disks,
+ use_external_mip_script=external_ip_setup_script,
+ hv_state=hv_state,
+ disk_state=disk_state,
)
op = opcodes.OpClusterPostInit()
SubmitOpCode(op, opts=opts)
compat.TryToRoman(result["candidate_pool_size"],
convert=opts.roman_integers))
ToStdout(" - master netdev: %s", result["master_netdev"])
+ ToStdout(" - master netmask: %s", result["master_netmask"])
+ ToStdout(" - use external master IP address setup script: %s",
+ result["use_external_mip_script"])
ToStdout(" - lvm volume group: %s", result["volume_group_name"])
if result["reserved_lvs"]:
reserved_lvs = utils.CommaJoin(result["reserved_lvs"])
ToStdout("Default nic parameters:")
_PrintGroupedParams(result["nicparams"], roman=opts.roman_integers)
+ ToStdout("Instance policy - limits for instances:")
+ for key in constants.IPOLICY_PARAMETERS:
+ ToStdout(" - %s", key)
+ _PrintGroupedParams(result["ipolicy"][key], roman=opts.roman_integers)
+
return 0
error_codes=opts.error_codes,
debug_simulate_errors=opts.simulate_errors,
skip_checks=skip_checks,
+ ignore_errors=opts.ignore_errors,
group_name=opts.nodegroup)
result = SubmitOpCode(op, cl=cl, opts=opts)
ToStdout("%s %s", path, tag)
-def _RenewCrypto(new_cluster_cert, new_rapi_cert, rapi_cert_filename,
- new_confd_hmac_key, new_cds, cds_filename,
- force):
+def _ReadAndVerifyCert(cert_filename, verify_private_key=False):
+ """Reads and verifies an X509 certificate.
+
+ @type cert_filename: string
+ @param cert_filename: the path of the file containing the certificate to
+ verify encoded in PEM format
+ @type verify_private_key: bool
+ @param verify_private_key: whether to verify the private key in addition to
+ the public certificate
+ @rtype: string
+ @return: a string containing the PEM-encoded certificate.
+ @raise errors.X509CertError: if the file cannot be read, does not contain
+ a loadable certificate, or (when requested) lacks a loadable private key
+
+ """
+ try:
+ pem = utils.ReadFile(cert_filename)
+ except IOError, err:
+ raise errors.X509CertError(cert_filename,
+ "Unable to read certificate: %s" % str(err))
+
+ # pyOpenSSL raises a variety of exception types on malformed PEM input,
+ # hence the broad "except Exception" handlers below; every failure is
+ # re-raised as a uniform X509CertError for the caller.
+ try:
+ OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, pem)
+ except Exception, err:
+ raise errors.X509CertError(cert_filename,
+ "Unable to load certificate: %s" % str(err))
+
+ if verify_private_key:
+ try:
+ OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, pem)
+ except Exception, err:
+ raise errors.X509CertError(cert_filename,
+ "Unable to load private key: %s" % str(err))
+
+ return pem
+
+
+def _RenewCrypto(new_cluster_cert, new_rapi_cert, #pylint: disable=R0911
+ rapi_cert_filename, new_spice_cert, spice_cert_filename,
+ spice_cacert_filename, new_confd_hmac_key, new_cds,
+ cds_filename, force):
"""Renews cluster certificates, keys and secrets.
@type new_cluster_cert: bool
@param new_rapi_cert: Whether to generate a new RAPI certificate
@type rapi_cert_filename: string
@param rapi_cert_filename: Path to file containing new RAPI certificate
+ @type new_spice_cert: bool
+ @param new_spice_cert: Whether to generate a new SPICE certificate
+ @type spice_cert_filename: string
+ @param spice_cert_filename: Path to file containing new SPICE certificate
+ @type spice_cacert_filename: string
+ @param spice_cacert_filename: Path to file containing the certificate of the
+ CA that signed the SPICE certificate
@type new_confd_hmac_key: bool
@param new_confd_hmac_key: Whether to generate a new HMAC key
@type new_cds: bool
" the same time.")
return 1
- if rapi_cert_filename:
- # Read and verify new certificate
- try:
- rapi_cert_pem = utils.ReadFile(rapi_cert_filename)
-
- OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
- rapi_cert_pem)
- except Exception, err: # pylint: disable=W0703
- ToStderr("Can't load new RAPI certificate from %s: %s" %
- (rapi_cert_filename, str(err)))
- return 1
+ if new_spice_cert and (spice_cert_filename or spice_cacert_filename):
+ ToStderr("When using --new-spice-certificate, the --spice-certificate"
+ " and --spice-ca-certificate must not be used.")
+ return 1
- try:
- OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, rapi_cert_pem)
- except Exception, err: # pylint: disable=W0703
- ToStderr("Can't load new RAPI private key from %s: %s" %
- (rapi_cert_filename, str(err)))
- return 1
+ if bool(spice_cacert_filename) ^ bool(spice_cert_filename):
+ ToStderr("Both --spice-certificate and --spice-ca-certificate must be"
+ " specified.")
+ return 1
- else:
- rapi_cert_pem = None
+ rapi_cert_pem, spice_cert_pem, spice_cacert_pem = (None, None, None)
+ try:
+ if rapi_cert_filename:
+ rapi_cert_pem = _ReadAndVerifyCert(rapi_cert_filename, True)
+ if spice_cert_filename:
+ spice_cert_pem = _ReadAndVerifyCert(spice_cert_filename, True)
+ spice_cacert_pem = _ReadAndVerifyCert(spice_cacert_filename)
+ except errors.X509CertError, err:
+ ToStderr("Unable to load X509 certificate from %s: %s", err[0], err[1])
+ return 1
if cds_filename:
try:
def _RenewCryptoInner(ctx):
ctx.feedback_fn("Updating certificates and keys")
- bootstrap.GenerateClusterCrypto(new_cluster_cert, new_rapi_cert,
+ bootstrap.GenerateClusterCrypto(new_cluster_cert,
+ new_rapi_cert,
+ new_spice_cert,
new_confd_hmac_key,
new_cds,
rapi_cert_pem=rapi_cert_pem,
+ spice_cert_pem=spice_cert_pem,
+ spice_cacert_pem=spice_cacert_pem,
cds=cds)
files_to_copy = []
if new_rapi_cert or rapi_cert_pem:
files_to_copy.append(constants.RAPI_CERT_FILE)
+ if new_spice_cert or spice_cert_pem:
+ files_to_copy.append(constants.SPICE_CERT_FILE)
+ files_to_copy.append(constants.SPICE_CACERT_FILE)
+
if new_confd_hmac_key:
files_to_copy.append(constants.CONFD_HMAC_KEY)
return _RenewCrypto(opts.new_cluster_cert,
opts.new_rapi_cert,
opts.rapi_cert,
+ opts.new_spice_cert,
+ opts.spice_cert,
+ opts.spice_cacert,
opts.new_confd_hmac_key,
opts.new_cluster_domain_secret,
opts.cluster_domain_secret,
if not (not opts.lvm_storage or opts.vg_name or
not opts.drbd_storage or opts.drbd_helper or
opts.enabled_hypervisors or opts.hvparams or
- opts.beparams or opts.nicparams or opts.ndparams or
+ opts.beparams or opts.nicparams or
+ opts.ndparams or opts.diskparams or
opts.candidate_pool_size is not None or
opts.uid_pool is not None or
opts.maintain_node_health is not None or
opts.default_iallocator is not None or
opts.reserved_lvs is not None or
opts.master_netdev is not None or
- opts.prealloc_wipe_disks is not None):
+ opts.master_netmask is not None or
+ opts.use_external_mip_script is not None or
+ opts.prealloc_wipe_disks is not None or
+ opts.hv_state or
+ opts.disk_state or
+ opts.ispecs_mem_size is not None or
+ opts.ispecs_cpu_count is not None or
+ opts.ispecs_disk_count is not None or
+ opts.ispecs_disk_size is not None or
+ opts.ispecs_nic_count is not None):
ToStderr("Please give at least one of the parameters.")
return 1
for hv_params in hvparams.values():
utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
+ diskparams = dict(opts.diskparams)
+
+ # Validate the new disk parameters: iterate over diskparams, NOT hvparams
+ # (the hypervisor parameters were already checked above against
+ # HVS_PARAMETER_TYPES; applying DISK_DT_TYPES to them would be wrong and
+ # would leave diskparams unvalidated).
+ for dt_params in diskparams.values():
+ utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
+
beparams = opts.beparams
- utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
+ utils.ForceDictType(beparams, constants.BES_PARAMETER_COMPAT)
nicparams = opts.nicparams
utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)
if ndparams is not None:
utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
+ ipolicy = \
+ objects.CreateIPolicyFromOpts(ispecs_mem_size=opts.ispecs_mem_size,
+ ispecs_cpu_count=opts.ispecs_cpu_count,
+ ispecs_disk_count=opts.ispecs_disk_count,
+ ispecs_disk_size=opts.ispecs_disk_size,
+ ispecs_nic_count=opts.ispecs_nic_count)
+ for value in ipolicy.values():
+ utils.ForceDictType(value, constants.ISPECS_PARAMETER_TYPES)
+
mnh = opts.maintain_node_health
uid_pool = opts.uid_pool
else:
opts.reserved_lvs = utils.UnescapeAndSplit(opts.reserved_lvs, sep=",")
+ if opts.master_netmask is not None:
+ try:
+ opts.master_netmask = int(opts.master_netmask)
+ except ValueError:
+ ToStderr("The --master-netmask option expects an int parameter.")
+ return 1
+
+ ext_ip_script = opts.use_external_mip_script
+
+ if opts.disk_state:
+ disk_state = utils.FlatToDict(opts.disk_state)
+ else:
+ disk_state = {}
+
+ hv_state = dict(opts.hv_state)
+
op = opcodes.OpClusterSetParams(vg_name=vg_name,
drbd_helper=drbd_helper,
enabled_hypervisors=hvlist,
beparams=beparams,
nicparams=nicparams,
ndparams=ndparams,
+ diskparams=diskparams,
+ ipolicy=ipolicy,
candidate_pool_size=opts.candidate_pool_size,
maintain_node_health=mnh,
uid_pool=uid_pool,
default_iallocator=opts.default_iallocator,
prealloc_wipe_disks=opts.prealloc_wipe_disks,
master_netdev=opts.master_netdev,
- reserved_lvs=opts.reserved_lvs)
+ master_netmask=opts.master_netmask,
+ reserved_lvs=opts.reserved_lvs,
+ use_external_mip_script=ext_ip_script,
+ hv_state=hv_state,
+ disk_state=disk_state,
+ )
SubmitOpCode(op, opts=opts)
return 0
else:
return _EpoOff(opts, node_list, inst_map)
+INSTANCE_POLICY_OPTS = [
+ SPECS_CPU_COUNT_OPT,
+ SPECS_DISK_COUNT_OPT,
+ SPECS_DISK_SIZE_OPT,
+ SPECS_MEM_SIZE_OPT,
+ SPECS_NIC_COUNT_OPT,
+ ]
commands = {
"init": (
InitCluster, [ArgHost(min=1, max=1)],
[BACKEND_OPT, CP_SIZE_OPT, ENABLED_HV_OPT, GLOBAL_FILEDIR_OPT,
- HVLIST_OPT, MAC_PREFIX_OPT, MASTER_NETDEV_OPT, NIC_PARAMS_OPT,
- NOLVM_STORAGE_OPT, NOMODIFY_ETCHOSTS_OPT, NOMODIFY_SSH_SETUP_OPT,
- SECONDARY_IP_OPT, VG_NAME_OPT, MAINTAIN_NODE_HEALTH_OPT,
- UIDPOOL_OPT, DRBD_HELPER_OPT, NODRBD_STORAGE_OPT,
+ HVLIST_OPT, MAC_PREFIX_OPT, MASTER_NETDEV_OPT, MASTER_NETMASK_OPT,
+ NIC_PARAMS_OPT, NOLVM_STORAGE_OPT, NOMODIFY_ETCHOSTS_OPT,
+ NOMODIFY_SSH_SETUP_OPT, SECONDARY_IP_OPT, VG_NAME_OPT,
+ MAINTAIN_NODE_HEALTH_OPT, UIDPOOL_OPT, DRBD_HELPER_OPT, NODRBD_STORAGE_OPT,
DEFAULT_IALLOCATOR_OPT, PRIMARY_IP_VERSION_OPT, PREALLOC_WIPE_DISKS_OPT,
- NODE_PARAMS_OPT, GLOBAL_SHARED_FILEDIR_OPT],
+ NODE_PARAMS_OPT, GLOBAL_SHARED_FILEDIR_OPT, USE_EXTERNAL_MIP_SCRIPT,
+ DISK_PARAMS_OPT, HV_STATE_OPT, DISK_STATE_OPT] + INSTANCE_POLICY_OPTS,
"[opts...] <cluster_name>", "Initialises a new cluster configuration"),
"destroy": (
DestroyCluster, ARGS_NONE, [YES_DOIT_OPT],
"verify": (
VerifyCluster, ARGS_NONE,
[VERBOSE_OPT, DEBUG_SIMERR_OPT, ERROR_CODES_OPT, NONPLUS1_OPT,
- DRY_RUN_OPT, PRIORITY_OPT, NODEGROUP_OPT],
+ DRY_RUN_OPT, PRIORITY_OPT, NODEGROUP_OPT, IGNORE_ERRORS_OPT],
"", "Does a check on the cluster configuration"),
"verify-disks": (
VerifyDisks, ARGS_NONE, [PRIORITY_OPT],
"modify": (
SetClusterParams, ARGS_NONE,
[BACKEND_OPT, CP_SIZE_OPT, ENABLED_HV_OPT, HVLIST_OPT, MASTER_NETDEV_OPT,
- NIC_PARAMS_OPT, NOLVM_STORAGE_OPT, VG_NAME_OPT, MAINTAIN_NODE_HEALTH_OPT,
- UIDPOOL_OPT, ADD_UIDS_OPT, REMOVE_UIDS_OPT, DRBD_HELPER_OPT,
- NODRBD_STORAGE_OPT, DEFAULT_IALLOCATOR_OPT, RESERVED_LVS_OPT,
- DRY_RUN_OPT, PRIORITY_OPT, PREALLOC_WIPE_DISKS_OPT, NODE_PARAMS_OPT],
+ MASTER_NETMASK_OPT, NIC_PARAMS_OPT, NOLVM_STORAGE_OPT, VG_NAME_OPT,
+ MAINTAIN_NODE_HEALTH_OPT, UIDPOOL_OPT, ADD_UIDS_OPT, REMOVE_UIDS_OPT,
+ DRBD_HELPER_OPT, NODRBD_STORAGE_OPT, DEFAULT_IALLOCATOR_OPT,
+ RESERVED_LVS_OPT, DRY_RUN_OPT, PRIORITY_OPT, PREALLOC_WIPE_DISKS_OPT,
+ NODE_PARAMS_OPT, USE_EXTERNAL_MIP_SCRIPT, DISK_PARAMS_OPT, HV_STATE_OPT,
+ DISK_STATE_OPT] +
+ INSTANCE_POLICY_OPTS,
"[opts...]",
"Alters the parameters of the cluster"),
"renew-crypto": (
RenewCrypto, ARGS_NONE,
[NEW_CLUSTER_CERT_OPT, NEW_RAPI_CERT_OPT, RAPI_CERT_OPT,
NEW_CONFD_HMAC_KEY_OPT, FORCE_OPT,
- NEW_CLUSTER_DOMAIN_SECRET_OPT, CLUSTER_DOMAIN_SECRET_OPT],
+ NEW_CLUSTER_DOMAIN_SECRET_OPT, CLUSTER_DOMAIN_SECRET_OPT,
+ NEW_SPICE_CERT_OPT, SPICE_CERT_OPT, SPICE_CACERT_OPT],
"[opts...]",
"Renews cluster certificates, keys and secrets"),
"epo": (
from ganeti.cli import *
from ganeti import constants
+from ganeti import objects
from ganeti import opcodes
from ganeti import utils
_LIST_DEF_FIELDS = ["name", "node_cnt", "pinst_cnt", "alloc_policy", "ndparams"]
+_ENV_OVERRIDE = frozenset(["list"])
+
+
def AddGroup(opts, args):
"""Add a node group to the cluster.
@return: the desired exit code
"""
+ ipolicy = \
+ objects.CreateIPolicyFromOpts(ispecs_mem_size=opts.ispecs_mem_size,
+ ispecs_cpu_count=opts.ispecs_cpu_count,
+ ispecs_disk_count=opts.ispecs_disk_count,
+ ispecs_disk_size=opts.ispecs_disk_size,
+ ispecs_nic_count=opts.ispecs_nic_count,
+ group_ipolicy=True)
+ for key in ipolicy.keys():
+ utils.ForceDictType(ipolicy[key], constants.ISPECS_PARAMETER_TYPES)
+
(group_name,) = args
+ diskparams = dict(opts.diskparams)
+
+ if opts.disk_state:
+ disk_state = utils.FlatToDict(opts.disk_state)
+ else:
+ disk_state = {}
+ hv_state = dict(opts.hv_state)
+
op = opcodes.OpGroupAdd(group_name=group_name, ndparams=opts.ndparams,
- alloc_policy=opts.alloc_policy)
+ alloc_policy=opts.alloc_policy,
+ diskparams=diskparams, ipolicy=ipolicy,
+ hv_state=hv_state,
+ disk_state=disk_state)
SubmitOpCode(op, opts=opts)
@return: the desired exit code
"""
- if opts.ndparams is None and opts.alloc_policy is None:
+ # Collect every modifiable parameter so we can detect a no-op invocation;
+ # opts.diskparams was listed twice before, which was redundant.
+ allmods = [opts.ndparams, opts.alloc_policy, opts.diskparams, opts.hv_state,
+ opts.disk_state, opts.ispecs_mem_size, opts.ispecs_cpu_count,
+ opts.ispecs_disk_count, opts.ispecs_disk_size,
+ opts.ispecs_nic_count]
+ if allmods.count(None) == len(allmods):
ToStderr("Please give at least one of the parameters.")
return 1
+ if opts.disk_state:
+ disk_state = utils.FlatToDict(opts.disk_state)
+ else:
+ disk_state = {}
+
+ hv_state = dict(opts.hv_state)
+
+ diskparams = dict(opts.diskparams)
+
+ # set the default values
+ to_ipolicy = [
+ opts.ispecs_mem_size,
+ opts.ispecs_cpu_count,
+ opts.ispecs_disk_count,
+ opts.ispecs_disk_size,
+ opts.ispecs_nic_count,
+ ]
+ for ispec in to_ipolicy:
+ for param in ispec:
+ if isinstance(ispec[param], basestring):
+ if ispec[param].lower() == "default":
+ ispec[param] = constants.VALUE_DEFAULT
+ # create ipolicy object
+ ipolicy = objects.CreateIPolicyFromOpts(\
+ ispecs_mem_size=opts.ispecs_mem_size,
+ ispecs_cpu_count=opts.ispecs_cpu_count,
+ ispecs_disk_count=opts.ispecs_disk_count,
+ ispecs_disk_size=opts.ispecs_disk_size,
+ ispecs_nic_count=opts.ispecs_nic_count,
+ group_ipolicy=True,
+ allowed_values=[constants.VALUE_DEFAULT])
+ for key in ipolicy.keys():
+ utils.ForceDictType(ipolicy[key], constants.ISPECS_PARAMETER_TYPES,
+ allowed_values=[constants.VALUE_DEFAULT])
+
op = opcodes.OpGroupSetParams(group_name=args[0],
ndparams=opts.ndparams,
- alloc_policy=opts.alloc_policy)
+ alloc_policy=opts.alloc_policy,
+ hv_state=hv_state,
+ disk_state=disk_state,
+ diskparams=diskparams,
+ ipolicy=ipolicy)
+
result = SubmitOrSend(op, opts)
if result:
return rcode
+INSTANCE_POLICY_OPTS = [
+ SPECS_CPU_COUNT_OPT,
+ SPECS_DISK_COUNT_OPT,
+ SPECS_DISK_SIZE_OPT,
+ SPECS_MEM_SIZE_OPT,
+ SPECS_NIC_COUNT_OPT,
+ ]
commands = {
"add": (
- AddGroup, ARGS_ONE_GROUP, [DRY_RUN_OPT, ALLOC_POLICY_OPT, NODE_PARAMS_OPT],
+ AddGroup, ARGS_ONE_GROUP,
+ [DRY_RUN_OPT, ALLOC_POLICY_OPT, NODE_PARAMS_OPT, DISK_PARAMS_OPT,
+ HV_STATE_OPT, DISK_STATE_OPT] + INSTANCE_POLICY_OPTS,
"<group_name>", "Add a new node group to the cluster"),
"assign-nodes": (
AssignNodes, ARGS_ONE_GROUP + ARGS_MANY_NODES, [DRY_RUN_OPT, FORCE_OPT],
"Lists all available fields for node groups"),
"modify": (
SetGroupParams, ARGS_ONE_GROUP,
- [DRY_RUN_OPT, SUBMIT_OPT, ALLOC_POLICY_OPT, NODE_PARAMS_OPT],
+ [DRY_RUN_OPT, SUBMIT_OPT, ALLOC_POLICY_OPT, NODE_PARAMS_OPT, HV_STATE_OPT,
+ DISK_STATE_OPT, DISK_PARAMS_OPT] + INSTANCE_POLICY_OPTS,
"<group_name>", "Alters the parameters of a node group"),
"remove": (
RemoveGroup, ARGS_ONE_GROUP, [DRY_RUN_OPT],
def Main():
return GenericMain(commands,
- override={"tag_type": constants.TAG_NODEGROUP})
+ override={"tag_type": constants.TAG_NODEGROUP},
+ env_override=_ENV_OVERRIDE)
]
+_ENV_OVERRIDE = frozenset(["list"])
+
+
def _ExpandMultiNames(mode, names, client=None):
"""Expand the given names using the passed mode.
(elem, name, err), errors.ECODE_INVAL)
disks.append({"size": size})
- utils.ForceDictType(specs["backend"], constants.BES_PARAMETER_TYPES)
+ utils.ForceDictType(specs["backend"], constants.BES_PARAMETER_COMPAT)
utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
tmp_nics = []
buf.write(" - VCPUs: %s\n" %
compat.TryToRoman(instance["be_actual"][constants.BE_VCPUS],
convert=opts.roman_integers))
+ buf.write(" - maxmem: %sMiB\n" %
+ compat.TryToRoman(instance["be_actual"][constants.BE_MAXMEM],
+ convert=opts.roman_integers))
+ buf.write(" - minmem: %sMiB\n" %
+ compat.TryToRoman(instance["be_actual"][constants.BE_MINMEM],
+ convert=opts.roman_integers))
+ # deprecated "memory" value, kept for one version for compatibility
+ # TODO(ganeti 2.7) remove.
buf.write(" - memory: %sMiB\n" %
- compat.TryToRoman(instance["be_actual"][constants.BE_MEMORY],
+ compat.TryToRoman(instance["be_actual"][constants.BE_MAXMEM],
convert=opts.roman_integers))
+ buf.write(" - %s: %s\n" %
+ (constants.BE_ALWAYS_FAILOVER,
+ instance["be_actual"][constants.BE_ALWAYS_FAILOVER]))
buf.write(" - NICs:\n")
for idx, (ip, mac, mode, link) in enumerate(instance["nics"]):
buf.write(" - nic/%d: MAC: %s, IP: %s, mode: %s, link: %s\n" %
"""
if not (opts.nics or opts.disks or opts.disk_template or
- opts.hvparams or opts.beparams or opts.os or opts.osparams):
+ opts.hvparams or opts.beparams or opts.os or opts.osparams or
+ opts.offline_inst or opts.online_inst):
ToStderr("Please give at least one of the parameters.")
return 1
if opts.beparams[param].lower() == "default":
opts.beparams[param] = constants.VALUE_DEFAULT
- utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES,
+ utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT,
allowed_values=[constants.VALUE_DEFAULT])
for param in opts.hvparams:
osparams=opts.osparams,
force_variant=opts.force_variant,
force=opts.force,
- wait_for_sync=opts.wait_for_sync)
+ wait_for_sync=opts.wait_for_sync,
+ offline_inst=opts.offline_inst,
+ online_inst=opts.online_inst)
# even if here we process the result, we allow submit only
result = SubmitOrSend(op, opts)
SetInstanceParams, ARGS_ONE_INSTANCE,
[BACKEND_OPT, DISK_OPT, FORCE_OPT, HVOPTS_OPT, NET_OPT, SUBMIT_OPT,
DISK_TEMPLATE_OPT, SINGLE_NODE_OPT, OS_OPT, FORCE_VARIANT_OPT,
- OSPARAMS_OPT, DRY_RUN_OPT, PRIORITY_OPT, NWSYNC_OPT],
+ OSPARAMS_OPT, DRY_RUN_OPT, PRIORITY_OPT, NWSYNC_OPT, OFFLINE_INST_OPT,
+ ONLINE_INST_OPT],
"<instance>", "Alters the parameters of an instance"),
"shutdown": (
GenericManyOps("shutdown", _ShutdownInstance), [ArgInstance()],
def Main():
return GenericMain(commands, aliases=aliases,
- override={"tag_type": constants.TAG_INSTANCE})
+ override={"tag_type": constants.TAG_INSTANCE},
+ env_override=_ENV_OVERRIDE)
constants.OOB_POWER_CYCLE])
+_ENV_OVERRIDE = frozenset(["list"])
+
+
NONODE_SETUP_OPT = cli_option("--no-node-setup", default=True,
action="store_false", dest="node_setup",
help=("Do not make initial SSH setup on remote"
bootstrap.SetupNodeDaemon(cluster_name, node, opts.ssh_key_check)
+ if opts.disk_state:
+ disk_state = utils.FlatToDict(opts.disk_state)
+ else:
+ disk_state = {}
+
+ hv_state = dict(opts.hv_state)
+
op = opcodes.OpNodeAdd(node_name=args[0], secondary_ip=sip,
readd=opts.readd, group=opts.nodegroup,
vm_capable=opts.vm_capable, ndparams=opts.ndparams,
- master_capable=opts.master_capable)
+ master_capable=opts.master_capable,
+ disk_state=disk_state,
+ hv_state=hv_state)
SubmitOpCode(op, opts=opts)
all_changes = [opts.master_candidate, opts.drained, opts.offline,
opts.master_capable, opts.vm_capable, opts.secondary_ip,
opts.ndparams]
- if all_changes.count(None) == len(all_changes):
+ if (all_changes.count(None) == len(all_changes) and
+ not (opts.hv_state or opts.disk_state)):
ToStderr("Please give at least one of the parameters.")
return 1
+ if opts.disk_state:
+ disk_state = utils.FlatToDict(opts.disk_state)
+ else:
+ disk_state = {}
+
+ hv_state = dict(opts.hv_state)
+
op = opcodes.OpNodeSetParams(node_name=args[0],
master_candidate=opts.master_candidate,
offline=opts.offline,
force=opts.force,
ndparams=opts.ndparams,
auto_promote=opts.auto_promote,
- powered=opts.node_powered)
+ powered=opts.node_powered,
+ hv_state=hv_state,
+ disk_state=disk_state)
# even if here we process the result, we allow submit only
result = SubmitOrSend(op, opts)
AddNode, [ArgHost(min=1, max=1)],
[SECONDARY_IP_OPT, READD_OPT, NOSSH_KEYCHECK_OPT, NODE_FORCE_JOIN_OPT,
NONODE_SETUP_OPT, VERBOSE_OPT, NODEGROUP_OPT, PRIORITY_OPT,
- CAPAB_MASTER_OPT, CAPAB_VM_OPT, NODE_PARAMS_OPT],
+ CAPAB_MASTER_OPT, CAPAB_VM_OPT, NODE_PARAMS_OPT, HV_STATE_OPT,
+ DISK_STATE_OPT],
"[-s ip] [--readd] [--no-ssh-key-check] [--force-join]"
" [--no-node-setup] [--verbose]"
" <node_name>",
[FORCE_OPT, SUBMIT_OPT, MC_OPT, DRAINED_OPT, OFFLINE_OPT,
CAPAB_MASTER_OPT, CAPAB_VM_OPT, SECONDARY_IP_OPT,
AUTO_PROMOTE_OPT, DRY_RUN_OPT, PRIORITY_OPT, NODE_PARAMS_OPT,
- NODE_POWERED_OPT],
+ NODE_POWERED_OPT, HV_STATE_OPT, DISK_STATE_OPT],
"<node_name>", "Alters the parameters of a node"),
"powercycle": (
PowercycleNode, ARGS_ONE_NODE,
def Main():
- return GenericMain(commands, override={"tag_type": constants.TAG_NODE})
+ return GenericMain(commands, override={"tag_type": constants.TAG_NODE},
+ env_override=_ENV_OVERRIDE)
# W0201 since most LU attributes are defined in CheckPrereq or similar
# functions
-# C0302: since we have waaaay to many lines in this module
+# C0302: since we have waaaay too many lines in this module
import os
import os.path
from ganeti import qlang
from ganeti import opcodes
from ganeti import ht
+from ganeti import rpc
import ganeti.masterd.instance # pylint: disable=W0611
+#: Size of DRBD meta block device
+DRBD_META_SIZE = 128
+
+# States of instance
+INSTANCE_UP = [constants.ADMINST_UP]
+INSTANCE_DOWN = [constants.ADMINST_DOWN]
+INSTANCE_OFFLINE = [constants.ADMINST_OFFLINE]
+INSTANCE_ONLINE = [constants.ADMINST_DOWN, constants.ADMINST_UP]
+INSTANCE_NOT_RUNNING = [constants.ADMINST_DOWN, constants.ADMINST_OFFLINE]
+
+
class ResultWithJobs:
"""Data container for LU results with jobs.
HTYPE = None
REQ_BGL = True
- def __init__(self, processor, op, context, rpc):
+ def __init__(self, processor, op, context, rpc_runner):
"""Constructor for LogicalUnit.
This needs to be overridden in derived classes in order to check op
# readability alias
self.owned_locks = context.glm.list_owned
self.context = context
- self.rpc = rpc
+ self.rpc = rpc_runner
# Dicts used to declare locking needs to mcpu
self.needed_locks = None
self.share_locks = dict.fromkeys(locking.LEVELS, 0)
self.op.instance_name)
self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
- def _LockInstancesNodes(self, primary_only=False):
+ def _LockInstancesNodes(self, primary_only=False,
+ level=locking.LEVEL_NODE):
"""Helper function to declare instances' nodes for locking.
This function should be called after locking one or more instances to lock
@type primary_only: boolean
@param primary_only: only lock primary nodes of locked instances
+ @param level: Which lock level to use for locking nodes
"""
- assert locking.LEVEL_NODE in self.recalculate_locks, \
+ assert level in self.recalculate_locks, \
"_LockInstancesNodes helper function called with no nodes to recalculate"
# TODO: check if we're really been called with the instance locks held
if not primary_only:
wanted_nodes.extend(instance.secondary_nodes)
- if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
- self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
- elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
- self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)
+ if self.recalculate_locks[level] == constants.LOCKS_REPLACE:
+ self.needed_locks[level] = wanted_nodes
+ elif self.recalculate_locks[level] == constants.LOCKS_APPEND:
+ self.needed_locks[level].extend(wanted_nodes)
+ else:
+ raise errors.ProgrammerError("Unknown recalculation mode")
- del self.recalculate_locks[locking.LEVEL_NODE]
+ del self.recalculate_locks[level]
class NoHooksLU(LogicalUnit): # pylint: disable=W0223
#: Attribute holding field definitions
FIELDS = None
- def __init__(self, filter_, fields, use_locking):
+ def __init__(self, qfilter, fields, use_locking):
"""Initializes this class.
"""
self.use_locking = use_locking
- self.query = query.Query(self.FIELDS, fields, filter_=filter_,
+ self.query = query.Query(self.FIELDS, fields, qfilter=qfilter,
namefield="name")
self.requested_data = self.query.RequestedData()
self.names = self.query.RequestedNames()
return dict.fromkeys(locking.LEVELS, 1)
+def _MakeLegacyNodeInfo(data):
+ """Formats the data returned by L{rpc.RpcRunner.call_node_info}.
+
+ Converts the data into a single dictionary. This is fine for most use cases,
+ but some require information from more than one volume group or hypervisor.
+
+ @param data: result tuple from the node-info RPC, of the form
+ (bootid, vg_info list, hv_info list); only a single volume group and a
+ single hypervisor entry are supported here
+ @return: one flat dict merging the first VG dict, the first hypervisor
+ dict and a "bootid" key
+
+ """
+ # The tuple unpacking deliberately asserts exactly one VG entry and one
+ # hypervisor entry in the reply; more than one would raise ValueError.
+ (bootid, (vg_info, ), (hv_info, )) = data
+
+ # JoinDisjointDicts raises if any key is present in both inputs, so the
+ # merged result cannot silently lose information.
+ return utils.JoinDisjointDicts(utils.JoinDisjointDicts(vg_info, hv_info), {
+ "bootid": bootid,
+ })
+
+
def _CheckInstanceNodeGroups(cfg, instance_name, owned_groups):
"""Checks if the owned node groups are still correct for an instance.
return params_copy
+def _UpdateAndVerifySubDict(base, updates, type_check):
+ """Updates and verifies a dict with sub dicts of the same type.
+
+ @param base: The dict with the old data
+ @param updates: The dict with the new data
+ @param type_check: Dict suitable to ForceDictType to verify correct types
+  @returns: A new dict with updated and verified values
+  @return: A new dict with updated and verified values
+
+ """
+ def fn(old, value):
+ new = _GetUpdatedParams(old, value)
+ utils.ForceDictType(new, type_check)
+ return new
+
+ ret = copy.deepcopy(base)
+ ret.update(dict((key, fn(base.get(key, {}), value))
+ for key, value in updates.items()))
+ return ret
+
+
+def _MergeAndVerifyHvState(op_input, obj_input):
+  """Combines the hv state from an opcode with that of the object.
+
+ @param op_input: The input dict from the opcode
+ @param obj_input: The input dict from the objects
+ @return: The verified and updated dict
+
+ """
+ if op_input:
+ invalid_hvs = set(op_input) - constants.HYPER_TYPES
+ if invalid_hvs:
+ raise errors.OpPrereqError("Invalid hypervisor(s) in hypervisor state:"
+ " %s" % utils.CommaJoin(invalid_hvs),
+ errors.ECODE_INVAL)
+ if obj_input is None:
+ obj_input = {}
+ type_check = constants.HVSTS_PARAMETER_TYPES
+ return _UpdateAndVerifySubDict(obj_input, op_input, type_check)
+
+ return None
+
+
+def _MergeAndVerifyDiskState(op_input, obj_input):
+  """Combines the disk state from an opcode with that of the object.
+
+ @param op_input: The input dict from the opcode
+ @param obj_input: The input dict from the objects
+ @return: The verified and updated dict
+ """
+ if op_input:
+ invalid_dst = set(op_input) - constants.DS_VALID_TYPES
+ if invalid_dst:
+ raise errors.OpPrereqError("Invalid storage type(s) in disk state: %s" %
+ utils.CommaJoin(invalid_dst),
+ errors.ECODE_INVAL)
+ type_check = constants.DSS_PARAMETER_TYPES
+ if obj_input is None:
+ obj_input = {}
+ return dict((key, _UpdateAndVerifySubDict(obj_input.get(key, {}), value,
+ type_check))
+ for key, value in op_input.items())
+
+ return None
+
+
def _ReleaseLocks(lu, level, names=None, keep=None):
"""Releases locks owned by an LU.
else:
should_release = None
- if should_release:
+ owned = lu.owned_locks(level)
+ if not owned:
+ # Not owning any lock at this level, do nothing
+ pass
+
+ elif should_release:
retain = []
release = []
# Determine which locks to release
- for name in lu.owned_locks(level):
+ for name in owned:
if should_release(name):
release.append(name)
else:
strict=True)
-def _CheckInstanceDown(lu, instance, reason):
- """Ensure that an instance is not running."""
- if instance.admin_up:
- raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
- (instance.name, reason), errors.ECODE_STATE)
+def _CheckInstanceState(lu, instance, req_states, msg=None):
+ """Ensure that an instance is in one of the required states.
- pnode = instance.primary_node
- ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
- ins_l.Raise("Can't contact node %s for instance information" % pnode,
- prereq=True, ecode=errors.ECODE_ENVIRON)
+ @param lu: the LU on behalf of which we make the check
+ @param instance: the instance to check
+ @param msg: if passed, should be a message to replace the default one
+ @raise errors.OpPrereqError: if the instance is not in the required state
+
+ """
+ if msg is None:
+ msg = "can't use instance from outside %s states" % ", ".join(req_states)
+ if instance.admin_state not in req_states:
+ raise errors.OpPrereqError("Instance %s is marked to be %s, %s" %
+ (instance, instance.admin_state, msg),
+ errors.ECODE_STATE)
+
+ if constants.ADMINST_UP not in req_states:
+ pnode = instance.primary_node
+ ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
+ ins_l.Raise("Can't contact node %s for instance information" % pnode,
+ prereq=True, ecode=errors.ECODE_ENVIRON)
+
+ if instance.name in ins_l.payload:
+ raise errors.OpPrereqError("Instance %s is running, %s" %
+ (instance.name, msg), errors.ECODE_STATE)
- if instance.name in ins_l.payload:
- raise errors.OpPrereqError("Instance %s is running, %s" %
- (instance.name, reason), errors.ECODE_STATE)
+
+def _CheckMinMaxSpecs(name, ipolicy, value):
+ """Checks if value is in the desired range.
+
+ @param name: name of the parameter for which we perform the check
+ @param ipolicy: dictionary containing min, max and std values
+ @param value: actual value that we want to use
+ @return: None or element not meeting the criteria
+
+
+ """
+ if value in [None, constants.VALUE_AUTO]:
+ return None
+ max_v = ipolicy[constants.ISPECS_MAX].get(name, value)
+ min_v = ipolicy[constants.ISPECS_MIN].get(name, value)
+ if value > max_v or min_v > value:
+ return ("%s value %s is not in range [%s, %s]" %
+ (name, value, min_v, max_v))
+ return None
def _ExpandItemName(fn, name, kind):
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
- memory, vcpus, nics, disk_template, disks,
+ minmem, maxmem, vcpus, nics, disk_template, disks,
bep, hvp, hypervisor_name, tags):
"""Builds instance related env variables for hooks
@param secondary_nodes: list of secondary nodes as strings
@type os_type: string
@param os_type: the name of the instance's OS
- @type status: boolean
- @param status: the should_run status of the instance
- @type memory: string
- @param memory: the memory size of the instance
+ @type status: string
+ @param status: the desired status of the instance
+ @type minmem: string
+ @param minmem: the minimum memory size of the instance
+ @type maxmem: string
+ @param maxmem: the maximum memory size of the instance
@type vcpus: string
@param vcpus: the count of VCPUs the instance has
@type nics: list
@return: the hook environment for this instance
"""
- if status:
- str_status = "up"
- else:
- str_status = "down"
env = {
"OP_TARGET": name,
"INSTANCE_NAME": name,
"INSTANCE_PRIMARY": primary_node,
"INSTANCE_SECONDARIES": " ".join(secondary_nodes),
"INSTANCE_OS_TYPE": os_type,
- "INSTANCE_STATUS": str_status,
- "INSTANCE_MEMORY": memory,
+ "INSTANCE_STATUS": status,
+ "INSTANCE_MINMEM": minmem,
+ "INSTANCE_MAXMEM": maxmem,
+ # TODO(2.7) remove deprecated "memory" value
+ "INSTANCE_MEMORY": maxmem,
"INSTANCE_VCPUS": vcpus,
"INSTANCE_DISK_TEMPLATE": disk_template,
"INSTANCE_HYPERVISOR": hypervisor_name,
}
-
if nics:
nic_count = len(nics)
for idx, (ip, mac, mode, link) in enumerate(nics):
"primary_node": instance.primary_node,
"secondary_nodes": instance.secondary_nodes,
"os_type": instance.os,
- "status": instance.admin_up,
- "memory": bep[constants.BE_MEMORY],
+ "status": instance.admin_state,
+ "maxmem": bep[constants.BE_MAXMEM],
+ "minmem": bep[constants.BE_MINMEM],
"vcpus": bep[constants.BE_VCPUS],
"nics": _NICListToTuple(lu, instance.nics),
"disk_template": instance.disk_template,
return mc_now < mc_should
+def _CalculateGroupIPolicy(cfg, group):
+ """Calculate instance policy for group.
+
+ """
+ cluster = cfg.GetClusterInfo()
+ return cluster.SimpleFillIPolicy(group.ipolicy)
+
+
def _CheckNicsBridgesExist(lu, target_nics, target_node):
"""Check that the brigdes needed by a list of nics exist.
return []
-def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
+def _FindFaultyInstanceDisks(cfg, rpc_runner, instance, node_name, prereq):
faulty = []
for dev in instance.disks:
cfg.SetDiskID(dev, node_name)
- result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
+ result = rpc_runner.call_blockdev_getmirrorstatus(node_name, instance.disks)
result.Raise("Failed to get disk status from node %s" % node_name,
prereq=prereq, ecode=errors.ECODE_ENVIRON)
"""Destroys the cluster.
"""
- master = self.cfg.GetMasterNode()
+ master_params = self.cfg.GetMasterNetworkParameters()
# Run post hooks on master node before it's removed
- _RunPostHook(self, master)
+ _RunPostHook(self, master_params.name)
- result = self.rpc.call_node_deactivate_master_ip(master)
- result.Raise("Could not disable the master role")
+ ems = self.cfg.GetUseExternalMipScript()
+ result = self.rpc.call_node_deactivate_master_ip(master_params.name,
+ master_params, ems)
+ if result.fail_msg:
+ self.LogWarning("Error disabling the master IP address: %s",
+ result.fail_msg)
- return master
+ return master_params.name
def _VerifyCertificate(filename):
self.op and self._feedback_fn to be available.)
"""
- TCLUSTER = "cluster"
- TNODE = "node"
- TINSTANCE = "instance"
-
- ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
- ECLUSTERCERT = (TCLUSTER, "ECLUSTERCERT")
- ECLUSTERFILECHECK = (TCLUSTER, "ECLUSTERFILECHECK")
- ECLUSTERDANGLINGNODES = (TNODE, "ECLUSTERDANGLINGNODES")
- ECLUSTERDANGLINGINST = (TNODE, "ECLUSTERDANGLINGINST")
- EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
- EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
- EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
- EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
- EINSTANCEFAULTYDISK = (TINSTANCE, "EINSTANCEFAULTYDISK")
- EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
- EINSTANCESPLITGROUPS = (TINSTANCE, "EINSTANCESPLITGROUPS")
- ENODEDRBD = (TNODE, "ENODEDRBD")
- ENODEDRBDHELPER = (TNODE, "ENODEDRBDHELPER")
- ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
- ENODEHOOKS = (TNODE, "ENODEHOOKS")
- ENODEHV = (TNODE, "ENODEHV")
- ENODELVM = (TNODE, "ENODELVM")
- ENODEN1 = (TNODE, "ENODEN1")
- ENODENET = (TNODE, "ENODENET")
- ENODEOS = (TNODE, "ENODEOS")
- ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
- ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
- ENODERPC = (TNODE, "ENODERPC")
- ENODESSH = (TNODE, "ENODESSH")
- ENODEVERSION = (TNODE, "ENODEVERSION")
- ENODESETUP = (TNODE, "ENODESETUP")
- ENODETIME = (TNODE, "ENODETIME")
- ENODEOOBPATH = (TNODE, "ENODEOOBPATH")
ETYPE_FIELD = "code"
ETYPE_ERROR = "ERROR"
"""
ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
- itype, etxt = ecode
+ itype, etxt, _ = ecode
# first complete the msg
if args:
msg = msg % args
# and finally report it via the feedback_fn
self._feedback_fn(" - %s" % msg) # Mix-in. pylint: disable=E1101
- def _ErrorIf(self, cond, *args, **kwargs):
+ def _ErrorIf(self, cond, ecode, *args, **kwargs):
"""Log an error message if the passed condition is True.
"""
cond = (bool(cond)
or self.op.debug_simulate_errors) # pylint: disable=E1101
+
+ # If the error code is in the list of ignored errors, demote the error to a
+ # warning
+ (_, etxt, _) = ecode
+ if etxt in self.op.ignore_errors: # pylint: disable=E1101
+ kwargs[self.ETYPE_FIELD] = self.ETYPE_WARNING
+
if cond:
- self._Error(*args, **kwargs)
+ self._Error(ecode, *args, **kwargs)
+
# do not mark the operation as failed for WARN cases only
if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
self.bad = self.bad or cond
groups = self.cfg.GetNodeGroupList()
# Verify global configuration
- jobs.append([opcodes.OpClusterVerifyConfig()])
+ jobs.append([
+ opcodes.OpClusterVerifyConfig(ignore_errors=self.op.ignore_errors)
+ ])
# Always depend on global verification
depends_fn = lambda: [(-len(jobs), [])]
jobs.extend([opcodes.OpClusterVerifyGroup(group_name=group,
- depends=depends_fn())]
+ ignore_errors=self.op.ignore_errors,
+ depends=depends_fn())]
for group in groups)
# Fix up all parameters
utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
hv_class.CheckParameterSyntax(hv_params)
except errors.GenericError, err:
- self._ErrorIf(True, self.ECLUSTERCFG, None, msg % str(err))
+ self._ErrorIf(True, constants.CV_ECLUSTERCFG, None, msg % str(err))
def ExpandNames(self):
# Information can be safely retrieved as the BGL is acquired in exclusive
feedback_fn("* Verifying cluster config")
for msg in self.cfg.VerifyConfig():
- self._ErrorIf(True, self.ECLUSTERCFG, None, msg)
+ self._ErrorIf(True, constants.CV_ECLUSTERCFG, None, msg)
feedback_fn("* Verifying cluster certificate files")
for cert_filename in constants.ALL_CERT_FILES:
(errcode, msg) = _VerifyCertificate(cert_filename)
- self._ErrorIf(errcode, self.ECLUSTERCERT, None, msg, code=errcode)
+ self._ErrorIf(errcode, constants.CV_ECLUSTERCERT, None, msg, code=errcode)
feedback_fn("* Verifying hypervisor parameters")
["no instances"])))
for node in dangling_nodes]
- self._ErrorIf(bool(dangling_nodes), self.ECLUSTERDANGLINGNODES, None,
+ self._ErrorIf(bool(dangling_nodes), constants.CV_ECLUSTERDANGLINGNODES,
+ None,
"the following nodes (and their instances) belong to a non"
" existing group: %s", utils.CommaJoin(pretty_dangling))
- self._ErrorIf(bool(no_node_instances), self.ECLUSTERDANGLINGINST, None,
+ self._ErrorIf(bool(no_node_instances), constants.CV_ECLUSTERDANGLINGINST,
+ None,
"the following instances have a non-existing primary-node:"
" %s", utils.CommaJoin(no_node_instances))
# main result, nresult should be a non-empty dict
test = not nresult or not isinstance(nresult, dict)
- _ErrorIf(test, self.ENODERPC, node,
+ _ErrorIf(test, constants.CV_ENODERPC, node,
"unable to verify node: no data returned")
if test:
return False
test = not (remote_version and
isinstance(remote_version, (list, tuple)) and
len(remote_version) == 2)
- _ErrorIf(test, self.ENODERPC, node,
+ _ErrorIf(test, constants.CV_ENODERPC, node,
"connection to node returned invalid data")
if test:
return False
test = local_version != remote_version[0]
- _ErrorIf(test, self.ENODEVERSION, node,
+ _ErrorIf(test, constants.CV_ENODEVERSION, node,
"incompatible protocol versions: master %s,"
" node %s", local_version, remote_version[0])
if test:
# full package version
self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
- self.ENODEVERSION, node,
+ constants.CV_ENODEVERSION, node,
"software version mismatch: master %s, node %s",
constants.RELEASE_VERSION, remote_version[1],
code=self.ETYPE_WARNING)
if ninfo.vm_capable and isinstance(hyp_result, dict):
for hv_name, hv_result in hyp_result.iteritems():
test = hv_result is not None
- _ErrorIf(test, self.ENODEHV, node,
+ _ErrorIf(test, constants.CV_ENODEHV, node,
"hypervisor %s verify failure: '%s'", hv_name, hv_result)
hvp_result = nresult.get(constants.NV_HVPARAMS, None)
if ninfo.vm_capable and isinstance(hvp_result, list):
for item, hv_name, hv_result in hvp_result:
- _ErrorIf(True, self.ENODEHV, node,
+ _ErrorIf(True, constants.CV_ENODEHV, node,
"hypervisor %s parameter verify failure (source %s): %s",
hv_name, item, hv_result)
test = nresult.get(constants.NV_NODESETUP,
["Missing NODESETUP results"])
- _ErrorIf(test, self.ENODESETUP, node, "node setup error: %s",
+ _ErrorIf(test, constants.CV_ENODESETUP, node, "node setup error: %s",
"; ".join(test))
return True
try:
ntime_merged = utils.MergeTime(ntime)
except (ValueError, TypeError):
- _ErrorIf(True, self.ENODETIME, node, "Node returned invalid time")
+ _ErrorIf(True, constants.CV_ENODETIME, node, "Node returned invalid time")
return
if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
else:
ntime_diff = None
- _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
+ _ErrorIf(ntime_diff is not None, constants.CV_ENODETIME, node,
"Node time diverges by at least %s from master node time",
ntime_diff)
# checks vg existence and size > 20G
vglist = nresult.get(constants.NV_VGLIST, None)
test = not vglist
- _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
+ _ErrorIf(test, constants.CV_ENODELVM, node, "unable to check volume groups")
if not test:
vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
constants.MIN_VG_SIZE)
- _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)
+ _ErrorIf(vgstatus, constants.CV_ENODELVM, node, vgstatus)
# check pv names
pvlist = nresult.get(constants.NV_PVLIST, None)
test = pvlist is None
- _ErrorIf(test, self.ENODELVM, node, "Can't get PV list from node")
+ _ErrorIf(test, constants.CV_ENODELVM, node, "Can't get PV list from node")
if not test:
# check that ':' is not present in PV names, since it's a
# special character for lvcreate (denotes the range of PEs to
# use on the PV)
for _, pvname, owner_vg in pvlist:
test = ":" in pvname
- _ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
- " '%s' of VG '%s'", pvname, owner_vg)
+ _ErrorIf(test, constants.CV_ENODELVM, node,
+ "Invalid character ':' in PV '%s' of VG '%s'",
+ pvname, owner_vg)
def _VerifyNodeBridges(self, ninfo, nresult, bridges):
"""Check the node bridges.
missing = nresult.get(constants.NV_BRIDGES, None)
test = not isinstance(missing, list)
- _ErrorIf(test, self.ENODENET, node,
+ _ErrorIf(test, constants.CV_ENODENET, node,
"did not return valid bridge information")
if not test:
- _ErrorIf(bool(missing), self.ENODENET, node, "missing bridges: %s" %
- utils.CommaJoin(sorted(missing)))
+ _ErrorIf(bool(missing), constants.CV_ENODENET, node,
+ "missing bridges: %s" % utils.CommaJoin(sorted(missing)))
+
+ def _VerifyNodeUserScripts(self, ninfo, nresult):
+    """Check the results of user script presence and executability on the node.
+
+ @type ninfo: L{objects.Node}
+ @param ninfo: the node to check
+ @param nresult: the remote results for the node
+
+ """
+ node = ninfo.name
+
+ test = not constants.NV_USERSCRIPTS in nresult
+ self._ErrorIf(test, constants.CV_ENODEUSERSCRIPTS, node,
+ "did not return user scripts information")
+
+ broken_scripts = nresult.get(constants.NV_USERSCRIPTS, None)
+ if not test:
+ self._ErrorIf(broken_scripts, constants.CV_ENODEUSERSCRIPTS, node,
+ "user scripts not present or not executable: %s" %
+ utils.CommaJoin(sorted(broken_scripts)))
def _VerifyNodeNetwork(self, ninfo, nresult):
"""Check the node network connectivity results.
_ErrorIf = self._ErrorIf # pylint: disable=C0103
test = constants.NV_NODELIST not in nresult
- _ErrorIf(test, self.ENODESSH, node,
+ _ErrorIf(test, constants.CV_ENODESSH, node,
"node hasn't returned node ssh connectivity data")
if not test:
if nresult[constants.NV_NODELIST]:
for a_node, a_msg in nresult[constants.NV_NODELIST].items():
- _ErrorIf(True, self.ENODESSH, node,
+ _ErrorIf(True, constants.CV_ENODESSH, node,
"ssh communication with node '%s': %s", a_node, a_msg)
test = constants.NV_NODENETTEST not in nresult
- _ErrorIf(test, self.ENODENET, node,
+ _ErrorIf(test, constants.CV_ENODENET, node,
"node hasn't returned node tcp connectivity data")
if not test:
if nresult[constants.NV_NODENETTEST]:
nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
for anode in nlist:
- _ErrorIf(True, self.ENODENET, node,
+ _ErrorIf(True, constants.CV_ENODENET, node,
"tcp communication with node '%s': %s",
anode, nresult[constants.NV_NODENETTEST][anode])
test = constants.NV_MASTERIP not in nresult
- _ErrorIf(test, self.ENODENET, node,
+ _ErrorIf(test, constants.CV_ENODENET, node,
"node hasn't returned node master IP reachability data")
if not test:
if not nresult[constants.NV_MASTERIP]:
msg = "the master node cannot reach the master IP (not configured?)"
else:
msg = "cannot reach the master IP"
- _ErrorIf(True, self.ENODENET, node, msg)
+ _ErrorIf(True, constants.CV_ENODENET, node, msg)
+
+ def _VerifyInstancePolicy(self, instance):
+    """Verify instance specs against instance policy set at node group level.
+
+
+ """
+ cluster = self.cfg.GetClusterInfo()
+ full_beparams = cluster.FillBE(instance)
+ ipolicy = cluster.SimpleFillIPolicy(self.group_info.ipolicy)
+
+ mem_size = full_beparams.get(constants.BE_MAXMEM, None)
+ cpu_count = full_beparams.get(constants.BE_VCPUS, None)
+ disk_count = len(instance.disks)
+ disk_sizes = [disk.size for disk in instance.disks]
+ nic_count = len(instance.nics)
+
+ test_settings = [
+ (constants.ISPEC_MEM_SIZE, mem_size),
+ (constants.ISPEC_CPU_COUNT, cpu_count),
+ (constants.ISPEC_DISK_COUNT, disk_count),
+ (constants.ISPEC_NIC_COUNT, nic_count),
+ ] + map((lambda d: (constants.ISPEC_DISK_SIZE, d)), disk_sizes)
+
+ for (name, value) in test_settings:
+ test_result = _CheckMinMaxSpecs(name, ipolicy, value)
+ self._ErrorIf(test_result is not None,
+ constants.CV_EINSTANCEPOLICY, instance.name,
+ test_result)
def _VerifyInstance(self, instance, instanceconfig, node_image,
diskstatus):
node_vol_should = {}
instanceconfig.MapLVsByNode(node_vol_should)
+ self._VerifyInstancePolicy(instanceconfig)
+
for node in node_vol_should:
n_img = node_image[node]
if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
continue
for volume in node_vol_should[node]:
test = volume not in n_img.volumes
- _ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
+ _ErrorIf(test, constants.CV_EINSTANCEMISSINGDISK, instance,
"volume %s missing on node %s", volume, node)
- if instanceconfig.admin_up:
+ if instanceconfig.admin_state == constants.ADMINST_UP:
pri_img = node_image[node_current]
test = instance not in pri_img.instances and not pri_img.offline
- _ErrorIf(test, self.EINSTANCEDOWN, instance,
+ _ErrorIf(test, constants.CV_EINSTANCEDOWN, instance,
"instance not running on its primary node %s",
node_current)
# node here
snode = node_image[nname]
bad_snode = snode.ghost or snode.offline
- _ErrorIf(instanceconfig.admin_up and not success and not bad_snode,
- self.EINSTANCEFAULTYDISK, instance,
+ _ErrorIf(instanceconfig.admin_state == constants.ADMINST_UP and
+ not success and not bad_snode,
+ constants.CV_EINSTANCEFAULTYDISK, instance,
"couldn't retrieve status for disk/%s on %s: %s",
idx, nname, bdev_status)
- _ErrorIf((instanceconfig.admin_up and success and
- bdev_status.ldisk_status == constants.LDS_FAULTY),
- self.EINSTANCEFAULTYDISK, instance,
+ _ErrorIf((instanceconfig.admin_state == constants.ADMINST_UP and
+ success and bdev_status.ldisk_status == constants.LDS_FAULTY),
+ constants.CV_EINSTANCEFAULTYDISK, instance,
"disk/%s on %s is faulty", idx, nname)
def _VerifyOrphanVolumes(self, node_vol_should, node_image, reserved):
test = ((node not in node_vol_should or
volume not in node_vol_should[node]) and
not reserved.Matches(volume))
- self._ErrorIf(test, self.ENODEORPHANLV, node,
+ self._ErrorIf(test, constants.CV_ENODEORPHANLV, node,
"volume %s is unknown", volume)
def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
# we already list instances living on such nodes, and that's
# enough warning
continue
+ #TODO(dynmem): use MINMEM for checking
+ #TODO(dynmem): also consider ballooning out other instances
for prinode, instances in n_img.sbp.items():
needed_mem = 0
for instance in instances:
bep = cluster_info.FillBE(instance_cfg[instance])
if bep[constants.BE_AUTO_BALANCE]:
- needed_mem += bep[constants.BE_MEMORY]
+ needed_mem += bep[constants.BE_MAXMEM]
test = n_img.mfree < needed_mem
- self._ErrorIf(test, self.ENODEN1, node,
+ self._ErrorIf(test, constants.CV_ENODEN1, node,
"not enough memory to accomodate instance failovers"
" should node %s fail (%dMiB needed, %dMiB available)",
prinode, needed_mem, n_img.mfree)
node_files = nresult.payload.get(constants.NV_FILELIST, None)
test = not (node_files and isinstance(node_files, dict))
- errorif(test, cls.ENODEFILECHECK, node.name,
+ errorif(test, constants.CV_ENODEFILECHECK, node.name,
"Node did not return file checksum data")
if test:
ignore_nodes.add(node.name)
if filename in files_opt:
# All or no nodes
errorif(missing_file and missing_file != expected_nodes,
- cls.ECLUSTERFILECHECK, None,
+ constants.CV_ECLUSTERFILECHECK, None,
"File %s is optional, but it must exist on all or no"
" nodes (not found on %s)",
filename, utils.CommaJoin(utils.NiceSort(missing_file)))
else:
- # Non-optional files
- errorif(missing_file, cls.ECLUSTERFILECHECK, None,
+ errorif(missing_file, constants.CV_ECLUSTERFILECHECK, None,
"File %s is missing from node(s) %s", filename,
utils.CommaJoin(utils.NiceSort(missing_file)))
# Warn if a node has a file it shouldn't
unexpected = with_file - expected_nodes
errorif(unexpected,
- cls.ECLUSTERFILECHECK, None,
+ constants.CV_ECLUSTERFILECHECK, None,
"File %s should not exist on node(s) %s",
filename, utils.CommaJoin(utils.NiceSort(unexpected)))
else:
variants = []
- errorif(test, cls.ECLUSTERFILECHECK, None,
+ errorif(test, constants.CV_ECLUSTERFILECHECK, None,
"File %s found with %s different checksums (%s)",
filename, len(checksums), "; ".join(variants))
if drbd_helper:
helper_result = nresult.get(constants.NV_DRBDHELPER, None)
test = (helper_result == None)
- _ErrorIf(test, self.ENODEDRBDHELPER, node,
+ _ErrorIf(test, constants.CV_ENODEDRBDHELPER, node,
"no drbd usermode helper returned")
if helper_result:
status, payload = helper_result
test = not status
- _ErrorIf(test, self.ENODEDRBDHELPER, node,
+ _ErrorIf(test, constants.CV_ENODEDRBDHELPER, node,
"drbd usermode helper check unsuccessful: %s", payload)
test = status and (payload != drbd_helper)
- _ErrorIf(test, self.ENODEDRBDHELPER, node,
+ _ErrorIf(test, constants.CV_ENODEDRBDHELPER, node,
"wrong drbd usermode helper: %s", payload)
# compute the DRBD minors
node_drbd = {}
for minor, instance in drbd_map[node].items():
test = instance not in instanceinfo
- _ErrorIf(test, self.ECLUSTERCFG, None,
+ _ErrorIf(test, constants.CV_ECLUSTERCFG, None,
"ghost instance '%s' in temporary DRBD map", instance)
# ghost instance should not be running, but otherwise we
# don't give double warnings (both ghost instance and
node_drbd[minor] = (instance, False)
else:
instance = instanceinfo[instance]
- node_drbd[minor] = (instance.name, instance.admin_up)
+ node_drbd[minor] = (instance.name,
+ instance.admin_state == constants.ADMINST_UP)
# and now check them
used_minors = nresult.get(constants.NV_DRBDLIST, [])
test = not isinstance(used_minors, (tuple, list))
- _ErrorIf(test, self.ENODEDRBD, node,
+ _ErrorIf(test, constants.CV_ENODEDRBD, node,
"cannot parse drbd status file: %s", str(used_minors))
if test:
# we cannot check drbd status
for minor, (iname, must_exist) in node_drbd.items():
test = minor not in used_minors and must_exist
- _ErrorIf(test, self.ENODEDRBD, node,
+ _ErrorIf(test, constants.CV_ENODEDRBD, node,
"drbd minor %d of instance %s is not active", minor, iname)
for minor in used_minors:
test = minor not in node_drbd
- _ErrorIf(test, self.ENODEDRBD, node,
+ _ErrorIf(test, constants.CV_ENODEDRBD, node,
"unallocated drbd minor %d is in use", minor)
def _UpdateNodeOS(self, ninfo, nresult, nimg):
not compat.all(isinstance(v, list) and len(v) == 7
for v in remote_os))
- _ErrorIf(test, self.ENODEOS, node,
+ _ErrorIf(test, constants.CV_ENODEOS, node,
"node hasn't returned valid OS data")
nimg.os_fail = test
for os_name, os_data in nimg.oslist.items():
assert os_data, "Empty OS status for OS %s?!" % os_name
f_path, f_status, f_diag, f_var, f_param, f_api = os_data[0]
- _ErrorIf(not f_status, self.ENODEOS, node,
+ _ErrorIf(not f_status, constants.CV_ENODEOS, node,
"Invalid OS %s (located at %s): %s", os_name, f_path, f_diag)
- _ErrorIf(len(os_data) > 1, self.ENODEOS, node,
+ _ErrorIf(len(os_data) > 1, constants.CV_ENODEOS, node,
"OS '%s' has multiple entries (first one shadows the rest): %s",
os_name, utils.CommaJoin([v[0] for v in os_data]))
# comparisons with the 'base' image
test = os_name not in base.oslist
- _ErrorIf(test, self.ENODEOS, node,
+ _ErrorIf(test, constants.CV_ENODEOS, node,
"Extra OS %s not present on reference node (%s)",
os_name, base.name)
if test:
("variants list", f_var, b_var),
("parameters", beautify_params(f_param),
beautify_params(b_param))]:
- _ErrorIf(a != b, self.ENODEOS, node,
+ _ErrorIf(a != b, constants.CV_ENODEOS, node,
"OS %s for %s differs from reference node %s: [%s] vs. [%s]",
kind, os_name, base.name,
utils.CommaJoin(sorted(a)), utils.CommaJoin(sorted(b)))
# check any missing OSes
missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
- _ErrorIf(missing, self.ENODEOS, node,
+ _ErrorIf(missing, constants.CV_ENODEOS, node,
"OSes present on reference node %s but missing on this node: %s",
base.name, utils.CommaJoin(missing))
if ((ninfo.master_candidate or ninfo.master_capable) and
constants.NV_OOB_PATHS in nresult):
for path_result in nresult[constants.NV_OOB_PATHS]:
- self._ErrorIf(path_result, self.ENODEOOBPATH, node, path_result)
+ self._ErrorIf(path_result, constants.CV_ENODEOOBPATH, node, path_result)
def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
"""Verifies and updates the node volume data.
if vg_name is None:
pass
elif isinstance(lvdata, basestring):
- _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
+ _ErrorIf(True, constants.CV_ENODELVM, node, "LVM problem on node: %s",
utils.SafeEncode(lvdata))
elif not isinstance(lvdata, dict):
- _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
+ _ErrorIf(True, constants.CV_ENODELVM, node,
+ "rpc call to node failed (lvlist)")
else:
nimg.volumes = lvdata
nimg.lvm_fail = False
"""
idata = nresult.get(constants.NV_INSTANCELIST, None)
test = not isinstance(idata, list)
- self._ErrorIf(test, self.ENODEHV, ninfo.name, "rpc call to node failed"
- " (instancelist): %s", utils.SafeEncode(str(idata)))
+ self._ErrorIf(test, constants.CV_ENODEHV, ninfo.name,
+ "rpc call to node failed (instancelist): %s",
+ utils.SafeEncode(str(idata)))
if test:
nimg.hyp_fail = True
else:
# try to read free memory (from the hypervisor)
hv_info = nresult.get(constants.NV_HVINFO, None)
test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
- _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
+ _ErrorIf(test, constants.CV_ENODEHV, node,
+ "rpc call to node failed (hvinfo)")
if not test:
try:
nimg.mfree = int(hv_info["memory_free"])
except (ValueError, TypeError):
- _ErrorIf(True, self.ENODERPC, node,
+ _ErrorIf(True, constants.CV_ENODERPC, node,
"node returned invalid nodeinfo, check hypervisor")
# FIXME: devise a free space model for file based instances as well
if vg_name is not None:
test = (constants.NV_VGLIST not in nresult or
vg_name not in nresult[constants.NV_VGLIST])
- _ErrorIf(test, self.ENODELVM, node,
+ _ErrorIf(test, constants.CV_ENODELVM, node,
"node didn't return data for the volume group '%s'"
" - it is either missing or broken", vg_name)
if not test:
try:
nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
except (ValueError, TypeError):
- _ErrorIf(True, self.ENODERPC, node,
+ _ErrorIf(True, constants.CV_ENODERPC, node,
"node returned invalid LVM info, check LVM status")
def _CollectDiskInfo(self, nodelist, node_image, instanceinfo):
data = len(disks) * [(False, "node offline")]
else:
msg = nres.fail_msg
- _ErrorIf(msg, self.ENODERPC, nname,
+ _ErrorIf(msg, constants.CV_ENODERPC, nname,
"while getting disk information: %s", msg)
if msg:
# No data from this node
i_non_redundant = [] # Non redundant instances
i_non_a_balanced = [] # Non auto-balanced instances
+ i_offline = 0 # Count of offline instances
n_offline = 0 # Count of offline nodes
n_drained = 0 # Count of nodes being drained
node_vol_should = {}
feedback_fn("* Gathering data (%d nodes)" % len(self.my_node_names))
+ user_scripts = []
+ if self.cfg.GetUseExternalMipScript():
+ user_scripts.append(constants.EXTERNAL_MASTER_SETUP_SCRIPT)
+
node_verify_param = {
constants.NV_FILELIST:
utils.UniqueSequence(filename
constants.NV_MASTERIP: (master_node, master_ip),
constants.NV_OSLIST: None,
constants.NV_VMNODES: self.cfg.GetNonVmCapableNodeList(),
+ constants.NV_USERSCRIPTS: user_scripts,
}
if vg_name is not None:
feedback_fn("* Verifying node %s (%s)" % (node, ntype))
msg = all_nvinfo[node].fail_msg
- _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
+ _ErrorIf(msg, constants.CV_ENODERPC, node, "while contacting node: %s",
+ msg)
if msg:
nimg.rpc_fail = True
continue
nimg.call_ok = self._VerifyNode(node_i, nresult)
self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
self._VerifyNodeNetwork(node_i, nresult)
+ self._VerifyNodeUserScripts(node_i, nresult)
self._VerifyOob(node_i, nresult)
if nimg.vm_capable:
non_primary_inst = set(nimg.instances).difference(nimg.pinst)
for inst in non_primary_inst:
+ # FIXME: investigate best way to handle offline insts
+ if inst.admin_state == constants.ADMINST_OFFLINE:
+ if verbose:
+ feedback_fn("* Skipping offline instance %s" % inst.name)
+ i_offline += 1
+ continue
test = inst in self.all_inst_info
- _ErrorIf(test, self.EINSTANCEWRONGNODE, inst,
+ _ErrorIf(test, constants.CV_EINSTANCEWRONGNODE, inst,
"instance should not run on node %s", node_i.name)
- _ErrorIf(not test, self.ENODEORPHANINSTANCE, node_i.name,
+ _ErrorIf(not test, constants.CV_ENODEORPHANINSTANCE, node_i.name,
"node is running unknown instance %s", inst)
for node, result in extra_lv_nvinfo.items():
pnode = inst_config.primary_node
pnode_img = node_image[pnode]
_ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
- self.ENODERPC, pnode, "instance %s, connection to"
+ constants.CV_ENODERPC, pnode, "instance %s, connection to"
" primary node failed", instance)
- _ErrorIf(inst_config.admin_up and pnode_img.offline,
- self.EINSTANCEBADNODE, instance,
+ _ErrorIf(inst_config.admin_state == constants.ADMINST_UP and
+ pnode_img.offline,
+ constants.CV_EINSTANCEBADNODE, instance,
"instance is marked as running and lives on offline node %s",
inst_config.primary_node)
if not inst_config.secondary_nodes:
i_non_redundant.append(instance)
- _ErrorIf(len(inst_config.secondary_nodes) > 1, self.EINSTANCELAYOUT,
+ _ErrorIf(len(inst_config.secondary_nodes) > 1,
+ constants.CV_EINSTANCELAYOUT,
instance, "instance has multiple secondary nodes: %s",
utils.CommaJoin(inst_config.secondary_nodes),
code=self.ETYPE_WARNING)
key=lambda (_, nodes): pnode in nodes,
reverse=True)]
- self._ErrorIf(len(instance_groups) > 1, self.EINSTANCESPLITGROUPS,
+ self._ErrorIf(len(instance_groups) > 1,
+ constants.CV_EINSTANCESPLITGROUPS,
instance, "instance has primary and secondary nodes in"
" different groups: %s", utils.CommaJoin(pretty_list),
code=self.ETYPE_WARNING)
for snode in inst_config.secondary_nodes:
s_img = node_image[snode]
- _ErrorIf(s_img.rpc_fail and not s_img.offline, self.ENODERPC, snode,
- "instance %s, connection to secondary node failed", instance)
+ _ErrorIf(s_img.rpc_fail and not s_img.offline, constants.CV_ENODERPC,
+ snode, "instance %s, connection to secondary node failed",
+ instance)
if s_img.offline:
inst_nodes_offline.append(snode)
# warn that the instance lives on offline nodes
- _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
+ _ErrorIf(inst_nodes_offline, constants.CV_EINSTANCEBADNODE, instance,
"instance has offline secondary node(s) %s",
utils.CommaJoin(inst_nodes_offline))
# ... or ghost/non-vm_capable nodes
for node in inst_config.all_nodes:
- _ErrorIf(node_image[node].ghost, self.EINSTANCEBADNODE, instance,
- "instance lives on ghost node %s", node)
- _ErrorIf(not node_image[node].vm_capable, self.EINSTANCEBADNODE,
+ _ErrorIf(node_image[node].ghost, constants.CV_EINSTANCEBADNODE,
+ instance, "instance lives on ghost node %s", node)
+ _ErrorIf(not node_image[node].vm_capable, constants.CV_EINSTANCEBADNODE,
instance, "instance lives on non-vm_capable node %s", node)
feedback_fn("* Verifying orphan volumes")
feedback_fn(" - NOTICE: %d non-auto-balanced instance(s) found."
% len(i_non_a_balanced))
+ if i_offline:
+ feedback_fn(" - NOTICE: %d offline instance(s) found." % i_offline)
+
if n_offline:
feedback_fn(" - NOTICE: %d offline node(s) found." % n_offline)
res = hooks_results[node_name]
msg = res.fail_msg
test = msg and not res.offline
- self._ErrorIf(test, self.ENODEHOOKS, node_name,
+ self._ErrorIf(test, constants.CV_ENODEHOOKS, node_name,
"Communication failure in hooks execution: %s", msg)
if res.offline or msg:
# No need to investigate payload if node is offline or gave
continue
for script, hkr, output in res.payload:
test = hkr == constants.HKR_FAIL
- self._ErrorIf(test, self.ENODEHOOKS, node_name,
+ self._ErrorIf(test, constants.CV_ENODEHOOKS, node_name,
"Script %s failed, output:", script)
if test:
output = self._HOOKS_INDENT_RE.sub(" ", output)
res_missing = {}
nv_dict = _MapInstanceDisksToNodes([inst
- for inst in self.instances.values()
- if inst.admin_up])
+ for inst in self.instances.values()
+ if inst.admin_state == constants.ADMINST_UP])
if nv_dict:
nodes = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
if self.op.instances:
self.wanted_names = _GetWantedInstances(self, self.op.instances)
self.needed_locks = {
- locking.LEVEL_NODE: [],
+ locking.LEVEL_NODE_RES: [],
locking.LEVEL_INSTANCE: self.wanted_names,
}
- self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+ self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
else:
self.wanted_names = None
self.needed_locks = {
- locking.LEVEL_NODE: locking.ALL_SET,
+ locking.LEVEL_NODE_RES: locking.ALL_SET,
locking.LEVEL_INSTANCE: locking.ALL_SET,
}
self.share_locks = {
- locking.LEVEL_NODE: 1,
+ locking.LEVEL_NODE_RES: 1,
locking.LEVEL_INSTANCE: 0,
}
def DeclareLocks(self, level):
- if level == locking.LEVEL_NODE and self.wanted_names is not None:
- self._LockInstancesNodes(primary_only=True)
+ if level == locking.LEVEL_NODE_RES and self.wanted_names is not None:
+ self._LockInstancesNodes(primary_only=True, level=level)
def CheckPrereq(self):
"""Check prerequisites.
for idx, disk in enumerate(instance.disks):
per_node_disks[pnode].append((instance, idx, disk))
+ assert not (frozenset(per_node_disks.keys()) -
+ self.owned_locks(locking.LEVEL_NODE_RES)), \
+ "Not owning correct locks"
+ assert not self.owned_locks(locking.LEVEL_NODE)
+
changed = []
for node, dskl in per_node_disks.items():
newl = [v[2].Copy() for v in dskl]
"""
clustername = self.op.name
- ip = self.ip
+ new_ip = self.ip
# shutdown the master IP
- master = self.cfg.GetMasterNode()
- result = self.rpc.call_node_deactivate_master_ip(master)
+ master_params = self.cfg.GetMasterNetworkParameters()
+ ems = self.cfg.GetUseExternalMipScript()
+ result = self.rpc.call_node_deactivate_master_ip(master_params.name,
+ master_params, ems)
result.Raise("Could not disable the master role")
try:
cluster = self.cfg.GetClusterInfo()
cluster.cluster_name = clustername
- cluster.master_ip = ip
+ cluster.master_ip = new_ip
self.cfg.Update(cluster, feedback_fn)
# update the known hosts file
ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
node_list = self.cfg.GetOnlineNodeList()
try:
- node_list.remove(master)
+ node_list.remove(master_params.name)
except ValueError:
pass
_UploadHelper(self, node_list, constants.SSH_KNOWN_HOSTS_FILE)
finally:
- result = self.rpc.call_node_activate_master_ip(master)
+ master_params.ip = new_ip
+ result = self.rpc.call_node_activate_master_ip(master_params.name,
+ master_params, ems)
msg = result.fail_msg
if msg:
self.LogWarning("Could not re-enable the master role on"
return clustername
+def _ValidateNetmask(cfg, netmask):
+ """Checks if a netmask is valid.
+
+ @type cfg: L{config.ConfigWriter}
+ @param cfg: The cluster configuration
+ @type netmask: int
+ @param netmask: the netmask to be verified
+ @raise errors.OpPrereqError: if the validation fails
+
+ """
+ ip_family = cfg.GetPrimaryIPFamily()
+ try:
+ ipcls = netutils.IPAddress.GetClassFromIpFamily(ip_family)
+ except errors.ProgrammerError:
+ raise errors.OpPrereqError("Invalid primary ip family: %s." %
+ ip_family)
+ if not ipcls.ValidateNetmask(netmask):
+ raise errors.OpPrereqError("CIDR netmask (%s) not valid" %
+ (netmask))
+
+
class LUClusterSetParams(LogicalUnit):
"""Change the parameters of the cluster.
if self.op.remove_uids:
uidpool.CheckUidPool(self.op.remove_uids)
+ if self.op.master_netmask is not None:
+ _ValidateNetmask(self.cfg, self.op.master_netmask)
+
+ if self.op.diskparams:
+ for dt_params in self.op.diskparams.values():
+ utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
+
def ExpandNames(self):
# FIXME: in the future maybe other cluster params won't require checking on
# all nodes to be modified.
self.cluster = cluster = self.cfg.GetClusterInfo()
# validate params changes
if self.op.beparams:
+ objects.UpgradeBeParams(self.op.beparams)
utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
self.new_beparams = cluster.SimpleFillBE(self.op.beparams)
self.new_ndparams["oob_program"] = \
constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM]
+ if self.op.hv_state:
+ new_hv_state = _MergeAndVerifyHvState(self.op.hv_state,
+ self.cluster.hv_state_static)
+ self.new_hv_state = dict((hv, cluster.SimpleFillHvState(values))
+ for hv, values in new_hv_state.items())
+
+ if self.op.disk_state:
+ new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state,
+ self.cluster.disk_state_static)
+ self.new_disk_state = \
+ dict((storage, dict((name, cluster.SimpleFillDiskState(values))
+ for name, values in svalues.items()))
+ for storage, svalues in new_disk_state.items())
+
+ if self.op.ipolicy:
+ ipolicy = {}
+ for key, value in self.op.ipolicy.items():
+ utils.ForceDictType(value, constants.ISPECS_PARAMETER_TYPES)
+ ipolicy[key] = _GetUpdatedParams(cluster.ipolicy.get(key, {}),
+ value)
+ objects.InstancePolicy.CheckParameterSyntax(ipolicy)
+ self.new_ipolicy = ipolicy
+
if self.op.nicparams:
utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
else:
self.new_hvparams[hv_name].update(hv_dict)
+ # disk template parameters
+ self.new_diskparams = objects.FillDict(cluster.diskparams, {})
+ if self.op.diskparams:
+ for dt_name, dt_params in self.op.diskparams.items():
+ if dt_name not in self.op.diskparams:
+ self.new_diskparams[dt_name] = dt_params
+ else:
+ self.new_diskparams[dt_name].update(dt_params)
+
# os hypervisor parameters
self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
if self.op.os_hvp:
self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
if self.op.nicparams:
self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
+ if self.op.ipolicy:
+ self.cluster.ipolicy = self.new_ipolicy
if self.op.osparams:
self.cluster.osparams = self.new_osp
if self.op.ndparams:
self.cluster.ndparams = self.new_ndparams
+ if self.op.diskparams:
+ self.cluster.diskparams = self.new_diskparams
+ if self.op.hv_state:
+ self.cluster.hv_state_static = self.new_hv_state
+ if self.op.disk_state:
+ self.cluster.disk_state_static = self.new_disk_state
if self.op.candidate_pool_size is not None:
self.cluster.candidate_pool_size = self.op.candidate_pool_size
_AdjustCandidatePool(self, [])
if self.op.maintain_node_health is not None:
+ if self.op.maintain_node_health and not constants.ENABLE_CONFD:
+ feedback_fn("Note: CONFD was disabled at build time, node health"
+ " maintenance is not useful (still enabling it)")
self.cluster.maintain_node_health = self.op.maintain_node_health
if self.op.prealloc_wipe_disks is not None:
if self.op.reserved_lvs is not None:
self.cluster.reserved_lvs = self.op.reserved_lvs
+ if self.op.use_external_mip_script is not None:
+ self.cluster.use_external_mip_script = self.op.use_external_mip_script
+
def helper_os(aname, mods, desc):
desc += " OS list"
lst = getattr(self.cluster, aname)
helper_os("blacklisted_os", self.op.blacklisted_os, "blacklisted")
if self.op.master_netdev:
- master = self.cfg.GetMasterNode()
+ master_params = self.cfg.GetMasterNetworkParameters()
+ ems = self.cfg.GetUseExternalMipScript()
feedback_fn("Shutting down master ip on the current netdev (%s)" %
self.cluster.master_netdev)
- result = self.rpc.call_node_deactivate_master_ip(master)
+ result = self.rpc.call_node_deactivate_master_ip(master_params.name,
+ master_params, ems)
result.Raise("Could not disable the master ip")
feedback_fn("Changing master_netdev from %s to %s" %
- (self.cluster.master_netdev, self.op.master_netdev))
+ (master_params.netdev, self.op.master_netdev))
self.cluster.master_netdev = self.op.master_netdev
+ if self.op.master_netmask:
+ master_params = self.cfg.GetMasterNetworkParameters()
+ feedback_fn("Changing master IP netmask to %s" % self.op.master_netmask)
+ result = self.rpc.call_node_change_master_netmask(master_params.name,
+ master_params.netmask,
+ self.op.master_netmask,
+ master_params.ip,
+ master_params.netdev)
+ if result.fail_msg:
+ msg = "Could not change the master IP netmask: %s" % result.fail_msg
+ feedback_fn(msg)
+
+ self.cluster.master_netmask = self.op.master_netmask
+
self.cfg.Update(self.cluster, feedback_fn)
if self.op.master_netdev:
+ master_params = self.cfg.GetMasterNetworkParameters()
feedback_fn("Starting the master ip on the new master netdev (%s)" %
self.op.master_netdev)
- result = self.rpc.call_node_activate_master_ip(master)
+ ems = self.cfg.GetUseExternalMipScript()
+ result = self.rpc.call_node_activate_master_ip(master_params.name,
+ master_params, ems)
if result.fail_msg:
self.LogWarning("Could not re-enable the master ip on"
" the master, please restart manually: %s",
constants.SSH_KNOWN_HOSTS_FILE,
constants.CONFD_HMAC_KEY,
constants.CLUSTER_DOMAIN_SECRET_FILE,
+ constants.SPICE_CERT_FILE,
+ constants.SPICE_CACERT_FILE,
constants.RAPI_USERS_FILE,
])
# Files which should only be on master candidates
files_mc = set()
+
if not redist:
files_mc.add(constants.CLUSTER_CONF_FILE)
+ # FIXME: this should also be replicated but Ganeti doesn't support files_mc
+ # replication
+ files_mc.add(constants.DEFAULT_MASTER_SETUP_SCRIPT)
+
# Files which should only be on VM-capable nodes
files_vm = set(filename
for hv_name in cluster.enabled_hypervisors
"""Activate the master IP.
"""
- master = self.cfg.GetMasterNode()
- result = self.rpc.call_node_activate_master_ip(master)
+ master_params = self.cfg.GetMasterNetworkParameters()
+ ems = self.cfg.GetUseExternalMipScript()
+ result = self.rpc.call_node_activate_master_ip(master_params.name,
+ master_params, ems)
result.Raise("Could not activate the master IP")
"""Deactivate the master IP.
"""
- master = self.cfg.GetMasterNode()
- result = self.rpc.call_node_deactivate_master_ip(master)
+ master_params = self.cfg.GetMasterNetworkParameters()
+ ems = self.cfg.GetUseExternalMipScript()
+ result = self.rpc.call_node_deactivate_master_ip(master_params.name,
+ master_params, ems)
result.Raise("Could not deactivate the master IP")
modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
+ assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER), \
+ "Not owning BGL"
+
# Promote nodes to master candidate as needed
_AdjustCandidatePool(self, exceptions=[node.name])
self.context.RemoveNode(node.name)
# filter out non-vm_capable nodes
toquery_nodes = [name for name in nodenames if all_info[name].vm_capable]
- node_data = lu.rpc.call_node_info(toquery_nodes, lu.cfg.GetVGName(),
- lu.cfg.GetHypervisorType())
- live_data = dict((name, nresult.payload)
+ node_data = lu.rpc.call_node_info(toquery_nodes, [lu.cfg.GetVGName()],
+ [lu.cfg.GetHypervisorType()])
+ live_data = dict((name, _MakeLegacyNodeInfo(nresult.payload))
for (name, nresult) in node_data.items()
if not nresult.fail_msg and nresult.payload)
else:
def ExpandNames(self):
self.nq.ExpandNames(self)
+ def DeclareLocks(self, level):
+ self.nq.DeclareLocks(self, level)
+
def Exec(self, feedback_fn):
return self.nq.OldStyleQuery(self)
selected=self.op.output_fields)
def ExpandNames(self):
+ self.share_locks = _ShareAll()
self.needed_locks = {}
- self.share_locks[locking.LEVEL_NODE] = 1
+
if not self.op.nodes:
self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
else:
selected=self.op.output_fields)
def ExpandNames(self):
+ self.share_locks = _ShareAll()
self.needed_locks = {}
- self.share_locks[locking.LEVEL_NODE] = 1
if self.op.nodes:
self.needed_locks[locking.LEVEL_NODE] = \
def CheckArguments(self):
qcls = _GetQueryImplementation(self.op.what)
- self.impl = qcls(self.op.filter, self.op.fields, self.op.use_locking)
+ self.impl = qcls(self.op.qfilter, self.op.fields, self.op.use_locking)
def ExpandNames(self):
self.impl.ExpandNames(self)
if self.op.ndparams:
utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
+ if self.op.hv_state:
+ self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state, None)
+
+ if self.op.disk_state:
+ self.new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state, None)
+
def Exec(self, feedback_fn):
"""Adds the new node to the cluster.
new_node = self.new_node
node = new_node.name
+ assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER), \
+ "Not owning BGL"
+
# We adding a new node so we assume it's powered
new_node.powered = True
else:
new_node.ndparams = {}
+ if self.op.hv_state:
+ new_node.hv_state_static = self.new_hv_state
+
+ if self.op.disk_state:
+ new_node.disk_state_static = self.new_disk_state
+
# check connectivity
result = self.rpc.call_version([node])[node]
result.Raise("Can't get version information from node %s" % node)
self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
all_mods = [self.op.offline, self.op.master_candidate, self.op.drained,
self.op.master_capable, self.op.vm_capable,
- self.op.secondary_ip, self.op.ndparams]
+ self.op.secondary_ip, self.op.ndparams, self.op.hv_state,
+ self.op.disk_state]
if all_mods.count(None) == len(all_mods):
raise errors.OpPrereqError("Please pass at least one modification",
errors.ECODE_INVAL)
self.lock_all = self.op.auto_promote and self.might_demote
self.lock_instances = self.op.secondary_ip is not None
+ def _InstanceFilter(self, instance):
+ """Filter for getting affected instances.
+
+ """
+ return (instance.disk_template in constants.DTS_INT_MIRROR and
+ self.op.node_name in instance.all_nodes)
+
def ExpandNames(self):
if self.lock_all:
self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
else:
self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
- if self.lock_instances:
- self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
+ # Since modifying a node can have severe effects on currently running
+ # operations the resource lock is at least acquired in shared mode
+ self.needed_locks[locking.LEVEL_NODE_RES] = \
+ self.needed_locks[locking.LEVEL_NODE]
- def DeclareLocks(self, level):
- # If we have locked all instances, before waiting to lock nodes, release
- # all the ones living on nodes unrelated to the current operation.
- if level == locking.LEVEL_NODE and self.lock_instances:
- self.affected_instances = []
- if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
- instances_keep = []
-
- # Build list of instances to release
- locked_i = self.owned_locks(locking.LEVEL_INSTANCE)
- for instance_name, instance in self.cfg.GetMultiInstanceInfo(locked_i):
- if (instance.disk_template in constants.DTS_INT_MIRROR and
- self.op.node_name in instance.all_nodes):
- instances_keep.append(instance_name)
- self.affected_instances.append(instance)
-
- _ReleaseLocks(self, locking.LEVEL_INSTANCE, keep=instances_keep)
-
- assert (set(self.owned_locks(locking.LEVEL_INSTANCE)) ==
- set(instances_keep))
+ # Get node resource and instance locks in shared mode; they are not used
+ # for anything but read-only access
+ self.share_locks[locking.LEVEL_NODE_RES] = 1
+ self.share_locks[locking.LEVEL_INSTANCE] = 1
+
+ if self.lock_instances:
+ self.needed_locks[locking.LEVEL_INSTANCE] = \
+ frozenset(self.cfg.GetInstancesInfoByFilter(self._InstanceFilter))
def BuildHooksEnv(self):
"""Build hooks env.
"""
node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
+ if self.lock_instances:
+ affected_instances = \
+ self.cfg.GetInstancesInfoByFilter(self._InstanceFilter)
+
+ # Verify instance locks
+ owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
+ wanted_instances = frozenset(affected_instances.keys())
+ if wanted_instances - owned_instances:
+ raise errors.OpPrereqError("Instances affected by changing node %s's"
+ " secondary IP address have changed since"
+ " locks were acquired, wanted '%s', have"
+ " '%s'; retry the operation" %
+ (self.op.node_name,
+ utils.CommaJoin(wanted_instances),
+ utils.CommaJoin(owned_instances)),
+ errors.ECODE_STATE)
+ else:
+ affected_instances = None
+
if (self.op.master_candidate is not None or
self.op.drained is not None or
self.op.offline is not None):
if old_role == self._ROLE_OFFLINE and new_role != old_role:
# Trying to transition out of offline status
- result = self.rpc.call_version([node.name])[node.name]
+ # TODO: Use standard RPC runner, but make sure it works when the node is
+ # still marked offline
+ result = rpc.BootstrapRunner().call_version([node.name])[node.name]
if result.fail_msg:
raise errors.OpPrereqError("Node %s is being de-offlined but fails"
" to report its version: %s" %
raise errors.OpPrereqError("Cannot change the secondary ip on a single"
" homed cluster", errors.ECODE_INVAL)
+ assert not (frozenset(affected_instances) -
+ self.owned_locks(locking.LEVEL_INSTANCE))
+
if node.offline:
- if self.affected_instances:
- raise errors.OpPrereqError("Cannot change secondary ip: offline"
- " node has instances (%s) configured"
- " to use it" % self.affected_instances)
+ if affected_instances:
+ raise errors.OpPrereqError("Cannot change secondary IP address:"
+ " offline node has instances (%s)"
+ " configured to use it" %
+ utils.CommaJoin(affected_instances.keys()))
else:
# On online nodes, check that no instances are running, and that
# the node has the new ip and we can reach it.
- for instance in self.affected_instances:
- _CheckInstanceDown(self, instance, "cannot change secondary ip")
+ for instance in affected_instances.values():
+ _CheckInstanceState(self, instance, INSTANCE_DOWN,
+ msg="cannot change secondary ip")
_CheckNodeHasSecondaryIP(self, node.name, self.op.secondary_ip, True)
if master.name != node.name:
utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
self.new_ndparams = new_ndparams
+ if self.op.hv_state:
+ self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state,
+ self.node.hv_state_static)
+
+ if self.op.disk_state:
+ self.new_disk_state = \
+ _MergeAndVerifyDiskState(self.op.disk_state,
+ self.node.disk_state_static)
+
def Exec(self, feedback_fn):
"""Modifies a node.
if self.op.powered is not None:
node.powered = self.op.powered
+ if self.op.hv_state:
+ node.hv_state_static = self.new_hv_state
+
+ if self.op.disk_state:
+ node.disk_state_static = self.new_disk_state
+
for attr in ["master_capable", "vm_capable"]:
val = getattr(self.op, attr)
if val is not None:
"architecture": (platform.architecture()[0], platform.machine()),
"name": cluster.cluster_name,
"master": cluster.master_node,
- "default_hypervisor": cluster.enabled_hypervisors[0],
+ "default_hypervisor": cluster.primary_hypervisor,
"enabled_hypervisors": cluster.enabled_hypervisors,
"hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
for hypervisor_name in cluster.enabled_hypervisors]),
"os_hvp": os_hvp,
"beparams": cluster.beparams,
"osparams": cluster.osparams,
+ "ipolicy": cluster.ipolicy,
"nicparams": cluster.nicparams,
"ndparams": cluster.ndparams,
"candidate_pool_size": cluster.candidate_pool_size,
"master_netdev": cluster.master_netdev,
+ "master_netmask": cluster.master_netmask,
+ "use_external_mip_script": cluster.use_external_mip_script,
"volume_group_name": cluster.volume_group_name,
"drbd_usermode_helper": cluster.drbd_usermode_helper,
"file_storage_dir": cluster.file_storage_dir,
_ShutdownInstanceDisks.
"""
- _CheckInstanceDown(lu, instance, "cannot shutdown disks")
+ _CheckInstanceState(lu, instance, INSTANCE_DOWN, msg="cannot shutdown disks")
_ShutdownInstanceDisks(lu, instance, disks=disks)
we cannot check the node
"""
- nodeinfo = lu.rpc.call_node_info([node], None, hypervisor_name)
+ nodeinfo = lu.rpc.call_node_info([node], None, [hypervisor_name])
nodeinfo[node].Raise("Can't get data from node %s" % node,
prereq=True, ecode=errors.ECODE_ENVIRON)
- free_mem = nodeinfo[node].payload.get("memory_free", None)
+ (_, _, (hv_info, )) = nodeinfo[node].payload
+
+ free_mem = hv_info.get("memory_free", None)
if not isinstance(free_mem, int):
raise errors.OpPrereqError("Can't compute free memory on node %s, result"
" was '%s'" % (node, free_mem),
or we cannot check the node
"""
- nodeinfo = lu.rpc.call_node_info(nodenames, vg, None)
+ nodeinfo = lu.rpc.call_node_info(nodenames, [vg], None)
for node in nodenames:
info = nodeinfo[node]
info.Raise("Cannot get current information from node %s" % node,
prereq=True, ecode=errors.ECODE_ENVIRON)
- vg_free = info.payload.get("vg_free", None)
+ (_, (vg_info, ), _) = info.payload
+ vg_free = vg_info.get("vg_free", None)
if not isinstance(vg_free, int):
raise errors.OpPrereqError("Can't compute free disk space on node"
" %s for vg %s, result was '%s'" %
errors.ECODE_NORES)
+def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
+ """Checks if nodes have enough physical CPUs
+
+ This function checks if all given nodes have the needed number of
+ physical CPUs. In case any node has less CPUs or we cannot get the
+ information from the node, this function raises an OpPrereqError
+ exception.
+
+ @type lu: C{LogicalUnit}
+ @param lu: a logical unit from which we get configuration data
+ @type nodenames: C{list}
+ @param nodenames: the list of node names to check
+ @type requested: C{int}
+ @param requested: the minimum acceptable number of physical CPUs
+ @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
+ or we cannot check the node
+
+ """
+ nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name])
+ for node in nodenames:
+ info = nodeinfo[node]
+ info.Raise("Cannot get current information from node %s" % node,
+ prereq=True, ecode=errors.ECODE_ENVIRON)
+ (_, _, (hv_info, )) = info.payload
+ num_cpus = hv_info.get("cpu_total", None)
+ if not isinstance(num_cpus, int):
+ raise errors.OpPrereqError("Can't compute the number of physical CPUs"
+ " on node %s, result was '%s'" %
+ (node, num_cpus), errors.ECODE_ENVIRON)
+ if requested > num_cpus:
+ raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
+ "required" % (node, num_cpus, requested),
+ errors.ECODE_NORES)
+
+
class LUInstanceStartup(LogicalUnit):
"""Starts an instance.
# extra beparams
if self.op.beparams:
# fill the beparams dict
+ objects.UpgradeBeParams(self.op.beparams)
utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
def ExpandNames(self):
hv_type.CheckParameterSyntax(filled_hvp)
_CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
+ _CheckInstanceState(self, instance, INSTANCE_ONLINE)
+
self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline
if self.primary_offline and self.op.ignore_offline_nodes:
if not remote_info.payload: # not running already
_CheckNodeFreeMemory(self, instance.primary_node,
"starting instance %s" % instance.name,
- bep[constants.BE_MEMORY], instance.hypervisor)
+ bep[constants.BE_MAXMEM], instance.hypervisor)
def Exec(self, feedback_fn):
"""Start the instance.
_StartInstanceDisks(self, instance, force)
- result = self.rpc.call_instance_start(node_current, instance,
- self.op.hvparams, self.op.beparams,
- self.op.startup_paused)
+ result = \
+ self.rpc.call_instance_start(node_current,
+ (instance, self.op.hvparams,
+ self.op.beparams),
+ self.op.startup_paused)
msg = result.fail_msg
if msg:
_ShutdownInstanceDisks(self, instance)
self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
-
+ _CheckInstanceState(self, instance, INSTANCE_ONLINE)
_CheckNodeOnline(self, instance.primary_node)
# check bridges existence
self.LogInfo("Instance %s was already stopped, starting now",
instance.name)
_StartInstanceDisks(self, instance, ignore_secondaries)
- result = self.rpc.call_instance_start(node_current, instance,
- None, None, False)
+ result = self.rpc.call_instance_start(node_current,
+ (instance, None, None), False)
msg = result.fail_msg
if msg:
_ShutdownInstanceDisks(self, instance)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
+ _CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
+
self.primary_offline = \
self.cfg.GetNodeInfo(self.instance.primary_node).offline
raise errors.OpPrereqError("Instance '%s' has no disks" %
self.op.instance_name,
errors.ECODE_INVAL)
- _CheckInstanceDown(self, instance, "cannot reinstall")
+ _CheckInstanceState(self, instance, INSTANCE_DOWN, msg="cannot reinstall")
if self.op.os_type is not None:
# OS verification
try:
feedback_fn("Running the instance OS create scripts...")
# FIXME: pass debug option from opcode to backend
- result = self.rpc.call_instance_os_add(inst.primary_node, inst, True,
- self.op.debug_level,
- osparams=self.os_inst)
+ result = self.rpc.call_instance_os_add(inst.primary_node,
+ (inst, self.os_inst), True,
+ self.op.debug_level)
result.Raise("Could not install OS for instance %s on node %s" %
(inst.name, inst.primary_node))
finally:
# otherwise we need to lock all nodes for disk re-creation
primary_only = bool(self.op.nodes)
self._LockInstancesNodes(primary_only=primary_only)
+ elif level == locking.LEVEL_NODE_RES:
+ # Copy node locks
+ self.needed_locks[locking.LEVEL_NODE_RES] = \
+ self.needed_locks[locking.LEVEL_NODE][:]
def BuildHooksEnv(self):
"""Build hooks env.
self.op.instance_name, errors.ECODE_INVAL)
# if we replace nodes *and* the old primary is offline, we don't
# check
- assert instance.primary_node in self.needed_locks[locking.LEVEL_NODE]
+ assert instance.primary_node in self.owned_locks(locking.LEVEL_NODE)
+ assert instance.primary_node in self.owned_locks(locking.LEVEL_NODE_RES)
old_pnode = self.cfg.GetNodeInfo(instance.primary_node)
if not (self.op.nodes and old_pnode.offline):
- _CheckInstanceDown(self, instance, "cannot recreate disks")
+ _CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
+ msg="cannot recreate disks")
if not self.op.disks:
self.op.disks = range(len(instance.disks))
"""
instance = self.instance
+ assert (self.owned_locks(locking.LEVEL_NODE) ==
+ self.owned_locks(locking.LEVEL_NODE_RES))
+
to_skip = []
mods = [] # keeps track of needed logical_id changes
instance = self.cfg.GetInstanceInfo(self.op.instance_name)
assert instance is not None
_CheckNodeOnline(self, instance.primary_node)
- _CheckInstanceDown(self, instance, "cannot rename")
+ _CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
+ msg="cannot rename")
self.instance = instance
new_name = self.op.new_name
def ExpandNames(self):
self._ExpandAndLockInstance()
self.needed_locks[locking.LEVEL_NODE] = []
+ self.needed_locks[locking.LEVEL_NODE_RES] = []
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
def DeclareLocks(self, level):
if level == locking.LEVEL_NODE:
self._LockInstancesNodes()
+ elif level == locking.LEVEL_NODE_RES:
+ # Copy node locks
+ self.needed_locks[locking.LEVEL_NODE_RES] = \
+ self.needed_locks[locking.LEVEL_NODE][:]
def BuildHooksEnv(self):
"""Build hooks env.
" node %s: %s" %
(instance.name, instance.primary_node, msg))
+ assert (self.owned_locks(locking.LEVEL_NODE) ==
+ self.owned_locks(locking.LEVEL_NODE_RES))
+ assert not (set(instance.all_nodes) -
+ self.owned_locks(locking.LEVEL_NODE)), \
+ "Not owning correct locks"
+
_RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
target_node = _ExpandNodeName(self.cfg, self.op.target_node)
self.op.target_node = target_node
self.needed_locks[locking.LEVEL_NODE] = [target_node]
+ self.needed_locks[locking.LEVEL_NODE_RES] = []
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
def DeclareLocks(self, level):
if level == locking.LEVEL_NODE:
self._LockInstancesNodes(primary_only=True)
+ elif level == locking.LEVEL_NODE_RES:
+ # Copy node locks
+ self.needed_locks[locking.LEVEL_NODE_RES] = \
+ self.needed_locks[locking.LEVEL_NODE][:]
def BuildHooksEnv(self):
"""Build hooks env.
_CheckNodeNotDrained(self, target_node)
_CheckNodeVmCapable(self, target_node)
- if instance.admin_up:
+ if instance.admin_state == constants.ADMINST_UP:
# check memory requirements on the secondary node
_CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
- instance.name, bep[constants.BE_MEMORY],
+ instance.name, bep[constants.BE_MAXMEM],
instance.hypervisor)
else:
self.LogInfo("Not checking memory on the secondary node as"
self.LogInfo("Shutting down instance %s on source node %s",
instance.name, source_node)
+ assert (self.owned_locks(locking.LEVEL_NODE) ==
+ self.owned_locks(locking.LEVEL_NODE_RES))
+
result = self.rpc.call_instance_shutdown(source_node, instance,
self.op.shutdown_timeout)
msg = result.fail_msg
_RemoveDisks(self, instance, target_node=source_node)
# Only start the instance if it's marked as up
- if instance.admin_up:
+ if instance.admin_state == constants.ADMINST_UP:
self.LogInfo("Starting instance %s on node %s",
instance.name, target_node)
_ShutdownInstanceDisks(self, instance)
raise errors.OpExecError("Can't activate the instance's disks")
- result = self.rpc.call_instance_start(target_node, instance,
- None, None, False)
+ result = self.rpc.call_instance_start(target_node,
+ (instance, None, None), False)
msg = result.fail_msg
if msg:
_ShutdownInstanceDisks(self, instance)
@ivar shutdown_timeout: In case of failover timeout of the shutdown
"""
+
+ # Constants
+ _MIGRATION_POLL_INTERVAL = 1 # seconds
+ _MIGRATION_FEEDBACK_INTERVAL = 10 # seconds
+
def __init__(self, lu, instance_name, cleanup=False,
failover=False, fallback=False,
ignore_consistency=False,
assert instance is not None
self.instance = instance
- if (not self.cleanup and not instance.admin_up and not self.failover and
- self.fallback):
- self.lu.LogInfo("Instance is marked down, fallback allowed, switching"
- " to failover")
+ if (not self.cleanup and
+ not instance.admin_state == constants.ADMINST_UP and
+ not self.failover and self.fallback):
+ self.lu.LogInfo("Instance is marked down or offline, fallback allowed,"
+ " switching to failover")
self.failover = True
if instance.disk_template not in constants.DTS_MIRRORED:
i_be = self.cfg.GetClusterInfo().FillBE(instance)
# check memory requirements on the secondary node
- if not self.failover or instance.admin_up:
+ if not self.failover or instance.admin_state == constants.ADMINST_UP:
_CheckNodeFreeMemory(self.lu, target_node, "migrating instance %s" %
- instance.name, i_be[constants.BE_MEMORY],
+ instance.name, i_be[constants.BE_MAXMEM],
instance.hypervisor)
else:
self.lu.LogInfo("Not checking memory on the secondary node as"
" instance will not be started")
+ # check if failover must be forced instead of migration
+ if (not self.cleanup and not self.failover and
+ i_be[constants.BE_ALWAYS_FAILOVER]):
+ if self.fallback:
+ self.lu.LogInfo("Instance configured to always failover; fallback"
+ " to failover")
+ self.failover = True
+ else:
+ raise errors.OpPrereqError("This instance has been configured to"
+ " always failover, please allow failover",
+ errors.ECODE_STATE)
+
    # check bridge existence
_CheckInstanceBridgesExist(self.lu, instance, node=target_node)
"""
instance = self.instance
target_node = self.target_node
+ source_node = self.source_node
migration_info = self.migration_info
- abort_result = self.rpc.call_finalize_migration(target_node,
- instance,
- migration_info,
- False)
+ abort_result = self.rpc.call_instance_finalize_migration_dst(target_node,
+ instance,
+ migration_info,
+ False)
abort_msg = abort_result.fail_msg
if abort_msg:
logging.error("Aborting migration failed on target node %s: %s",
      # Don't raise an exception here, as we still have to try to revert the
# disk status, even if this step failed.
+ abort_result = self.rpc.call_instance_finalize_migration_src(source_node,
+ instance, False, self.live)
+ abort_msg = abort_result.fail_msg
+ if abort_msg:
+ logging.error("Aborting migration failed on source node %s: %s",
+ source_node, abort_msg)
+
def _ExecMigration(self):
"""Migrate an instance.
# Check for hypervisor version mismatch and warn the user.
nodeinfo = self.rpc.call_node_info([source_node, target_node],
- None, self.instance.hypervisor)
- src_info = nodeinfo[source_node]
- dst_info = nodeinfo[target_node]
-
- if ((constants.HV_NODEINFO_KEY_VERSION in src_info.payload) and
- (constants.HV_NODEINFO_KEY_VERSION in dst_info.payload)):
- src_version = src_info.payload[constants.HV_NODEINFO_KEY_VERSION]
- dst_version = dst_info.payload[constants.HV_NODEINFO_KEY_VERSION]
+ None, [self.instance.hypervisor])
+ for ninfo in nodeinfo.values():
+ ninfo.Raise("Unable to retrieve node information from node '%s'" %
+ ninfo.node)
+ (_, _, (src_info, )) = nodeinfo[source_node].payload
+ (_, _, (dst_info, )) = nodeinfo[target_node].payload
+
+ if ((constants.HV_NODEINFO_KEY_VERSION in src_info) and
+ (constants.HV_NODEINFO_KEY_VERSION in dst_info)):
+ src_version = src_info[constants.HV_NODEINFO_KEY_VERSION]
+ dst_version = dst_info[constants.HV_NODEINFO_KEY_VERSION]
if src_version != dst_version:
self.feedback_fn("* warning: hypervisor version mismatch between"
" source (%s) and target (%s) node" %
raise errors.OpExecError("Could not migrate instance %s: %s" %
(instance.name, msg))
+ self.feedback_fn("* starting memory transfer")
+ last_feedback = time.time()
+ while True:
+ result = self.rpc.call_instance_get_migration_status(source_node,
+ instance)
+ msg = result.fail_msg
+ ms = result.payload # MigrationStatus instance
+ if msg or (ms.status in constants.HV_MIGRATION_FAILED_STATUSES):
+ logging.error("Instance migration failed, trying to revert"
+ " disk status: %s", msg)
+ self.feedback_fn("Migration failed, aborting")
+ self._AbortMigration()
+ self._RevertDiskStatus()
+ raise errors.OpExecError("Could not migrate instance %s: %s" %
+ (instance.name, msg))
+
+ if result.payload.status != constants.HV_MIGRATION_ACTIVE:
+ self.feedback_fn("* memory transfer complete")
+ break
+
+ if (utils.TimeoutExpired(last_feedback,
+ self._MIGRATION_FEEDBACK_INTERVAL) and
+ ms.transferred_ram is not None):
+ mem_progress = 100 * float(ms.transferred_ram) / float(ms.total_ram)
+ self.feedback_fn("* memory transfer progress: %.2f %%" % mem_progress)
+ last_feedback = time.time()
+
+ time.sleep(self._MIGRATION_POLL_INTERVAL)
+
+ result = self.rpc.call_instance_finalize_migration_src(source_node,
+ instance,
+ True,
+ self.live)
+ msg = result.fail_msg
+ if msg:
+ logging.error("Instance migration succeeded, but finalization failed"
+ " on the source node: %s", msg)
+ raise errors.OpExecError("Could not finalize instance migration: %s" %
+ msg)
+
instance.primary_node = target_node
+
# distribute new instance config to the other nodes
self.cfg.Update(instance, self.feedback_fn)
- result = self.rpc.call_finalize_migration(target_node,
- instance,
- migration_info,
- True)
+ result = self.rpc.call_instance_finalize_migration_dst(target_node,
+ instance,
+ migration_info,
+ True)
msg = result.fail_msg
if msg:
- logging.error("Instance migration succeeded, but finalization failed:"
- " %s", msg)
+ logging.error("Instance migration succeeded, but finalization failed"
+ " on the target node: %s", msg)
raise errors.OpExecError("Could not finalize instance migration: %s" %
msg)
source_node = instance.primary_node
target_node = self.target_node
- if instance.admin_up:
+ if instance.admin_state == constants.ADMINST_UP:
self.feedback_fn("* checking disk consistency between source and target")
for dev in instance.disks:
# for drbd, these are drbd over lvm
self.cfg.Update(instance, self.feedback_fn)
# Only start the instance if it's marked as up
- if instance.admin_up:
+ if instance.admin_state == constants.ADMINST_UP:
self.feedback_fn("* activating the instance's disks on target node %s" %
target_node)
logging.info("Starting instance %s on node %s",
self.feedback_fn("* starting the instance on the target node %s" %
target_node)
- result = self.rpc.call_instance_start(target_node, instance, None, None,
+ result = self.rpc.call_instance_start(target_node, (instance, None, None),
False)
msg = result.fail_msg
if msg:
return results
+def _ComputeLDParams(disk_template, disk_params):
+ """Computes Logical Disk parameters from Disk Template parameters.
+
+ @type disk_template: string
+ @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
+ @type disk_params: dict
+  @param disk_params: disk template parameters; dict(template_name -> parameters)
+ @rtype: list(dict)
+ @return: a list of dicts, one for each node of the disk hierarchy. Each dict
+ contains the LD parameters of the node. The tree is flattened in-order.
+
+ """
+ if disk_template not in constants.DISK_TEMPLATES:
+ raise errors.ProgrammerError("Unknown disk template %s" % disk_template)
+
+ result = list()
+ dt_params = disk_params[disk_template]
+ if disk_template == constants.DT_DRBD8:
+ drbd_params = {
+ constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
+ constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
+ constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
+ constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
+ constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
+ constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
+ constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
+ constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
+ constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
+ constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
+ constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
+ constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
+ }
+
+ drbd_params = \
+ objects.FillDict(constants.DISK_LD_DEFAULTS[constants.LD_DRBD8],
+ drbd_params)
+
+ result.append(drbd_params)
+
+ # data LV
+ data_params = {
+ constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
+ }
+ data_params = \
+ objects.FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV],
+ data_params)
+ result.append(data_params)
+
+ # metadata LV
+ meta_params = {
+ constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
+ }
+ meta_params = \
+ objects.FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV],
+ meta_params)
+ result.append(meta_params)
+
+ elif (disk_template == constants.DT_FILE or
+ disk_template == constants.DT_SHARED_FILE):
+ result.append(constants.DISK_LD_DEFAULTS[constants.LD_FILE])
+
+ elif disk_template == constants.DT_PLAIN:
+ params = {
+ constants.LDP_STRIPES: dt_params[constants.LV_STRIPES],
+ }
+ params = \
+ objects.FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV],
+ params)
+ result.append(params)
+
+ elif disk_template == constants.DT_BLOCK:
+ result.append(constants.DISK_LD_DEFAULTS[constants.LD_BLOCKDEV])
+
+ return result
+
+
def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
- iv_name, p_minor, s_minor):
+ iv_name, p_minor, s_minor, drbd_params, data_params,
+ meta_params):
"""Generate a drbd8 device complete with its children.
"""
assert len(vgnames) == len(names) == 2
port = lu.cfg.AllocatePort()
shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
+
dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
- logical_id=(vgnames[0], names[0]))
- dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
- logical_id=(vgnames[1], names[1]))
+ logical_id=(vgnames[0], names[0]),
+ params=data_params)
+ dev_meta = objects.Disk(dev_type=constants.LD_LV, size=DRBD_META_SIZE,
+ logical_id=(vgnames[1], names[1]),
+ params=meta_params)
drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
logical_id=(primary, secondary, port,
p_minor, s_minor,
shared_secret),
children=[dev_data, dev_meta],
- iv_name=iv_name)
+ iv_name=iv_name, params=drbd_params)
return drbd_dev
instance_name, primary_node,
secondary_nodes, disk_info,
file_storage_dir, file_driver,
- base_index, feedback_fn):
+ base_index, feedback_fn, disk_params):
"""Generate the entire disk layout for a given template type.
"""
vgname = lu.cfg.GetVGName()
disk_count = len(disk_info)
disks = []
+ ld_params = _ComputeLDParams(template_name, disk_params)
if template_name == constants.DT_DISKLESS:
pass
elif template_name == constants.DT_PLAIN:
size=disk[constants.IDISK_SIZE],
logical_id=(vg, names[idx]),
iv_name="disk/%d" % disk_index,
- mode=disk[constants.IDISK_MODE])
+ mode=disk[constants.IDISK_MODE],
+ params=ld_params[0])
disks.append(disk_dev)
elif template_name == constants.DT_DRBD8:
+ drbd_params, data_params, meta_params = ld_params
if len(secondary_nodes) != 1:
raise errors.ProgrammerError("Wrong template configuration")
remote_node = secondary_nodes[0]
names.append(lv_prefix + "_meta")
for idx, disk in enumerate(disk_info):
disk_index = idx + base_index
+ drbd_default_metavg = drbd_params[constants.LDP_DEFAULT_METAVG]
data_vg = disk.get(constants.IDISK_VG, vgname)
- meta_vg = disk.get(constants.IDISK_METAVG, data_vg)
+ meta_vg = disk.get(constants.IDISK_METAVG, drbd_default_metavg)
disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
disk[constants.IDISK_SIZE],
[data_vg, meta_vg],
names[idx * 2:idx * 2 + 2],
"disk/%d" % disk_index,
- minors[idx * 2], minors[idx * 2 + 1])
+ minors[idx * 2], minors[idx * 2 + 1],
+ drbd_params, data_params, meta_params)
disk_dev.mode = disk[constants.IDISK_MODE]
disks.append(disk_dev)
elif template_name == constants.DT_FILE:
logical_id=(file_driver,
"%s/disk%d" % (file_storage_dir,
disk_index)),
- mode=disk[constants.IDISK_MODE])
+ mode=disk[constants.IDISK_MODE],
+ params=ld_params[0])
disks.append(disk_dev)
elif template_name == constants.DT_SHARED_FILE:
if len(secondary_nodes) != 0:
logical_id=(file_driver,
"%s/disk%d" % (file_storage_dir,
disk_index)),
- mode=disk[constants.IDISK_MODE])
+ mode=disk[constants.IDISK_MODE],
+ params=ld_params[0])
disks.append(disk_dev)
elif template_name == constants.DT_BLOCK:
if len(secondary_nodes) != 0:
logical_id=(constants.BLOCKDEV_DRIVER_MANUAL,
disk[constants.IDISK_ADOPT]),
iv_name="disk/%d" % disk_index,
- mode=disk[constants.IDISK_MODE])
+ mode=disk[constants.IDISK_MODE],
+ params=ld_params[0])
disks.append(disk_dev)
else:
constants.DT_DISKLESS: {},
constants.DT_PLAIN: _compute(disks, 0),
# 128 MB are added for drbd metadata for each disk
- constants.DT_DRBD8: _compute(disks, 128),
+ constants.DT_DRBD8: _compute(disks, DRBD_META_SIZE),
constants.DT_FILE: {},
constants.DT_SHARED_FILE: {},
}
constants.DT_DISKLESS: None,
constants.DT_PLAIN: sum(d[constants.IDISK_SIZE] for d in disks),
# 128 MB are added for drbd metadata for each disk
- constants.DT_DRBD8: sum(d[constants.IDISK_SIZE] + 128 for d in disks),
+ constants.DT_DRBD8:
+ sum(d[constants.IDISK_SIZE] + DRBD_META_SIZE for d in disks),
constants.DT_FILE: None,
constants.DT_SHARED_FILE: 0,
constants.DT_BLOCK: 0,
"""
nodenames = _FilterVmNodes(lu, nodenames)
- hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
- hvname,
- hvparams)
+
+ cluster = lu.cfg.GetClusterInfo()
+ hvfull = objects.FillDict(cluster.hvparams.get(hvname, {}), hvparams)
+
+ hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames, hvname, hvfull)
for node in nodenames:
info = hvinfo[node]
if info.offline:
"""
nodenames = _FilterVmNodes(lu, nodenames)
- result = lu.rpc.call_os_validate(required, nodenames, osname,
+ result = lu.rpc.call_os_validate(nodenames, required, osname,
[constants.OS_VALIDATE_PARAMETERS],
osparams)
for node, nres in result.items():
self.add_locks[locking.LEVEL_INSTANCE] = instance_name
if self.op.iallocator:
+ # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
+ # specifying a group on instance creation and then selecting nodes from
+ # that group
self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+ self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
else:
self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
nodelist = [self.op.pnode]
self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
nodelist.append(self.op.snode)
self.needed_locks[locking.LEVEL_NODE] = nodelist
+ # Lock resources of instance's primary and secondary nodes (copy to
+    # prevent accidental modification)
+ self.needed_locks[locking.LEVEL_NODE_RES] = list(nodelist)
# in case of import lock the source node too
if self.op.mode == constants.INSTANCE_IMPORT:
tags=self.op.tags,
os=self.op.os_type,
vcpus=self.be_full[constants.BE_VCPUS],
- memory=self.be_full[constants.BE_MEMORY],
+ memory=self.be_full[constants.BE_MAXMEM],
disks=self.disks,
nics=nics,
hypervisor=self.op.hypervisor,
secondary_nodes=self.secondaries,
status=self.op.start,
os_type=self.op.os_type,
- memory=self.be_full[constants.BE_MEMORY],
+ minmem=self.be_full[constants.BE_MINMEM],
+ maxmem=self.be_full[constants.BE_MAXMEM],
vcpus=self.be_full[constants.BE_VCPUS],
nics=_NICListToTuple(self, self.nics),
disk_template=self.op.disk_template,
if einfo.has_option(constants.INISECT_INS, "disk_template"):
self.op.disk_template = einfo.get(constants.INISECT_INS,
"disk_template")
+ if self.op.disk_template not in constants.DISK_TEMPLATES:
+ raise errors.OpPrereqError("Disk template specified in configuration"
+ " file is not one of the allowed values:"
+ " %s" % " ".join(constants.DISK_TEMPLATES))
else:
raise errors.OpPrereqError("No disk template specified and the export"
" is missing the disk_template information",
errors.ECODE_INVAL)
if not self.op.disks:
- if einfo.has_option(constants.INISECT_INS, "disk_count"):
- disks = []
- # TODO: import the disk iv_name too
- for idx in range(einfo.getint(constants.INISECT_INS, "disk_count")):
+ disks = []
+ # TODO: import the disk iv_name too
+ for idx in range(constants.MAX_DISKS):
+ if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
disks.append({constants.IDISK_SIZE: disk_sz})
- self.op.disks = disks
- else:
+ self.op.disks = disks
+ if not disks and self.op.disk_template != constants.DT_DISKLESS:
raise errors.OpPrereqError("No disk info specified and the export"
" is missing the disk information",
errors.ECODE_INVAL)
- if (not self.op.nics and
- einfo.has_option(constants.INISECT_INS, "nic_count")):
+ if not self.op.nics:
nics = []
- for idx in range(einfo.getint(constants.INISECT_INS, "nic_count")):
- ndict = {}
- for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
- v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
- ndict[name] = v
- nics.append(ndict)
+ for idx in range(constants.MAX_NICS):
+ if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
+ ndict = {}
+ for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
+ v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
+ ndict[name] = v
+ nics.append(ndict)
+ else:
+ break
self.op.nics = nics
if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
for name, value in einfo.items(constants.INISECT_BEP):
if name not in self.op.beparams:
self.op.beparams[name] = value
+ # Compatibility for the old "memory" be param
+ if name == constants.BE_MEMORY:
+ if constants.BE_MAXMEM not in self.op.beparams:
+ self.op.beparams[constants.BE_MAXMEM] = value
+ if constants.BE_MINMEM not in self.op.beparams:
+ self.op.beparams[constants.BE_MINMEM] = value
else:
# try to read the parameters old style, from the main section
for name in constants.BES_PARAMETERS:
raise errors.OpPrereqError("Cluster does not support lvm-based"
" instances", errors.ECODE_STATE)
- if self.op.hypervisor is None:
+ if (self.op.hypervisor is None or
+ self.op.hypervisor == constants.VALUE_AUTO):
self.op.hypervisor = self.cfg.GetHypervisorType()
cluster = self.cfg.GetClusterInfo()
_CheckGlobalHvParams(self.op.hvparams)
# fill and remember the beparams dict
+ default_beparams = cluster.beparams[constants.PP_DEFAULT]
+ for param, value in self.op.beparams.iteritems():
+ if value == constants.VALUE_AUTO:
+ self.op.beparams[param] = default_beparams[param]
+ objects.UpgradeBeParams(self.op.beparams)
utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
self.be_full = cluster.SimpleFillBE(self.op.beparams)
for idx, nic in enumerate(self.op.nics):
nic_mode_req = nic.get(constants.INIC_MODE, None)
nic_mode = nic_mode_req
- if nic_mode is None:
+ if nic_mode is None or nic_mode == constants.VALUE_AUTO:
nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
# in routed mode, for the first nic, the default ip is 'auto'
# Build nic parameters
link = nic.get(constants.INIC_LINK, None)
+ if link == constants.VALUE_AUTO:
+ link = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_LINK]
nicparams = {}
if nic_mode_req:
- nicparams[constants.NIC_MODE] = nic_mode_req
+ nicparams[constants.NIC_MODE] = nic_mode
if link:
nicparams[constants.NIC_LINK] = link
constants.IDISK_SIZE: size,
constants.IDISK_MODE: mode,
constants.IDISK_VG: data_vg,
- constants.IDISK_METAVG: disk.get(constants.IDISK_METAVG, data_vg),
}
+ if constants.IDISK_METAVG in disk:
+ new_disk[constants.IDISK_METAVG] = disk[constants.IDISK_METAVG]
if constants.IDISK_ADOPT in disk:
new_disk[constants.IDISK_ADOPT] = disk[constants.IDISK_ADOPT]
self.disks.append(new_disk)
if self.op.mode == constants.INSTANCE_IMPORT:
-
- # Check that the new instance doesn't have less disks than the export
- instance_disks = len(self.disks)
- export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
- if instance_disks < export_disks:
- raise errors.OpPrereqError("Not enough disks to import."
- " (instance: %d, export: %d)" %
- (instance_disks, export_disks),
- errors.ECODE_INVAL)
-
disk_images = []
- for idx in range(export_disks):
+ for idx in range(len(self.disks)):
option = "disk%d_dump" % idx
if export_info.has_option(constants.INISECT_INS, option):
# FIXME: are the old os-es, disk sizes, etc. useful?
self.src_images = disk_images
old_name = export_info.get(constants.INISECT_INS, "name")
- try:
- exp_nic_count = export_info.getint(constants.INISECT_INS, "nic_count")
- except (TypeError, ValueError), err:
- raise errors.OpPrereqError("Invalid export file, nic_count is not"
- " an integer: %s" % str(err),
- errors.ECODE_STATE)
if self.op.instance_name == old_name:
for idx, nic in enumerate(self.nics):
- if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
+ if nic.mac == constants.VALUE_AUTO:
nic_mac_ini = "nic%d_mac" % idx
nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
_CheckNodeVmCapable(self, self.op.snode)
self.secondaries.append(self.op.snode)
+ snode = self.cfg.GetNodeInfo(self.op.snode)
+ if pnode.group != snode.group:
+ self.LogWarning("The primary and secondary nodes are in two"
+ " different node groups; the disk parameters"
+ " from the first disk's node group will be"
+ " used")
+
nodenames = [pnode.name] + self.secondaries
+ # disk parameters (not customizable at instance or node level)
+ # just use the primary node parameters, ignoring the secondary.
+ self.diskparams = self.cfg.GetNodeGroup(pnode.group).diskparams
+
if not self.adopt_disks:
# Check lv size requirements, if not adopting
req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
_CheckNicsBridgesExist(self, self.nics, self.pnode.name)
# memory check on primary node
+ #TODO(dynmem): use MINMEM for checking
if self.op.start:
_CheckNodeFreeMemory(self, self.pnode.name,
"creating instance %s" % self.op.instance_name,
- self.be_full[constants.BE_MEMORY],
+ self.be_full[constants.BE_MAXMEM],
self.op.hypervisor)
self.dry_run_result = list(nodenames)
instance = self.op.instance_name
pnode_name = self.pnode.name
+ assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
+ self.owned_locks(locking.LEVEL_NODE)), \
+ "Node locks differ from node resource locks"
+
ht_kind = self.op.hypervisor
if ht_kind in constants.HTS_REQ_PORT:
network_port = self.cfg.AllocatePort()
self.instance_file_storage_dir,
self.op.file_driver,
0,
- feedback_fn)
+ feedback_fn,
+ self.diskparams)
iobj = objects.Instance(name=instance, os=self.op.os_type,
primary_node=pnode_name,
nics=self.nics, disks=disks,
disk_template=self.op.disk_template,
- admin_up=False,
+ admin_state=constants.ADMINST_DOWN,
network_port=network_port,
beparams=self.op.beparams,
hvparams=self.op.hvparams,
raise errors.OpExecError("There are some degraded disks for"
" this instance")
+ # Release all node resource locks
+ _ReleaseLocks(self, locking.LEVEL_NODE_RES)
+
if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
if self.op.mode == constants.INSTANCE_CREATE:
if not self.op.no_install:
feedback_fn("* running the instance OS create scripts...")
# FIXME: pass debug option from opcode to backend
os_add_result = \
- self.rpc.call_instance_os_add(pnode_name, iobj, False,
+ self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
self.op.debug_level)
if pause_sync:
feedback_fn("* resuming disk sync")
raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
% self.op.mode)
+ assert not self.owned_locks(locking.LEVEL_NODE_RES)
+
if self.op.start:
- iobj.admin_up = True
+ iobj.admin_state = constants.ADMINST_UP
self.cfg.Update(iobj, feedback_fn)
logging.info("Starting instance %s on node %s", instance, pnode_name)
feedback_fn("* starting instance...")
- result = self.rpc.call_instance_start(pnode_name, iobj,
- None, None, False)
+ result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
+ False)
result.Raise("Could not start instance")
return list(iobj.all_nodes)
REQ_BGL = False
def ExpandNames(self):
+ self.share_locks = _ShareAll()
self._ExpandAndLockInstance()
def CheckPrereq(self):
node_insts.Raise("Can't get node information from %s" % node)
if instance.name not in node_insts.payload:
- if instance.admin_up:
+ if instance.admin_state == constants.ADMINST_UP:
state = constants.INSTST_ERRORDOWN
- else:
+ elif instance.admin_state == constants.ADMINST_DOWN:
state = constants.INSTST_ADMINDOWN
+ else:
+ state = constants.INSTST_ADMINOFFLINE
raise errors.OpExecError("Instance %s is not running (state %s)" %
(instance.name, state))
self._ExpandAndLockInstance()
assert locking.LEVEL_NODE not in self.needed_locks
+ assert locking.LEVEL_NODE_RES not in self.needed_locks
assert locking.LEVEL_NODEGROUP not in self.needed_locks
assert self.op.iallocator is None or self.op.remote_node is None, \
# iallocator will select a new node in the same group
self.needed_locks[locking.LEVEL_NODEGROUP] = []
+ self.needed_locks[locking.LEVEL_NODE_RES] = []
+
self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
self.op.iallocator, self.op.remote_node,
self.op.disks, False, self.op.early_release)
assert not self.needed_locks[locking.LEVEL_NODEGROUP]
self.share_locks[locking.LEVEL_NODEGROUP] = 1
+ # Lock all groups used by instance optimistically; this requires going
+ # via the node before it's locked, requiring verification later on
self.needed_locks[locking.LEVEL_NODEGROUP] = \
self.cfg.GetInstanceNodeGroups(self.op.instance_name)
for node_name in self.cfg.GetNodeGroup(group_uuid).members]
else:
self._LockInstancesNodes()
+ elif level == locking.LEVEL_NODE_RES:
+ # Reuse node locks
+ self.needed_locks[locking.LEVEL_NODE_RES] = \
+ self.needed_locks[locking.LEVEL_NODE]
def BuildHooksEnv(self):
"""Build hooks env.
assert (self.glm.is_owned(locking.LEVEL_NODEGROUP) or
self.op.iallocator is None)
+ # Verify if node group locks are still correct
owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
if owned_groups:
_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
if not self.disks:
self.disks = range(len(self.instance.disks))
+ # TODO: compute disk parameters
+ primary_node_info = self.cfg.GetNodeInfo(instance.primary_node)
+ secondary_node_info = self.cfg.GetNodeInfo(secondary_node)
+ if primary_node_info.group != secondary_node_info.group:
+ self.lu.LogInfo("The instance primary and secondary nodes are in two"
+ " different node groups; the disk parameters of the"
+ " primary node's group will be applied.")
+
+ self.diskparams = self.cfg.GetNodeGroup(primary_node_info.group).diskparams
+
for node in check_nodes:
_CheckNodeOnline(self.lu, node)
self.target_node]
if node_name is not None)
- # Release unneeded node locks
+ # Release unneeded node and node resource locks
_ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)
+ _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES, keep=touched_nodes)
# Release any owned node group
if self.lu.glm.is_owned(locking.LEVEL_NODEGROUP):
assert set(owned_nodes) == set(self.node_secondary_ip), \
("Incorrect node locks, owning %s, expected %s" %
(owned_nodes, self.node_secondary_ip.keys()))
+ assert (self.lu.owned_locks(locking.LEVEL_NODE) ==
+ self.lu.owned_locks(locking.LEVEL_NODE_RES))
owned_instances = self.lu.owned_locks(locking.LEVEL_INSTANCE)
assert list(owned_instances) == [self.instance_name], \
feedback_fn("Replacing disk(s) %s for %s" %
(utils.CommaJoin(self.disks), self.instance.name))
- activate_disks = (not self.instance.admin_up)
+ activate_disks = (self.instance.admin_state != constants.ADMINST_UP)
# Activate the instance disks if we're replacing them on a down instance
if activate_disks:
if activate_disks:
_SafeShutdownInstanceDisks(self.lu, self.instance)
+ assert not self.lu.owned_locks(locking.LEVEL_NODE)
+
if __debug__:
# Verify owned locks
- owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
+ owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE_RES)
nodes = frozenset(self.node_secondary_ip)
assert ((self.early_release and not owned_nodes) or
(not self.early_release and not (set(owned_nodes) - nodes))), \
lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
names = _GenerateUniqueNames(self.lu, lv_names)
+ _, data_p, meta_p = _ComputeLDParams(constants.DT_DRBD8, self.diskparams)
+
vg_data = dev.children[0].logical_id[0]
lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
- logical_id=(vg_data, names[0]))
+ logical_id=(vg_data, names[0]), params=data_p)
vg_meta = dev.children[1].logical_id[0]
- lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
- logical_id=(vg_meta, names[1]))
+ lv_meta = objects.Disk(dev_type=constants.LD_LV, size=DRBD_META_SIZE,
+ logical_id=(vg_meta, names[1]), params=meta_p)
new_lvs = [lv_data, lv_meta]
old_lvs = [child.Copy() for child in dev.children]
"volumes"))
raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
- cstep = 5
+ cstep = itertools.count(5)
+
if self.early_release:
- self.lu.LogStep(cstep, steps_total, "Removing old storage")
- cstep += 1
+ self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
self._RemoveOldStorage(self.target_node, iv_names)
- # WARNING: we release both node locks here, do not do other RPCs
- # than WaitForSync to the primary node
- _ReleaseLocks(self.lu, locking.LEVEL_NODE,
- names=[self.target_node, self.other_node])
+ # TODO: Check if releasing locks early still makes sense
+ _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
+ else:
+ # Release all resource locks except those used by the instance
+ _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
+ keep=self.node_secondary_ip.keys())
+
+ # Release all node locks while waiting for sync
+ _ReleaseLocks(self.lu, locking.LEVEL_NODE)
+
+ # TODO: Can the instance lock be downgraded here? Take the optional disk
+ # shutdown in the caller into consideration.
# Wait for sync
# This can fail as the old devices are degraded and _WaitForSync
# does a combined result over all disks, so we don't check its return value
- self.lu.LogStep(cstep, steps_total, "Sync devices")
- cstep += 1
+ self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
_WaitForSync(self.lu, self.instance)
# Check all devices manually
# Step: remove old storage
if not self.early_release:
- self.lu.LogStep(cstep, steps_total, "Removing old storage")
- cstep += 1
+ self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
self._RemoveOldStorage(self.target_node, iv_names)
def _ExecDrbd8Secondary(self, feedback_fn):
iv_names[idx] = (dev, dev.children, new_net_id)
logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
new_net_id)
+ drbd_params, _, _ = _ComputeLDParams(constants.DT_DRBD8, self.diskparams)
new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
logical_id=new_alone_id,
children=dev.children,
- size=dev.size)
+ size=dev.size,
+ params=drbd_params)
try:
_CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
_GetInstanceInfoText(self.instance), False)
self.cfg.Update(self.instance, feedback_fn)
+ # Release all node locks (the configuration has been updated)
+ _ReleaseLocks(self.lu, locking.LEVEL_NODE)
+
# and now perform the drbd attach
self.lu.LogInfo("Attaching primary drbds to new secondary"
" (standalone => connected)")
to_node, msg,
hint=("please do a gnt-instance info to see the"
" status of disks"))
- cstep = 5
+
+ cstep = itertools.count(5)
+
if self.early_release:
- self.lu.LogStep(cstep, steps_total, "Removing old storage")
- cstep += 1
+ self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
self._RemoveOldStorage(self.target_node, iv_names)
- # WARNING: we release all node locks here, do not do other RPCs
- # than WaitForSync to the primary node
- _ReleaseLocks(self.lu, locking.LEVEL_NODE,
- names=[self.instance.primary_node,
- self.target_node,
- self.new_node])
+ # TODO: Check if releasing locks early still makes sense
+ _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
+ else:
+ # Release all resource locks except those used by the instance
+ _ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
+ keep=self.node_secondary_ip.keys())
+
+ # TODO: Can the instance lock be downgraded here? Take the optional disk
+ # shutdown in the caller into consideration.
# Wait for sync
# This can fail as the old devices are degraded and _WaitForSync
# does a combined result over all disks, so we don't check its return value
- self.lu.LogStep(cstep, steps_total, "Sync devices")
- cstep += 1
+ self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
_WaitForSync(self.lu, self.instance)
# Check all devices manually
# Step: remove old storage
if not self.early_release:
- self.lu.LogStep(cstep, steps_total, "Removing old storage")
+ self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
self._RemoveOldStorage(self.target_node, iv_names)
"""
# Check whether any instance on this node has faulty disks
for inst in _GetNodeInstances(self.cfg, self.op.node_name):
- if not inst.admin_up:
+ if inst.admin_state != constants.ADMINST_UP:
continue
check_nodes = set(inst.all_nodes)
check_nodes.discard(self.op.node_name)
def ExpandNames(self):
self._ExpandAndLockInstance()
self.needed_locks[locking.LEVEL_NODE] = []
- self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+ self.needed_locks[locking.LEVEL_NODE_RES] = []
+ self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
def DeclareLocks(self, level):
if level == locking.LEVEL_NODE:
self._LockInstancesNodes()
+ elif level == locking.LEVEL_NODE_RES:
+ # Copy node locks
+ self.needed_locks[locking.LEVEL_NODE_RES] = \
+ self.needed_locks[locking.LEVEL_NODE][:]
def BuildHooksEnv(self):
"""Build hooks env.
instance = self.instance
disk = self.disk
+ assert set([instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
+ assert (self.owned_locks(locking.LEVEL_NODE) ==
+ self.owned_locks(locking.LEVEL_NODE_RES))
+
disks_ok, _ = _AssembleInstanceDisks(self, self.instance, disks=[disk])
if not disks_ok:
raise errors.OpExecError("Cannot activate block device to grow")
+ feedback_fn("Growing disk %s of instance '%s' by %s" %
+ (self.op.disk, instance.name,
+ utils.FormatUnit(self.op.amount, "h")))
+
# First run all grow ops in dry-run mode
for node in instance.all_nodes:
self.cfg.SetDiskID(disk, node)
disk.RecordGrow(self.op.amount)
self.cfg.Update(instance, feedback_fn)
+
+ # Changes have been recorded, release node lock
+ _ReleaseLocks(self, locking.LEVEL_NODE)
+
+ # Downgrade lock while waiting for sync
+ self.glm.downgrade(locking.LEVEL_INSTANCE)
+
if self.op.wait_for_sync:
disk_abort = not _WaitForSync(self, instance, disks=[disk])
if disk_abort:
self.proc.LogWarning("Disk sync-ing has not returned a good"
" status; please check the instance")
- if not instance.admin_up:
+ if instance.admin_state != constants.ADMINST_UP:
_SafeShutdownInstanceDisks(self, instance, disks=[disk])
- elif not instance.admin_up:
+ elif instance.admin_state != constants.ADMINST_UP:
self.proc.LogWarning("Not shutting down the disk even if the instance is"
" not supposed to be running because no wait for"
" sync mode was requested")
+ assert self.owned_locks(locking.LEVEL_NODE_RES)
+ assert set([instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
+
class LUInstanceQueryData(NoHooksLU):
"""Query runtime instance data.
if remote_info and "state" in remote_info:
remote_state = "up"
else:
- remote_state = "down"
-
- if instance.admin_up:
- config_state = "up"
- else:
- config_state = "down"
+ if instance.admin_state == constants.ADMINST_UP:
+ remote_state = "down"
+ else:
+ remote_state = instance.admin_state
disks = map(compat.partial(self._ComputeDiskStatus, instance, None),
instance.disks)
result[instance.name] = {
"name": instance.name,
- "config_state": config_state,
+ "config_state": instance.admin_state,
"run_state": remote_state,
"pnode": instance.primary_node,
"snodes": instance.secondary_nodes,
def CheckArguments(self):
if not (self.op.nics or self.op.disks or self.op.disk_template or
- self.op.hvparams or self.op.beparams or self.op.os_name):
+ self.op.hvparams or self.op.beparams or self.op.os_name or
+ self.op.online_inst or self.op.offline_inst):
raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
if self.op.hvparams:
def ExpandNames(self):
self._ExpandAndLockInstance()
+ # Can't even acquire node locks in shared mode as upcoming changes in
+ # Ganeti 2.6 will start to modify the node object on disk conversion
self.needed_locks[locking.LEVEL_NODE] = []
+ self.needed_locks[locking.LEVEL_NODE_RES] = []
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
def DeclareLocks(self, level):
if self.op.disk_template and self.op.remote_node:
self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
+ elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
+ # Copy node locks
+ self.needed_locks[locking.LEVEL_NODE_RES] = \
+ self.needed_locks[locking.LEVEL_NODE][:]
def BuildHooksEnv(self):
"""Build hooks env.
"""
args = dict()
- if constants.BE_MEMORY in self.be_new:
- args["memory"] = self.be_new[constants.BE_MEMORY]
+ if constants.BE_MINMEM in self.be_new:
+ args["minmem"] = self.be_new[constants.BE_MINMEM]
+ if constants.BE_MAXMEM in self.be_new:
+ args["maxmem"] = self.be_new[constants.BE_MAXMEM]
if constants.BE_VCPUS in self.be_new:
args["vcpus"] = self.be_new[constants.BE_VCPUS]
# TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
"Cannot retrieve locked instance %s" % self.op.instance_name
pnode = instance.primary_node
nodelist = list(instance.all_nodes)
+ pnode_info = self.cfg.GetNodeInfo(pnode)
+ self.diskparams = self.cfg.GetNodeGroup(pnode_info.group).diskparams
# OS change
if self.op.os_name and not self.op.force:
" %s to %s" % (instance.disk_template,
self.op.disk_template),
errors.ECODE_INVAL)
- _CheckInstanceDown(self, instance, "cannot change disk template")
+ _CheckInstanceState(self, instance, INSTANCE_DOWN,
+ msg="cannot change disk template")
if self.op.disk_template in constants.DTS_INT_MIRROR:
if self.op.remote_node == pnode:
raise errors.OpPrereqError("Given new secondary node %s is the same"
required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
_CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
+ snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
+ if pnode_info.group != snode_info.group:
+ self.LogWarning("The primary and secondary nodes are in two"
+ " different node groups; the disk parameters"
+ " from the first disk's node group will be"
+ " used")
+
# hvparams processing
if self.op.hvparams:
hv_type = instance.hypervisor
# local check
hypervisor.GetHypervisor(hv_type).CheckParameterSyntax(hv_new)
_CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
- self.hv_new = hv_new # the new actual values
+ self.hv_proposed = self.hv_new = hv_new # the new actual values
self.hv_inst = i_hvdict # the new dict (without defaults)
else:
+ self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
+ instance.hvparams)
self.hv_new = self.hv_inst = {}
# beparams processing
if self.op.beparams:
i_bedict = _GetUpdatedParams(instance.beparams, self.op.beparams,
use_none=True)
+ objects.UpgradeBeParams(i_bedict)
utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
be_new = cluster.SimpleFillBE(i_bedict)
- self.be_new = be_new # the new actual values
+ self.be_proposed = self.be_new = be_new # the new actual values
self.be_inst = i_bedict # the new dict (without defaults)
else:
self.be_new = self.be_inst = {}
+ self.be_proposed = cluster.SimpleFillBE(instance.beparams)
be_old = cluster.FillBE(instance)
+    # CPU param validation -- checking every time a parameter is
+ # changed to cover all cases where either CPU mask or vcpus have
+ # changed
+ if (constants.BE_VCPUS in self.be_proposed and
+ constants.HV_CPU_MASK in self.hv_proposed):
+ cpu_list = \
+ utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
+ # Verify mask is consistent with number of vCPUs. Can skip this
+ # test if only 1 entry in the CPU mask, which means same mask
+ # is applied to all vCPUs.
+ if (len(cpu_list) > 1 and
+ len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
+ raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
+ " CPU mask [%s]" %
+ (self.be_proposed[constants.BE_VCPUS],
+ self.hv_proposed[constants.HV_CPU_MASK]),
+ errors.ECODE_INVAL)
+
+ # Only perform this test if a new CPU mask is given
+ if constants.HV_CPU_MASK in self.hv_new:
+ # Calculate the largest CPU number requested
+ max_requested_cpu = max(map(max, cpu_list))
+ # Check that all of the instance's nodes have enough physical CPUs to
+ # satisfy the requested CPU mask
+ _CheckNodesPhysicalCPUs(self, instance.all_nodes,
+ max_requested_cpu + 1, instance.hypervisor)
+
# osparams processing
if self.op.osparams:
i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
self.warn = []
- if (constants.BE_MEMORY in self.op.beparams and not self.op.force and
- be_new[constants.BE_MEMORY] > be_old[constants.BE_MEMORY]):
+ #TODO(dynmem): do the appropriate check involving MINMEM
+ if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
+ be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
mem_check_list = [pnode]
if be_new[constants.BE_AUTO_BALANCE]:
# either we changed auto_balance to yes or it was from before
instance_info = self.rpc.call_instance_info(pnode, instance.name,
instance.hypervisor)
nodeinfo = self.rpc.call_node_info(mem_check_list, None,
- instance.hypervisor)
+ [instance.hypervisor])
pninfo = nodeinfo[pnode]
msg = pninfo.fail_msg
if msg:
# Assume the primary node is unreachable and go ahead
self.warn.append("Can't get info from primary node %s: %s" %
(pnode, msg))
- elif not isinstance(pninfo.payload.get("memory_free", None), int):
- self.warn.append("Node data from primary node %s doesn't contain"
- " free memory information" % pnode)
- elif instance_info.fail_msg:
- self.warn.append("Can't get instance runtime information: %s" %
- instance_info.fail_msg)
else:
- if instance_info.payload:
- current_mem = int(instance_info.payload["memory"])
+ (_, _, (pnhvinfo, )) = pninfo.payload
+ if not isinstance(pnhvinfo.get("memory_free", None), int):
+ self.warn.append("Node data from primary node %s doesn't contain"
+ " free memory information" % pnode)
+ elif instance_info.fail_msg:
+ self.warn.append("Can't get instance runtime information: %s" %
+ instance_info.fail_msg)
else:
- # Assume instance not running
- # (there is a slight race condition here, but it's not very probable,
- # and we have no other way to check)
- current_mem = 0
- miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
- pninfo.payload["memory_free"])
- if miss_mem > 0:
- raise errors.OpPrereqError("This change will prevent the instance"
- " from starting, due to %d MB of memory"
- " missing on its primary node" % miss_mem,
- errors.ECODE_NORES)
+ if instance_info.payload:
+ current_mem = int(instance_info.payload["memory"])
+ else:
+ # Assume instance not running
+ # (there is a slight race condition here, but it's not very
+ # probable, and we have no other way to check)
+ # TODO: Describe race condition
+ current_mem = 0
+ #TODO(dynmem): do the appropriate check involving MINMEM
+ miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
+ pnhvinfo["memory_free"])
+ if miss_mem > 0:
+ raise errors.OpPrereqError("This change will prevent the instance"
+ " from starting, due to %d MB of memory"
+ " missing on its primary node" %
+ miss_mem,
+ errors.ECODE_NORES)
if be_new[constants.BE_AUTO_BALANCE]:
for node, nres in nodeinfo.items():
continue
nres.Raise("Can't get info from secondary node %s" % node,
prereq=True, ecode=errors.ECODE_STATE)
- if not isinstance(nres.payload.get("memory_free", None), int):
+ (_, _, (nhvinfo, )) = nres.payload
+ if not isinstance(nhvinfo.get("memory_free", None), int):
raise errors.OpPrereqError("Secondary node %s didn't return free"
" memory information" % node,
errors.ECODE_STATE)
- elif be_new[constants.BE_MEMORY] > nres.payload["memory_free"]:
+ #TODO(dynmem): do the appropriate check involving MINMEM
+ elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
raise errors.OpPrereqError("This change will prevent the instance"
" from failover to its secondary node"
" %s, due to not enough memory" % node,
if len(instance.disks) == 1:
raise errors.OpPrereqError("Cannot remove the last disk of"
" an instance", errors.ECODE_INVAL)
- _CheckInstanceDown(self, instance, "cannot remove disks")
+ _CheckInstanceState(self, instance, INSTANCE_DOWN,
+ msg="cannot remove disks")
if (disk_op == constants.DDM_ADD and
len(instance.disks) >= constants.MAX_DISKS):
(disk_op, len(instance.disks)),
errors.ECODE_INVAL)
- return
+ # disabling the instance
+ if self.op.offline_inst:
+ _CheckInstanceState(self, instance, INSTANCE_DOWN,
+ msg="cannot change instance state to offline")
+
+ # enabling the instance
+ if self.op.online_inst:
+ _CheckInstanceState(self, instance, INSTANCE_OFFLINE,
+ msg="cannot make instance go online")
def _ConvertPlainToDrbd(self, feedback_fn):
"""Converts an instance from plain to drbd.
pnode = instance.primary_node
snode = self.op.remote_node
+ assert instance.disk_template == constants.DT_PLAIN
+
# create a fake disk info for _GenerateDiskTemplate
disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
constants.IDISK_VG: d.logical_id[0]}
for d in instance.disks]
new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
instance.name, pnode, [snode],
- disk_info, None, None, 0, feedback_fn)
+ disk_info, None, None, 0, feedback_fn,
+ self.diskparams)
info = _GetInstanceInfoText(instance)
feedback_fn("Creating aditional volumes...")
# first, create the missing data and meta devices
instance.disks = new_disks
self.cfg.Update(instance, feedback_fn)
+ # Release node locks while waiting for sync
+ _ReleaseLocks(self, locking.LEVEL_NODE)
+
# disks are created, waiting for sync
disk_abort = not _WaitForSync(self, instance,
oneshot=not self.op.wait_for_sync)
raise errors.OpExecError("There are some degraded disks for"
" this instance, please cleanup manually")
+ # Node resource locks will be released by caller
+
def _ConvertDrbdToPlain(self, feedback_fn):
"""Converts an instance from drbd to plain.
"""
instance = self.instance
+
assert len(instance.secondary_nodes) == 1
+ assert instance.disk_template == constants.DT_DRBD8
+
pnode = instance.primary_node
snode = instance.secondary_nodes[0]
feedback_fn("Converting template to plain")
instance.disk_template = constants.DT_PLAIN
self.cfg.Update(instance, feedback_fn)
+ # Release locks in case removing disks takes a while
+ _ReleaseLocks(self, locking.LEVEL_NODE)
+
feedback_fn("Removing volumes on the secondary node...")
for disk in old_disks:
self.cfg.SetDiskID(disk, snode)
tcp_port = disk.logical_id[2]
self.cfg.AddTcpUdpPort(tcp_port)
+ # Node resource locks will be released by caller
+
def Exec(self, feedback_fn):
"""Modifies an instance.
for warn in self.warn:
feedback_fn("WARNING: %s" % warn)
+ assert ((self.op.disk_template is None) ^
+ bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
+ "Not owning any node resource locks"
+
result = []
instance = self.instance
# disk changes
[disk_dict],
file_path,
file_driver,
- disk_idx_base, feedback_fn)[0]
+ disk_idx_base,
+ feedback_fn,
+ self.diskparams)[0]
instance.disks.append(new_disk)
info = _GetInstanceInfoText(instance)
disk_dict[constants.IDISK_MODE]))
if self.op.disk_template:
+ if __debug__:
+ check_nodes = set(instance.all_nodes)
+ if self.op.remote_node:
+ check_nodes.add(self.op.remote_node)
+ for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
+ owned = self.owned_locks(level)
+ assert not (check_nodes - owned), \
+ ("Not owning the correct locks, owning %r, expected at least %r" %
+ (owned, check_nodes))
+
r_shut = _ShutdownInstanceDisks(self, instance)
if not r_shut:
raise errors.OpExecError("Cannot shutdown instance disks, unable to"
raise
result.append(("disk_template", self.op.disk_template))
+ assert instance.disk_template == self.op.disk_template, \
+ ("Expected disk template '%s', found '%s'" %
+ (self.op.disk_template, instance.disk_template))
+
+ # Release node and resource locks if there are any (they might already have
+ # been released during disk conversion)
+ _ReleaseLocks(self, locking.LEVEL_NODE)
+ _ReleaseLocks(self, locking.LEVEL_NODE_RES)
+
# NIC changes
for nic_op, nic_dict in self.op.nics:
if nic_op == constants.DDM_REMOVE:
for key, val in self.op.osparams.iteritems():
result.append(("os/%s" % key, val))
+ # online/offline instance
+ if self.op.online_inst:
+ self.cfg.MarkInstanceDown(instance.name)
+ result.append(("admin_state", constants.ADMINST_DOWN))
+ if self.op.offline_inst:
+ self.cfg.MarkInstanceOffline(instance.name)
+ result.append(("admin_state", constants.ADMINST_OFFLINE))
+
self.cfg.Update(instance, feedback_fn)
+ assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
+ self.owned_locks(locking.LEVEL_NODE)), \
+ "All node locks should have been released by now"
+
return result
_DISK_CONVERSIONS = {
"Cannot retrieve locked instance %s" % self.op.instance_name
_CheckNodeOnline(self, self.instance.primary_node)
- if (self.op.remove_instance and self.instance.admin_up and
+ if (self.op.remove_instance and
+ self.instance.admin_state == constants.ADMINST_UP and
not self.op.shutdown):
raise errors.OpPrereqError("Can not remove instance without shutting it"
" down before")
for disk in instance.disks:
self.cfg.SetDiskID(disk, src_node)
- activate_disks = (not instance.admin_up)
+ activate_disks = (instance.admin_state != constants.ADMINST_UP)
if activate_disks:
# Activate the instance disks if we'exporting a stopped instance
helper.CreateSnapshots()
try:
- if (self.op.shutdown and instance.admin_up and
+ if (self.op.shutdown and
+ instance.admin_state == constants.ADMINST_UP and
not self.op.remove_instance):
assert not activate_disks
feedback_fn("Starting instance %s" % instance.name)
- result = self.rpc.call_instance_start(src_node, instance,
- None, None, False)
+ result = self.rpc.call_instance_start(src_node,
+ (instance, None, None), False)
msg = result.fail_msg
if msg:
feedback_fn("Failed to start instance: %s" % msg)
if self.op.ndparams:
utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
+ if self.op.hv_state:
+ self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state, None)
+ else:
+ self.new_hv_state = None
+
+ if self.op.disk_state:
+ self.new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state, None)
+ else:
+ self.new_disk_state = None
+
+ if self.op.diskparams:
+ for templ in constants.DISK_TEMPLATES:
+ if templ not in self.op.diskparams:
+ self.op.diskparams[templ] = {}
+ utils.ForceDictType(self.op.diskparams[templ], constants.DISK_DT_TYPES)
+ else:
+ self.op.diskparams = self.cfg.GetClusterInfo().diskparams
+
+ if self.op.ipolicy:
+ cluster = self.cfg.GetClusterInfo()
+ full_ipolicy = cluster.SimpleFillIPolicy(self.op.ipolicy)
+ objects.InstancePolicy.CheckParameterSyntax(full_ipolicy)
+
def BuildHooksEnv(self):
"""Build hooks env.
group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
uuid=self.group_uuid,
alloc_policy=self.op.alloc_policy,
- ndparams=self.op.ndparams)
+ ndparams=self.op.ndparams,
+ diskparams=self.op.diskparams,
+ ipolicy=self.op.ipolicy,
+ hv_state_static=self.new_hv_state,
+ disk_state_static=self.new_disk_state)
self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
del self.remove_locks[locking.LEVEL_NODEGROUP]
lu.needed_locks = {}
self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
+ self._cluster = lu.cfg.GetClusterInfo()
name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values())
if not self.names:
# Do not pass on node information if it was not requested.
group_to_nodes = None
- return query.GroupQueryData([self._all_groups[uuid]
+ return query.GroupQueryData(self._cluster,
+ [self._all_groups[uuid]
for uuid in self.wanted],
group_to_nodes, group_to_instances)
def CheckArguments(self):
all_changes = [
self.op.ndparams,
+ self.op.diskparams,
self.op.alloc_policy,
+ self.op.hv_state,
+ self.op.disk_state,
+ self.op.ipolicy,
]
if all_changes.count(None) == len(all_changes):
utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
self.new_ndparams = new_ndparams
+ if self.op.diskparams:
+ self.new_diskparams = dict()
+ for templ in constants.DISK_TEMPLATES:
+ if templ not in self.op.diskparams:
+ self.op.diskparams[templ] = {}
+ new_templ_params = _GetUpdatedParams(self.group.diskparams[templ],
+ self.op.diskparams[templ])
+ utils.ForceDictType(new_templ_params, constants.DISK_DT_TYPES)
+ self.new_diskparams[templ] = new_templ_params
+
+ if self.op.hv_state:
+ self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state,
+ self.group.hv_state_static)
+
+ if self.op.disk_state:
+ self.new_disk_state = \
+ _MergeAndVerifyDiskState(self.op.disk_state,
+ self.group.disk_state_static)
+
+ if self.op.ipolicy:
+ g_ipolicy = {}
+ for key, value in self.op.ipolicy.iteritems():
+ g_ipolicy[key] = _GetUpdatedParams(self.group.ipolicy.get(key, {}),
+ value,
+ use_none=True)
+ utils.ForceDictType(g_ipolicy[key], constants.ISPECS_PARAMETER_TYPES)
+ self.new_ipolicy = g_ipolicy
+ objects.InstancePolicy.CheckParameterSyntax(self.new_ipolicy)
+
def BuildHooksEnv(self):
"""Build hooks env.
self.group.ndparams = self.new_ndparams
result.append(("ndparams", str(self.group.ndparams)))
+ if self.op.diskparams:
+ self.group.diskparams = self.new_diskparams
+ result.append(("diskparams", str(self.group.diskparams)))
+
if self.op.alloc_policy:
self.group.alloc_policy = self.op.alloc_policy
+ if self.op.hv_state:
+ self.group.hv_state_static = self.new_hv_state
+
+ if self.op.disk_state:
+ self.group.disk_state_static = self.new_disk_state
+
+ if self.op.ipolicy:
+ self.group.ipolicy = self.new_ipolicy
+
self.cfg.Update(self.group, feedback_fn)
return result
# pylint: disable=R0902
# lots of instance attributes
- def __init__(self, cfg, rpc, mode, **kwargs):
+ def __init__(self, cfg, rpc_runner, mode, **kwargs):
self.cfg = cfg
- self.rpc = rpc
+ self.rpc = rpc_runner
# init buffer variables
self.in_text = self.out_text = self.in_data = self.out_data = None
# init all input fields so that pylint is happy
elif self.mode == constants.IALLOCATOR_MODE_RELOC:
hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
else:
- hypervisor_name = cluster_info.enabled_hypervisors[0]
+ hypervisor_name = cluster_info.primary_hypervisor
- node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
- hypervisor_name)
+ node_data = self.rpc.call_node_info(node_list, [cfg.GetVGName()],
+ [hypervisor_name])
node_iinfo = \
self.rpc.call_all_instances_info(node_list,
cluster_info.enabled_hypervisors)
@param node_results: the basic node structures as filled from the config
"""
+ #TODO(dynmem): compute the right data on MAX and MIN memory
# make a copy of the current dict
node_results = dict(node_results)
for nname, nresult in node_data.items():
nresult.Raise("Can't get data for node %s" % nname)
node_iinfo[nname].Raise("Can't get node instance info from node %s" %
nname)
- remote_info = nresult.payload
+ remote_info = _MakeLegacyNodeInfo(nresult.payload)
for attr in ["memory_total", "memory_free", "memory_dom0",
"vg_size", "vg_free", "cpu_total"]:
i_p_mem = i_p_up_mem = 0
for iinfo, beinfo in i_list:
if iinfo.primary_node == nname:
- i_p_mem += beinfo[constants.BE_MEMORY]
+ i_p_mem += beinfo[constants.BE_MAXMEM]
if iinfo.name not in node_iinfo[nname].payload:
i_used_mem = 0
else:
i_used_mem = int(node_iinfo[nname].payload[iinfo.name]["memory"])
- i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
+ i_mem_diff = beinfo[constants.BE_MAXMEM] - i_used_mem
remote_info["memory_free"] -= max(0, i_mem_diff)
- if iinfo.admin_up:
- i_p_up_mem += beinfo[constants.BE_MEMORY]
+ if iinfo.admin_state == constants.ADMINST_UP:
+ i_p_up_mem += beinfo[constants.BE_MAXMEM]
# compute memory used by instances
pnr_dyn = {
nic_data.append(nic_dict)
pir = {
"tags": list(iinfo.GetTags()),
- "admin_up": iinfo.admin_up,
+ "admin_state": iinfo.admin_state,
"vcpus": beinfo[constants.BE_VCPUS],
- "memory": beinfo[constants.BE_MEMORY],
+ "memory": beinfo[constants.BE_MAXMEM],
"os": iinfo.os,
"nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
"nics": nic_data,
self._my_hostname = netutils.Hostname.GetSysName()
self._last_cluster_serial = -1
self._cfg_id = None
+ self._context = None
self._OpenConfig(accept_foreign)
+ def _GetRpc(self, address_list):
+ """Returns RPC runner for configuration.
+
+ """
+ return rpc.ConfigRunner(self._context, address_list)
+
+ def SetContext(self, context):
+ """Sets Ganeti context.
+
+ """
+ self._context = context
+
# this method needs to be static, so that we can call it on the class
@staticmethod
def IsCluster():
except errors.ConfigurationError, err:
result.append("%s has invalid nicparams: %s" % (owner, err))
+ def _helper_ipolicy(owner, params):
+ try:
+ objects.InstancePolicy.CheckParameterSyntax(params)
+ except errors.ConfigurationError, err:
+ result.append("%s has invalid instance policy: %s" % (owner, err))
+
+ def _helper_ispecs(owner, params):
+ for key, value in params.iteritems():
+ fullkey = "ipolicy/" + key
+ _helper(owner, fullkey, value, constants.ISPECS_PARAMETER_TYPES)
+
# check cluster parameters
_helper("cluster", "beparams", cluster.SimpleFillBE({}),
constants.BES_PARAMETER_TYPES)
_helper_nic("cluster", cluster.SimpleFillNIC({}))
_helper("cluster", "ndparams", cluster.SimpleFillND({}),
constants.NDS_PARAMETER_TYPES)
+ _helper_ipolicy("cluster", cluster.SimpleFillIPolicy({}))
+ _helper_ispecs("cluster", cluster.SimpleFillIPolicy({}))
# per-instance checks
for instance_name in data.instances:
result.append("duplicate node group name '%s'" % nodegroup.name)
else:
nodegroups_names.add(nodegroup.name)
+ group_name = "group %s" % nodegroup.name
+ _helper_ipolicy(group_name, cluster.SimpleFillIPolicy(nodegroup.ipolicy))
+ _helper_ispecs(group_name, cluster.SimpleFillIPolicy(nodegroup.ipolicy))
if nodegroup.ndparams:
- _helper("group %s" % nodegroup.name, "ndparams",
+ _helper(group_name, "ndparams",
cluster.SimpleFillND(nodegroup.ndparams),
constants.NDS_PARAMETER_TYPES)
return self._config_data.cluster.master_netdev
@locking.ssynchronized(_config_lock, shared=1)
+ def GetMasterNetmask(self):
+ """Get the netmask of the master node for this cluster.
+
+ """
+ return self._config_data.cluster.master_netmask
+
+ @locking.ssynchronized(_config_lock, shared=1)
+ def GetUseExternalMipScript(self):
+ """Get flag representing whether to use the external master IP setup script.
+
+ """
+ return self._config_data.cluster.use_external_mip_script
+
+ @locking.ssynchronized(_config_lock, shared=1)
def GetFileStorageDir(self):
"""Get the file storage dir for this cluster.
"""
return self._config_data.cluster.primary_ip_family
+ @locking.ssynchronized(_config_lock, shared=1)
+ def GetMasterNetworkParameters(self):
+ """Get network parameters of the master node.
+
+    @rtype: L{objects.MasterNetworkParameters}
+ @return: network parameters of the master node
+
+ """
+ cluster = self._config_data.cluster
+ result = objects.MasterNetworkParameters(name=cluster.master_node,
+ ip=cluster.master_ip,
+ netmask=cluster.master_netmask,
+ netdev=cluster.master_netdev,
+ ip_family=cluster.primary_ip_family)
+
+ return result
+
@locking.ssynchronized(_config_lock)
def AddNodeGroup(self, group, ec_id, check_uuid=True):
"""Add a node group to the configuration.
"""Set the instance's status to a given value.
"""
- assert isinstance(status, bool), \
+ assert status in constants.ADMINST_ALL, \
"Invalid status '%s' passed to SetInstanceStatus" % (status,)
if instance_name not in self._config_data.instances:
raise errors.ConfigurationError("Unknown instance '%s'" %
instance_name)
instance = self._config_data.instances[instance_name]
- if instance.admin_up != status:
- instance.admin_up = status
+ if instance.admin_state != status:
+ instance.admin_state = status
instance.serial_no += 1
instance.mtime = time.time()
self._WriteConfig()
"""Mark the instance status to up in the config.
"""
- self._SetInstanceStatus(instance_name, True)
+ self._SetInstanceStatus(instance_name, constants.ADMINST_UP)
+
+ @locking.ssynchronized(_config_lock)
+ def MarkInstanceOffline(self, instance_name):
+ """Mark the instance status to down in the config.
+
+ """
+ self._SetInstanceStatus(instance_name, constants.ADMINST_OFFLINE)
@locking.ssynchronized(_config_lock)
def RemoveInstance(self, instance_name):
"""Mark the status of an instance to down in the configuration.
"""
- self._SetInstanceStatus(instance_name, False)
+ self._SetInstanceStatus(instance_name, constants.ADMINST_DOWN)
def _UnlockedGetInstanceList(self):
"""Get the list of instances.
for instance in self._UnlockedGetInstanceList()])
return my_dict
+ @locking.ssynchronized(_config_lock, shared=1)
+ def GetInstancesInfoByFilter(self, filter_fn):
+ """Get instance configuration with a filter.
+
+ @type filter_fn: callable
+ @param filter_fn: Filter function receiving instance object as parameter,
+ returning boolean. Important: this function is called while the
+      configuration lock is held. It must not do any complex work or call
+ functions potentially leading to a deadlock. Ideally it doesn't call any
+ other functions and just compares instance attributes.
+
+ """
+ return dict((name, inst)
+ for (name, inst) in self._config_data.instances.items()
+ if filter_fn(inst))
+
@locking.ssynchronized(_config_lock)
def AddNode(self, node, ec_id):
"""Add a node to the configuration.
would GetNodeInfo return for the node
"""
- my_dict = dict([(node, self._UnlockedGetNodeInfo(node))
- for node in self._UnlockedGetNodeList()])
- return my_dict
+ return self._UnlockedGetAllNodesInfo()
+
+ def _UnlockedGetAllNodesInfo(self):
+ """Gets configuration of all nodes.
+
+ @note: See L{GetAllNodesInfo}
+
+ """
+ return dict([(node, self._UnlockedGetNodeInfo(node))
+ for node in self._UnlockedGetNodeList()])
@locking.ssynchronized(_config_lock, shared=1)
def GetNodeGroupsFromNodes(self, nodes):
# Update timestamps and serials (only once per node/group object)
now = time.time()
- for obj in frozenset(itertools.chain(*resmod)): # pylint: disable-msg=W0142
+ for obj in frozenset(itertools.chain(*resmod)): # pylint: disable=W0142
obj.serial_no += 1
obj.mtime = now
node_list.append(node_info.name)
addr_list.append(node_info.primary_ip)
- result = rpc.RpcRunner.call_upload_file(node_list, self._cfg_file,
- address_list=addr_list)
+ # TODO: Use dedicated resolver talking to config writer for name resolution
+ result = \
+ self._GetRpc(addr_list).call_upload_file(node_list, self._cfg_file)
for to_node, to_result in result.items():
msg = to_result.fail_msg
if msg:
# Write ssconf files on all nodes (including locally)
if self._last_cluster_serial < self._config_data.cluster.serial_no:
if not self._offline:
- result = rpc.RpcRunner.call_write_ssconf_files(
+ result = self._GetRpc(None).call_write_ssconf_files(
self._UnlockedGetOnlineNodeList(),
self._UnlockedGetSsconfValues())
constants.SS_MASTER_CANDIDATES_IPS: mc_ips_data,
constants.SS_MASTER_IP: cluster.master_ip,
constants.SS_MASTER_NETDEV: cluster.master_netdev,
+ constants.SS_MASTER_NETMASK: str(cluster.master_netmask),
constants.SS_MASTER_NODE: cluster.master_node,
constants.SS_NODE_LIST: node_data,
constants.SS_NODE_PRIMARY_IPS: node_pri_ips_data,
NODED_USER = _autoconf.NODED_USER
NODED_GROUP = _autoconf.NODED_GROUP
+# cpu pinning separators and constants
+CPU_PINNING_SEP = ":"
+CPU_PINNING_ALL = "all"
+# internal representation of "all"
+CPU_PINNING_ALL_VAL = -1
+# one "all" entry in a CPU list means CPU pinning is off
+CPU_PINNING_OFF = [CPU_PINNING_ALL_VAL]
+
+# A Xen-specific implementation detail - there is no way to actually say
+# "use any cpu for pinning" in a Xen configuration file, as opposed to the
+# command line, where you can say "xm vcpu-pin <domain> <vcpu> all".
+# The workaround used in Xen is "0-63" (see source code function
+# xm_vcpu_pin in <xen-source>/tools/python/xen/xm/main.py).
+# To support future changes, the following constant is treated as a
+# blackbox string that simply means use-any-cpu-for-pinning-under-xen.
+CPU_PINNING_ALL_XEN = "0-63"
+
+# A KVM-specific implementation detail - the following value is used
+# to set CPU affinity to all processors (#0 through #31), per taskset
+# man page.
+CPU_PINNING_ALL_KVM = 0xFFFFFFFF
# Wipe
DD_CMD = "dd"
NODED_CERT_FILE = DATA_DIR + "/server.pem"
RAPI_CERT_FILE = DATA_DIR + "/rapi.pem"
CONFD_HMAC_KEY = DATA_DIR + "/hmac.key"
+SPICE_CERT_FILE = DATA_DIR + "/spice.pem"
+SPICE_CACERT_FILE = DATA_DIR + "/spice-ca.pem"
CLUSTER_DOMAIN_SECRET_FILE = DATA_DIR + "/cluster-domain-secret"
INSTANCE_STATUS_FILE = RUN_GANETI_DIR + "/instance-status"
SSH_KNOWN_HOSTS_FILE = DATA_DIR + "/known_hosts"
SYSCONFDIR = _autoconf.SYSCONFDIR
TOOLSDIR = _autoconf.TOOLSDIR
CONF_DIR = SYSCONFDIR + "/ganeti"
+USER_SCRIPTS_DIR = CONF_DIR + "/scripts"
+ENABLE_CONFD = _autoconf.ENABLE_CONFD
#: Lock file for watcher, locked in shared mode by watcher; lock in exclusive
# mode to block watcher (see L{cli._RunWhileClusterStoppedHelper.Call}
#: File containing Unix timestamp until which watcher should be paused
WATCHER_PAUSEFILE = DATA_DIR + "/watcher.pause"
-ALL_CERT_FILES = frozenset([NODED_CERT_FILE, RAPI_CERT_FILE])
+# Master IP address setup scripts paths (default and user-provided)
+DEFAULT_MASTER_SETUP_SCRIPT = TOOLSDIR + "/master-ip-setup"
+EXTERNAL_MASTER_SETUP_SCRIPT = USER_SCRIPTS_DIR + "/master-ip-setup"
+
+ALL_CERT_FILES = frozenset([
+ NODED_CERT_FILE,
+ RAPI_CERT_FILE,
+ SPICE_CERT_FILE,
+ SPICE_CACERT_FILE,
+ ])
MASTER_SOCKET = SOCKET_DIR + "/ganeti-master"
LD_DRBD8 = "drbd8"
LD_FILE = "file"
LD_BLOCKDEV = "blockdev"
+LOGICAL_DISK_TYPES = frozenset([
+ LD_LV,
+ LD_DRBD8,
+ LD_FILE,
+ LD_BLOCKDEV,
+ ])
+
LDS_BLOCK = frozenset([LD_LV, LD_DRBD8, LD_BLOCKDEV])
# drbd constants
DRBD_HMAC_ALG = "md5"
DRBD_NET_PROTOCOL = "C"
-DRBD_BARRIERS = _autoconf.DRBD_BARRIERS
+
+# drbd barrier types
+DRBD_B_NONE = "n"
+DRBD_B_DISK_BARRIERS = "b"
+DRBD_B_DISK_DRAIN = "d"
+DRBD_B_DISK_FLUSH = "f"
+
+# Valid barrier combinations: "n" or any non-null subset of "bfd"
+DRBD_VALID_BARRIER_OPT = frozenset([
+ frozenset([DRBD_B_NONE]),
+ frozenset([DRBD_B_DISK_BARRIERS]),
+ frozenset([DRBD_B_DISK_DRAIN]),
+ frozenset([DRBD_B_DISK_FLUSH]),
+ frozenset([DRBD_B_DISK_DRAIN, DRBD_B_DISK_FLUSH]),
+ frozenset([DRBD_B_DISK_BARRIERS, DRBD_B_DISK_DRAIN]),
+ frozenset([DRBD_B_DISK_BARRIERS, DRBD_B_DISK_FLUSH]),
+ frozenset([DRBD_B_DISK_BARRIERS, DRBD_B_DISK_FLUSH, DRBD_B_DISK_DRAIN]),
+ ])
# file backend driver
FD_LOOP = "loop"
# others
DEFAULT_BRIDGE = "xen-br0"
-SYNC_SPEED = 60 * 1024
+CLASSIC_DRBD_SYNC_SPEED = 60 * 1024 # 60 MiB, expressed in KiB
IP4_ADDRESS_LOCALHOST = "127.0.0.1"
IP4_ADDRESS_ANY = "0.0.0.0"
IP6_ADDRESS_LOCALHOST = "::1"
DEFAULT_DRBD_HELPER = "/bin/true"
MIN_VG_SIZE = 20480
DEFAULT_MAC_PREFIX = "aa:00:00"
-LVM_STRIPECOUNT = _autoconf.LVM_STRIPECOUNT
# default maximum instance wait time, in seconds.
DEFAULT_SHUTDOWN_TIMEOUT = 120
NODE_MAX_CLOCK_SKEW = 150
HV_KVM_SPICE_ZLIB_GLZ_IMG_COMPR = "spice_zlib_glz_wan_compression"
HV_KVM_SPICE_STREAMING_VIDEO_DETECTION = "spice_streaming_video"
HV_KVM_SPICE_AUDIO_COMPR = "spice_playback_compression"
+HV_KVM_SPICE_USE_TLS = "spice_use_tls"
+HV_KVM_SPICE_TLS_CIPHERS = "spice_tls_ciphers"
+HV_KVM_SPICE_USE_VDAGENT = "spice_use_vdagent"
HV_ACPI = "acpi"
HV_PAE = "pae"
HV_USE_BOOTLOADER = "use_bootloader"
HV_KVM_SPICE_ZLIB_GLZ_IMG_COMPR: VTYPE_STRING,
HV_KVM_SPICE_STREAMING_VIDEO_DETECTION: VTYPE_STRING,
HV_KVM_SPICE_AUDIO_COMPR: VTYPE_BOOL,
+ HV_KVM_SPICE_USE_TLS: VTYPE_BOOL,
+ HV_KVM_SPICE_TLS_CIPHERS: VTYPE_STRING,
+ HV_KVM_SPICE_USE_VDAGENT: VTYPE_BOOL,
HV_ACPI: VTYPE_BOOL,
HV_PAE: VTYPE_BOOL,
HV_USE_BOOTLOADER: VTYPE_BOOL,
HVS_PARAMETERS = frozenset(HVS_PARAMETER_TYPES.keys())
+# Migration statuses
+HV_MIGRATION_COMPLETED = "completed"
+HV_MIGRATION_ACTIVE = "active"
+HV_MIGRATION_FAILED = "failed"
+HV_MIGRATION_CANCELLED = "cancelled"
+
+HV_MIGRATION_VALID_STATUSES = frozenset([
+ HV_MIGRATION_COMPLETED,
+ HV_MIGRATION_ACTIVE,
+ HV_MIGRATION_FAILED,
+ HV_MIGRATION_CANCELLED,
+ ])
+
+HV_MIGRATION_FAILED_STATUSES = frozenset([
+ HV_MIGRATION_FAILED,
+ HV_MIGRATION_CANCELLED,
+ ])
+
+# KVM-specific statuses
+HV_KVM_MIGRATION_VALID_STATUSES = HV_MIGRATION_VALID_STATUSES
+
# Node info keys
HV_NODEINFO_KEY_VERSION = "hv_version"
+# Hypervisor state
+HVST_MEMORY_TOTAL = "mem_total"
+HVST_MEMORY_NODE = "mem_node"
+HVST_MEMORY_HV = "mem_hv"
+HVST_CPU_TOTAL = "cpu_total"
+HVST_CPU_NODE = "cpu_node"
+
+HVST_DEFAULTS = {
+ HVST_MEMORY_TOTAL: 0,
+ HVST_MEMORY_NODE: 0,
+ HVST_MEMORY_HV: 0,
+ HVST_CPU_TOTAL: 1,
+ HVST_CPU_NODE: 1,
+ }
+
+HVSTS_PARAMETER_TYPES = {
+ HVST_MEMORY_TOTAL: VTYPE_INT,
+ HVST_MEMORY_NODE: VTYPE_INT,
+ HVST_MEMORY_HV: VTYPE_INT,
+ HVST_CPU_TOTAL: VTYPE_INT,
+ HVST_CPU_NODE: VTYPE_INT,
+ }
+
+HVSTS_PARAMETERS = frozenset(HVSTS_PARAMETER_TYPES.keys())
+
+# Disk state
+DS_DISK_TOTAL = "disk_total"
+DS_DISK_RESERVED = "disk_reserved"
+DS_DISK_OVERHEAD = "disk_overhead"
+
+DS_DEFAULTS = {
+ DS_DISK_TOTAL: 0,
+ DS_DISK_RESERVED: 0,
+ DS_DISK_OVERHEAD: 0,
+ }
+
+DSS_PARAMETER_TYPES = {
+ DS_DISK_TOTAL: VTYPE_INT,
+ DS_DISK_RESERVED: VTYPE_INT,
+ DS_DISK_OVERHEAD: VTYPE_INT,
+ }
+
+DSS_PARAMETERS = frozenset(DSS_PARAMETER_TYPES.keys())
+DS_VALID_TYPES = frozenset([LD_LV])
+
# Backend parameter names
-BE_MEMORY = "memory"
+BE_MEMORY = "memory" # deprecated and replaced by max and min mem
+BE_MAXMEM = "maxmem"
+BE_MINMEM = "minmem"
BE_VCPUS = "vcpus"
BE_AUTO_BALANCE = "auto_balance"
+BE_ALWAYS_FAILOVER = "always_failover"
BES_PARAMETER_TYPES = {
- BE_MEMORY: VTYPE_SIZE,
- BE_VCPUS: VTYPE_INT,
- BE_AUTO_BALANCE: VTYPE_BOOL,
- }
+ BE_MAXMEM: VTYPE_SIZE,
+ BE_MINMEM: VTYPE_SIZE,
+ BE_VCPUS: VTYPE_INT,
+ BE_AUTO_BALANCE: VTYPE_BOOL,
+ BE_ALWAYS_FAILOVER: VTYPE_BOOL,
+ }
+
+BES_PARAMETER_COMPAT = {
+ BE_MEMORY: VTYPE_SIZE,
+ }
+BES_PARAMETER_COMPAT.update(BES_PARAMETER_TYPES)
BES_PARAMETERS = frozenset(BES_PARAMETER_TYPES.keys())
+# instance specs
+ISPEC_MEM_SIZE = "memory-size"
+ISPEC_CPU_COUNT = "cpu-count"
+ISPEC_DISK_COUNT = "disk-count"
+ISPEC_DISK_SIZE = "disk-size"
+ISPEC_NIC_COUNT = "nic-count"
+
+ISPECS_PARAMETER_TYPES = {
+ ISPEC_MEM_SIZE: VTYPE_INT,
+ ISPEC_CPU_COUNT: VTYPE_INT,
+ ISPEC_DISK_COUNT: VTYPE_INT,
+ ISPEC_DISK_SIZE: VTYPE_INT,
+ ISPEC_NIC_COUNT: VTYPE_INT,
+ }
+
+ISPECS_PARAMETERS = frozenset(ISPECS_PARAMETER_TYPES.keys())
+
+ISPECS_MIN = "min"
+ISPECS_MAX = "max"
+ISPECS_STD = "std"
+
+IPOLICY_PARAMETERS = frozenset([
+ ISPECS_MIN,
+ ISPECS_MAX,
+ ISPECS_STD,
+ ])
+
# Node parameter names
ND_OOB_PROGRAM = "oob_program"
NDS_PARAMETER_TYPES = {
- ND_OOB_PROGRAM: VTYPE_MAYBE_STRING,
- }
+ ND_OOB_PROGRAM: VTYPE_MAYBE_STRING,
+ }
NDS_PARAMETERS = frozenset(NDS_PARAMETER_TYPES.keys())
+# Logical Disks parameters
+LDP_RESYNC_RATE = "resync-rate"
+LDP_STRIPES = "stripes"
+LDP_BARRIERS = "disabled-barriers"
+LDP_NO_META_FLUSH = "disable-meta-flush"
+LDP_DEFAULT_METAVG = "default-metavg"
+LDP_DISK_CUSTOM = "disk-custom"
+LDP_NET_CUSTOM = "net-custom"
+LDP_DYNAMIC_RESYNC = "dynamic-resync"
+LDP_PLAN_AHEAD = "c-plan-ahead"
+LDP_FILL_TARGET = "c-fill-target"
+LDP_DELAY_TARGET = "c-delay-target"
+LDP_MAX_RATE = "c-max-rate"
+LDP_MIN_RATE = "c-min-rate"
+DISK_LD_TYPES = {
+ LDP_RESYNC_RATE: VTYPE_INT,
+ LDP_STRIPES: VTYPE_INT,
+ LDP_BARRIERS: VTYPE_STRING,
+ LDP_NO_META_FLUSH: VTYPE_BOOL,
+ LDP_DEFAULT_METAVG: VTYPE_STRING,
+ LDP_DISK_CUSTOM: VTYPE_STRING,
+ LDP_NET_CUSTOM: VTYPE_STRING,
+ LDP_DYNAMIC_RESYNC: VTYPE_BOOL,
+ LDP_PLAN_AHEAD: VTYPE_INT,
+ LDP_FILL_TARGET: VTYPE_INT,
+ LDP_DELAY_TARGET: VTYPE_INT,
+ LDP_MAX_RATE: VTYPE_INT,
+ LDP_MIN_RATE: VTYPE_INT,
+ }
+DISK_LD_PARAMETERS = frozenset(DISK_LD_TYPES.keys())
+
+# Disk template parameters (can be set/changed by the user via gnt-cluster and
+# gnt-group)
+DRBD_RESYNC_RATE = "resync-rate"
+DRBD_DATA_STRIPES = "data-stripes"
+DRBD_META_STRIPES = "meta-stripes"
+DRBD_DISK_BARRIERS = "disk-barriers"
+DRBD_META_BARRIERS = "meta-barriers"
+DRBD_DEFAULT_METAVG = "metavg"
+DRBD_DISK_CUSTOM = "disk-custom"
+DRBD_NET_CUSTOM = "net-custom"
+DRBD_DYNAMIC_RESYNC = "dynamic-resync"
+DRBD_PLAN_AHEAD = "c-plan-ahead"
+DRBD_FILL_TARGET = "c-fill-target"
+DRBD_DELAY_TARGET = "c-delay-target"
+DRBD_MAX_RATE = "c-max-rate"
+DRBD_MIN_RATE = "c-min-rate"
+LV_STRIPES = "stripes"
+DISK_DT_TYPES = {
+ DRBD_RESYNC_RATE: VTYPE_INT,
+ DRBD_DATA_STRIPES: VTYPE_INT,
+ DRBD_META_STRIPES: VTYPE_INT,
+ DRBD_DISK_BARRIERS: VTYPE_STRING,
+ DRBD_META_BARRIERS: VTYPE_BOOL,
+ DRBD_DEFAULT_METAVG: VTYPE_STRING,
+ DRBD_DISK_CUSTOM: VTYPE_STRING,
+ DRBD_NET_CUSTOM: VTYPE_STRING,
+ DRBD_DYNAMIC_RESYNC: VTYPE_BOOL,
+ DRBD_PLAN_AHEAD: VTYPE_INT,
+ DRBD_FILL_TARGET: VTYPE_INT,
+ DRBD_DELAY_TARGET: VTYPE_INT,
+ DRBD_MAX_RATE: VTYPE_INT,
+ DRBD_MIN_RATE: VTYPE_INT,
+ LV_STRIPES: VTYPE_INT,
+ }
+
+DISK_DT_PARAMETERS = frozenset(DISK_DT_TYPES.keys())
+
# OOB supported commands
OOB_POWER_ON = "power-on"
OOB_POWER_OFF = "power-off"
NIC_VALID_MODES = frozenset([NIC_MODE_BRIDGED, NIC_MODE_ROUTED])
NICS_PARAMETER_TYPES = {
- NIC_MODE: VTYPE_STRING,
- NIC_LINK: VTYPE_STRING,
- }
+ NIC_MODE: VTYPE_STRING,
+ NIC_LINK: VTYPE_STRING,
+ }
NICS_PARAMETERS = frozenset(NICS_PARAMETER_TYPES.keys())
VERIFY_NPLUSONE_MEM = "nplusone_mem"
VERIFY_OPTIONAL_CHECKS = frozenset([VERIFY_NPLUSONE_MEM])
+# Cluster Verify error classes
+CV_TCLUSTER = "cluster"
+CV_TNODE = "node"
+CV_TINSTANCE = "instance"
+
+# Cluster Verify error codes and documentation
+CV_ECLUSTERCFG = \
+ (CV_TCLUSTER, "ECLUSTERCFG", "Cluster configuration verification failure")
+CV_ECLUSTERCERT = \
+ (CV_TCLUSTER, "ECLUSTERCERT",
+ "Cluster certificate files verification failure")
+CV_ECLUSTERFILECHECK = \
+ (CV_TCLUSTER, "ECLUSTERFILECHECK",
+ "Cluster configuration verification failure")
+CV_ECLUSTERDANGLINGNODES = \
+ (CV_TNODE, "ECLUSTERDANGLINGNODES",
+ "Some nodes belong to non-existing groups")
+CV_ECLUSTERDANGLINGINST = \
+ (CV_TNODE, "ECLUSTERDANGLINGINST",
+ "Some instances have a non-existing primary node")
+CV_EINSTANCEBADNODE = \
+ (CV_TINSTANCE, "EINSTANCEBADNODE",
+ "Instance marked as running lives on an offline node")
+CV_EINSTANCEDOWN = \
+ (CV_TINSTANCE, "EINSTANCEDOWN", "Instance not running on its primary node")
+CV_EINSTANCELAYOUT = \
+ (CV_TINSTANCE, "EINSTANCELAYOUT", "Instance has multiple secondary nodes")
+CV_EINSTANCEMISSINGDISK = \
+ (CV_TINSTANCE, "EINSTANCEMISSINGDISK", "Missing volume on an instance")
+CV_EINSTANCEFAULTYDISK = \
+ (CV_TINSTANCE, "EINSTANCEFAULTYDISK",
+ "Impossible to retrieve status for a disk")
+CV_EINSTANCEWRONGNODE = \
+ (CV_TINSTANCE, "EINSTANCEWRONGNODE", "Instance running on the wrong node")
+CV_EINSTANCESPLITGROUPS = \
+ (CV_TINSTANCE, "EINSTANCESPLITGROUPS",
+ "Instance with primary and secondary nodes in different groups")
+CV_EINSTANCEPOLICY = \
+ (CV_TINSTANCE, "EINSTANCEPOLICY",
+ "Instance does not meet policy")
+CV_ENODEDRBD = \
+ (CV_TNODE, "ENODEDRBD", "Error parsing the DRBD status file")
+CV_ENODEDRBDHELPER = \
+ (CV_TNODE, "ENODEDRBDHELPER", "Error caused by the DRBD helper")
+CV_ENODEFILECHECK = \
+ (CV_TNODE, "ENODEFILECHECK",
+ "Error retrieving the checksum of the node files")
+CV_ENODEHOOKS = \
+ (CV_TNODE, "ENODEHOOKS", "Communication failure in hooks execution")
+CV_ENODEHV = \
+ (CV_TNODE, "ENODEHV", "Hypervisor parameters verification failure")
+CV_ENODELVM = \
+ (CV_TNODE, "ENODELVM", "LVM-related node error")
+CV_ENODEN1 = \
+ (CV_TNODE, "ENODEN1", "Not enough memory to accommodate instance failovers")
+CV_ENODENET = \
+ (CV_TNODE, "ENODENET", "Network-related node error")
+CV_ENODEOS = \
+ (CV_TNODE, "ENODEOS", "OS-related node error")
+CV_ENODEORPHANINSTANCE = \
+ (CV_TNODE, "ENODEORPHANINSTANCE", "Unknown instance running on a node")
+CV_ENODEORPHANLV = \
+ (CV_TNODE, "ENODEORPHANLV", "Unknown LVM logical volume")
+CV_ENODERPC = \
+ (CV_TNODE, "ENODERPC",
+ "Error during connection to the primary node of an instance")
+CV_ENODESSH = \
+ (CV_TNODE, "ENODESSH", "SSH-related node error")
+CV_ENODEVERSION = \
+ (CV_TNODE, "ENODEVERSION",
+ "Protocol version mismatch or Ganeti version mismatch")
+CV_ENODESETUP = \
+ (CV_TNODE, "ENODESETUP", "Node setup error")
+CV_ENODETIME = \
+ (CV_TNODE, "ENODETIME", "Node returned invalid time")
+CV_ENODEOOBPATH = \
+ (CV_TNODE, "ENODEOOBPATH", "Invalid Out Of Band path")
+CV_ENODEUSERSCRIPTS = \
+ (CV_TNODE, "ENODEUSERSCRIPTS", "User scripts not present or not executable")
+
+CV_ALL_ECODES = frozenset([
+ CV_ECLUSTERCFG,
+ CV_ECLUSTERCERT,
+ CV_ECLUSTERFILECHECK,
+ CV_ECLUSTERDANGLINGNODES,
+ CV_ECLUSTERDANGLINGINST,
+ CV_EINSTANCEBADNODE,
+ CV_EINSTANCEDOWN,
+ CV_EINSTANCELAYOUT,
+ CV_EINSTANCEMISSINGDISK,
+ CV_EINSTANCEFAULTYDISK,
+ CV_EINSTANCEWRONGNODE,
+ CV_EINSTANCESPLITGROUPS,
+ CV_EINSTANCEPOLICY,
+ CV_ENODEDRBD,
+ CV_ENODEDRBDHELPER,
+ CV_ENODEFILECHECK,
+ CV_ENODEHOOKS,
+ CV_ENODEHV,
+ CV_ENODELVM,
+ CV_ENODEN1,
+ CV_ENODENET,
+ CV_ENODEOS,
+ CV_ENODEORPHANINSTANCE,
+ CV_ENODEORPHANLV,
+ CV_ENODERPC,
+ CV_ENODESSH,
+ CV_ENODEVERSION,
+ CV_ENODESETUP,
+ CV_ENODETIME,
+ CV_ENODEOOBPATH,
+ CV_ENODEUSERSCRIPTS,
+ ])
+
+CV_ALL_ECODES_STRINGS = frozenset(estr for (_, estr, _) in CV_ALL_ECODES)
+
# Node verify constants
NV_DRBDHELPER = "drbd-helper"
NV_DRBDLIST = "drbd-list"
NV_VMNODES = "vmnodes"
NV_OOB_PATHS = "oob-paths"
NV_BRIDGES = "bridges"
+NV_USERSCRIPTS = "user-scripts"
# Instance status
INSTST_RUNNING = "running"
INSTST_ADMINDOWN = "ADMIN_down"
+INSTST_ADMINOFFLINE = "ADMIN_offline"
INSTST_NODEOFFLINE = "ERROR_nodeoffline"
INSTST_NODEDOWN = "ERROR_nodedown"
INSTST_WRONGNODE = "ERROR_wrongnode"
INSTST_ALL = frozenset([
INSTST_RUNNING,
INSTST_ADMINDOWN,
+ INSTST_ADMINOFFLINE,
INSTST_NODEOFFLINE,
INSTST_NODEDOWN,
INSTST_WRONGNODE,
INSTST_ERRORDOWN,
])
+# Admin states
+ADMINST_UP = "up"
+ADMINST_DOWN = "down"
+ADMINST_OFFLINE = "offline"
+ADMINST_ALL = frozenset([
+ ADMINST_UP,
+ ADMINST_DOWN,
+ ADMINST_OFFLINE,
+ ])
+
# Node roles
NR_REGULAR = "R"
NR_MASTER = "M"
SS_MASTER_CANDIDATES_IPS = "master_candidates_ips"
SS_MASTER_IP = "master_ip"
SS_MASTER_NETDEV = "master_netdev"
+SS_MASTER_NETMASK = "master_netmask"
SS_MASTER_NODE = "master_node"
SS_NODE_LIST = "node_list"
SS_NODE_PRIMARY_IPS = "node_primary_ips"
HV_MIGRATION_MODE: HT_MIGRATION_LIVE,
HV_BLOCKDEV_PREFIX: "sd",
HV_REBOOT_BEHAVIOR: INSTANCE_REBOOT_ALLOWED,
+ HV_CPU_MASK: CPU_PINNING_ALL,
},
HT_XEN_HVM: {
HV_BOOT_ORDER: "cd",
HV_USE_LOCALTIME: False,
HV_BLOCKDEV_PREFIX: "hd",
HV_REBOOT_BEHAVIOR: INSTANCE_REBOOT_ALLOWED,
+ HV_CPU_MASK: CPU_PINNING_ALL,
},
HT_KVM: {
HV_KERNEL_PATH: "/boot/vmlinuz-2.6-kvmU",
HV_KVM_SPICE_ZLIB_GLZ_IMG_COMPR: "",
HV_KVM_SPICE_STREAMING_VIDEO_DETECTION: "",
HV_KVM_SPICE_AUDIO_COMPR: True,
+ HV_KVM_SPICE_USE_TLS: False,
+ HV_KVM_SPICE_TLS_CIPHERS: OPENSSL_CIPHERS,
+ HV_KVM_SPICE_USE_VDAGENT: True,
HV_KVM_FLOPPY_IMAGE_PATH: "",
HV_CDROM_IMAGE_PATH: "",
HV_KVM_CDROM2_IMAGE_PATH: "",
HV_KVM_USE_CHROOT: False,
HV_MEM_PATH: "",
HV_REBOOT_BEHAVIOR: INSTANCE_REBOOT_ALLOWED,
+ HV_CPU_MASK: CPU_PINNING_ALL,
},
HT_FAKE: {
},
])
BEC_DEFAULTS = {
- BE_MEMORY: 128,
+ BE_MINMEM: 128,
+ BE_MAXMEM: 128,
BE_VCPUS: 1,
BE_AUTO_BALANCE: True,
+ BE_ALWAYS_FAILOVER: False,
}
NDC_DEFAULTS = {
ND_OOB_PROGRAM: None,
}
+DISK_LD_DEFAULTS = {
+ LD_DRBD8: {
+ LDP_RESYNC_RATE: CLASSIC_DRBD_SYNC_SPEED,
+ LDP_BARRIERS: _autoconf.DRBD_BARRIERS,
+ LDP_NO_META_FLUSH: _autoconf.DRBD_NO_META_FLUSH,
+ LDP_DEFAULT_METAVG: DEFAULT_VG,
+ LDP_DISK_CUSTOM: "",
+ LDP_NET_CUSTOM: "",
+ LDP_DYNAMIC_RESYNC: False,
+
+ # The default values for the DRBD dynamic resync speed algorithm are taken
+ # from the drbdsetup 8.3.11 man page, except for c-plan-ahead (that we
+ # don't need to set to 0, because we have a separate option to enable it)
+ # and for c-max-rate, that we cap to the default value for the static resync
+ # rate.
+ LDP_PLAN_AHEAD: 20, # ds
+ LDP_FILL_TARGET: 0, # sectors
+ LDP_DELAY_TARGET: 1, # ds
+ LDP_MAX_RATE: CLASSIC_DRBD_SYNC_SPEED, # KiB/s
+ LDP_MIN_RATE: 4 * 1024, # KiB/s
+ },
+ LD_LV: {
+ LDP_STRIPES: _autoconf.LVM_STRIPECOUNT
+ },
+ LD_FILE: {
+ },
+ LD_BLOCKDEV: {
+ },
+ }
+
+# readability shortcuts
+_LV_DEFAULTS = DISK_LD_DEFAULTS[LD_LV]
+_DRBD_DEFAULTS = DISK_LD_DEFAULTS[LD_DRBD8]
+
+DISK_DT_DEFAULTS = {
+ DT_PLAIN: {
+ LV_STRIPES: DISK_LD_DEFAULTS[LD_LV][LDP_STRIPES],
+ },
+ DT_DRBD8: {
+ DRBD_RESYNC_RATE: _DRBD_DEFAULTS[LDP_RESYNC_RATE],
+ DRBD_DATA_STRIPES: _LV_DEFAULTS[LDP_STRIPES],
+ DRBD_META_STRIPES: _LV_DEFAULTS[LDP_STRIPES],
+ DRBD_DISK_BARRIERS: _DRBD_DEFAULTS[LDP_BARRIERS],
+ DRBD_META_BARRIERS: _DRBD_DEFAULTS[LDP_NO_META_FLUSH],
+ DRBD_DEFAULT_METAVG: _DRBD_DEFAULTS[LDP_DEFAULT_METAVG],
+ DRBD_DISK_CUSTOM: _DRBD_DEFAULTS[LDP_DISK_CUSTOM],
+ DRBD_NET_CUSTOM: _DRBD_DEFAULTS[LDP_NET_CUSTOM],
+ DRBD_DYNAMIC_RESYNC: _DRBD_DEFAULTS[LDP_DYNAMIC_RESYNC],
+ DRBD_PLAN_AHEAD: _DRBD_DEFAULTS[LDP_PLAN_AHEAD],
+ DRBD_FILL_TARGET: _DRBD_DEFAULTS[LDP_FILL_TARGET],
+ DRBD_DELAY_TARGET: _DRBD_DEFAULTS[LDP_DELAY_TARGET],
+ DRBD_MAX_RATE: _DRBD_DEFAULTS[LDP_MAX_RATE],
+ DRBD_MIN_RATE: _DRBD_DEFAULTS[LDP_MIN_RATE],
+ },
+ DT_DISKLESS: {
+ },
+ DT_FILE: {
+ },
+ DT_SHARED_FILE: {
+ },
+ DT_BLOCK: {
+ },
+ }
+
+# we don't want to export the shortcuts
+del _LV_DEFAULTS, _DRBD_DEFAULTS
+
NICC_DEFAULTS = {
NIC_MODE: NIC_MODE_BRIDGED,
NIC_LINK: DEFAULT_BRIDGE,
}
+IPOLICY_DEFAULTS = {
+ ISPECS_MIN: {
+ ISPEC_MEM_SIZE: 128,
+ ISPEC_CPU_COUNT: 1,
+ ISPEC_DISK_COUNT: 1,
+ ISPEC_DISK_SIZE: 1024,
+ ISPEC_NIC_COUNT: 1,
+ },
+ ISPECS_MAX: {
+ ISPEC_MEM_SIZE: 128,
+ ISPEC_CPU_COUNT: 1,
+ ISPEC_DISK_COUNT: 1,
+ ISPEC_DISK_SIZE: 1024,
+ ISPEC_NIC_COUNT: 1,
+ },
+ ISPECS_STD: {
+ ISPEC_MEM_SIZE: 128,
+ ISPEC_CPU_COUNT: 1,
+ ISPEC_DISK_COUNT: 1,
+ ISPEC_DISK_SIZE: 1024,
+ ISPEC_NIC_COUNT: 1,
+ }
+ }
+
MASTER_POOL_SIZE_DEFAULT = 10
CONFD_PROTOCOL_VERSION = 1
# Temporary external/shared storage parameters
BLOCKDEV_DRIVER_MANUAL = "manual"
+# qemu-img path, required for ovfconverter
+QEMUIMG_PATH = _autoconf.QEMUIMG_PATH
+
# Whether htools was enabled at compilation time
HTOOLS = _autoconf.HTOOLS
# The hail iallocator
# backend.RunLocalHooks
FAKE_OP_MASTER_TURNUP = "OP_CLUSTER_IP_TURNUP"
FAKE_OP_MASTER_TURNDOWN = "OP_CLUSTER_IP_TURNDOWN"
+
+# Do not re-export imported modules
+del re, _vcsversion, _autoconf
"""
def __init__(self, timefunc):
- sched.scheduler.__init__(self, timefunc, AsyncoreDelayFunction)
+ """Initializes this class.
+
+ """
+ sched.scheduler.__init__(self, timefunc, self._LimitedDelay)
+ self._max_delay = None
+
+ def run(self, max_delay=None): # pylint: disable=W0221
+ """Run any pending events.
+
+ @type max_delay: None or number
+ @param max_delay: Maximum delay (useful if caller has timeouts running)
+
+ """
+ assert self._max_delay is None
+
+ # The delay function used by the scheduler can't be different on each run,
+ # hence an instance variable must be used.
+ if max_delay is None:
+ self._max_delay = None
+ else:
+ self._max_delay = utils.RunningTimeout(max_delay, False)
+
+ try:
+ return sched.scheduler.run(self)
+ finally:
+ self._max_delay = None
+
+ def _LimitedDelay(self, duration):
+ """Custom delay function for C{sched.scheduler}.
+
+ """
+ if self._max_delay is None:
+ timeout = duration
+ else:
+ timeout = min(duration, self._max_delay.Remaining())
+
+ return AsyncoreDelayFunction(timeout)
class GanetiBaseAsyncoreDispatcher(asyncore.dispatcher):
self.out_socket.send("\0")
+class _ShutdownCheck:
+ """Logic for L{Mainloop} shutdown.
+
+ """
+ def __init__(self, fn):
+ """Initializes this class.
+
+ @type fn: callable
+ @param fn: Function returning C{None} if mainloop can be stopped or a
+ duration in seconds after which the function should be called again
+ @see: L{Mainloop.Run}
+
+ """
+ assert callable(fn)
+
+ self._fn = fn
+ self._defer = None
+
+ def CanShutdown(self):
+ """Checks whether mainloop can be stopped.
+
+ @rtype: bool
+
+ """
+ if self._defer and self._defer.Remaining() > 0:
+ # A deferred check has already been scheduled
+ return False
+
+ # Ask mainloop driver whether we can stop or should check again
+ timeout = self._fn()
+
+ if timeout is None:
+ # Yes, can stop mainloop
+ return True
+
+ # Schedule another check in the future
+ self._defer = utils.RunningTimeout(timeout, True)
+
+ return False
+
+
class Mainloop(object):
"""Generic mainloop for daemons
timed events
"""
+ _SHUTDOWN_TIMEOUT_PRIORITY = -(sys.maxint - 1)
+
def __init__(self):
"""Constructs a new Mainloop instance.
@utils.SignalHandled([signal.SIGCHLD])
@utils.SignalHandled([signal.SIGTERM])
@utils.SignalHandled([signal.SIGINT])
- def Run(self, signal_handlers=None):
+ def Run(self, shutdown_wait_fn=None, signal_handlers=None):
"""Runs the mainloop.
+ @type shutdown_wait_fn: callable
+ @param shutdown_wait_fn: Function to check whether loop can be terminated;
+ B{important}: function must be idempotent and must return either None
+ for shutting down or a timeout for another call
@type signal_handlers: dict
@param signal_handlers: signal->L{utils.SignalHandler} passed by decorator
assert isinstance(signal_handlers, dict) and \
len(signal_handlers) > 0, \
"Broken SignalHandled decorator"
- running = True
+
+ # Counter for received signals
+ shutdown_signals = 0
+
+ # Logic to wait for shutdown
+ shutdown_waiter = None
# Start actual main loop
- while running:
- if not self.scheduler.empty():
+ while True:
+ if shutdown_signals == 1 and shutdown_wait_fn is not None:
+ if shutdown_waiter is None:
+ shutdown_waiter = _ShutdownCheck(shutdown_wait_fn)
+
+ # Let mainloop driver decide if we can already abort
+ if shutdown_waiter.CanShutdown():
+ break
+
+ # Re-evaluate in a second
+ timeout = 1.0
+
+ elif shutdown_signals >= 1:
+ # Abort loop if more than one signal has been sent or no callback has
+ # been given
+ break
+
+ else:
+ # Wait forever on I/O events
+ timeout = None
+
+ if self.scheduler.empty():
+ asyncore.loop(count=1, timeout=timeout, use_poll=True)
+ else:
try:
- self.scheduler.run()
+ self.scheduler.run(max_delay=timeout)
except SchedulerBreakout:
pass
- else:
- asyncore.loop(count=1, use_poll=True)
# Check whether a signal was raised
- for sig in signal_handlers:
- handler = signal_handlers[sig]
+ for (sig, handler) in signal_handlers.items():
if handler.called:
self._CallSignalWaiters(sig)
- running = sig not in (signal.SIGTERM, signal.SIGINT)
+ if sig in (signal.SIGTERM, signal.SIGINT):
+ logging.info("Received signal %s asking for shutdown", sig)
+ shutdown_signals += 1
handler.Clear()
def _CallSignalWaiters(self, signum):
"""
+class X509CertError(GenericError):
+ """Invalid X509 certificate.
+
+ This error has two arguments: the certificate filename and the error cause.
+
+ """
+
+
class TagError(GenericError):
"""Generic tag error.
import logging
import pycurl
+import threading
from cStringIO import StringIO
from ganeti import http
from ganeti import compat
from ganeti import netutils
+from ganeti import locking
class HttpClientRequest(object):
def __init__(self, host, port, method, path, headers=None, post_data=None,
- read_timeout=None, curl_config_fn=None):
+ read_timeout=None, curl_config_fn=None, nicename=None,
+ completion_cb=None):
"""Describes an HTTP request.
@type host: string
timeout while reading the response from the server
@type curl_config_fn: callable
@param curl_config_fn: Function to configure cURL object before request
- (Note: if the function configures the connection in
- a way where it wouldn't be efficient to reuse them,
- a "identity" property should be defined, see
- L{HttpClientRequest.identity})
+ @type nicename: string
+ @param nicename: Name, presentable to a user, to describe this request (no
+ whitespace)
+ @type completion_cb: callable accepting this request object as a single
+ parameter
+ @param completion_cb: Callback for request completion
"""
assert path.startswith("/"), "Path must start with slash (/)"
assert curl_config_fn is None or callable(curl_config_fn)
+ assert completion_cb is None or callable(completion_cb)
# Request attributes
self.host = host
self.path = path
self.read_timeout = read_timeout
self.curl_config_fn = curl_config_fn
+ self.nicename = nicename
+ self.completion_cb = completion_cb
if post_data is None:
self.post_data = ""
# TODO: Support for non-SSL requests
return "https://%s%s" % (address, self.path)
- @property
- def identity(self):
- """Returns identifier for retrieving a pooled connection for this request.
- This allows cURL client objects to be re-used and to cache information
- (e.g. SSL session IDs or connections).
+def _StartRequest(curl, req):
+ """Starts a request on a cURL object.
- """
- parts = [self.host, self.port]
+ @type curl: pycurl.Curl
+ @param curl: cURL object
+ @type req: L{HttpClientRequest}
+ @param req: HTTP request
- if self.curl_config_fn:
- try:
- parts.append(self.curl_config_fn.identity)
- except AttributeError:
- pass
+ """
+ logging.debug("Starting request %r", req)
- return "/".join(str(i) for i in parts)
+ url = req.url
+ method = req.method
+ post_data = req.post_data
+ headers = req.headers
+ # PycURL requires strings to be non-unicode
+ assert isinstance(method, str)
+ assert isinstance(url, str)
+ assert isinstance(post_data, str)
+ assert compat.all(isinstance(i, str) for i in headers)
-class _HttpClient(object):
- def __init__(self, curl_config_fn):
- """Initializes this class.
+ # Buffer for response
+ resp_buffer = StringIO()
- @type curl_config_fn: callable
- @param curl_config_fn: Function to configure cURL object after
- initialization
+ # Configure client for request
+ curl.setopt(pycurl.VERBOSE, False)
+ curl.setopt(pycurl.NOSIGNAL, True)
+ curl.setopt(pycurl.USERAGENT, http.HTTP_GANETI_VERSION)
+ curl.setopt(pycurl.PROXY, "")
+ curl.setopt(pycurl.CUSTOMREQUEST, str(method))
+ curl.setopt(pycurl.URL, url)
+ curl.setopt(pycurl.POSTFIELDS, post_data)
+ curl.setopt(pycurl.HTTPHEADER, headers)
- """
- self._req = None
+ if req.read_timeout is None:
+ curl.setopt(pycurl.TIMEOUT, 0)
+ else:
+ curl.setopt(pycurl.TIMEOUT, int(req.read_timeout))
- curl = self._CreateCurlHandle()
- curl.setopt(pycurl.VERBOSE, False)
- curl.setopt(pycurl.NOSIGNAL, True)
- curl.setopt(pycurl.USERAGENT, http.HTTP_GANETI_VERSION)
- curl.setopt(pycurl.PROXY, "")
+ # Disable SSL session ID caching (pycurl >= 7.16.0)
+ if hasattr(pycurl, "SSL_SESSIONID_CACHE"):
+ curl.setopt(pycurl.SSL_SESSIONID_CACHE, False)
- # Disable SSL session ID caching (pycurl >= 7.16.0)
- if hasattr(pycurl, "SSL_SESSIONID_CACHE"):
- curl.setopt(pycurl.SSL_SESSIONID_CACHE, False)
+ curl.setopt(pycurl.WRITEFUNCTION, resp_buffer.write)
- # Pass cURL object to external config function
- if curl_config_fn:
- curl_config_fn(curl)
+ # Pass cURL object to external config function
+ if req.curl_config_fn:
+ req.curl_config_fn(curl)
- self._curl = curl
+ return _PendingRequest(curl, req, resp_buffer.getvalue)
- @staticmethod
- def _CreateCurlHandle():
- """Returns a new cURL object.
+
+class _PendingRequest:
+ def __init__(self, curl, req, resp_buffer_read):
+ """Initializes this class.
+
+ @type curl: pycurl.Curl
+ @param curl: cURL object
+ @type req: L{HttpClientRequest}
+ @param req: HTTP request
+ @type resp_buffer_read: callable
+ @param resp_buffer_read: Function to read response body
"""
- return pycurl.Curl()
+ assert req.success is None
+
+ self._curl = curl
+ self._req = req
+ self._resp_buffer_read = resp_buffer_read
def GetCurlHandle(self):
"""Returns the cURL object.
def GetCurrentRequest(self):
"""Returns the current request.
- @rtype: L{HttpClientRequest} or None
-
"""
return self._req
- def StartRequest(self, req):
- """Starts a request on this client.
-
- @type req: L{HttpClientRequest}
- @param req: HTTP request
-
- """
- assert not self._req, "Another request is already started"
-
- logging.debug("Starting request %r", req)
-
- self._req = req
- self._resp_buffer = StringIO()
-
- url = req.url
- method = req.method
- post_data = req.post_data
- headers = req.headers
-
- # PycURL requires strings to be non-unicode
- assert isinstance(method, str)
- assert isinstance(url, str)
- assert isinstance(post_data, str)
- assert compat.all(isinstance(i, str) for i in headers)
-
- # Configure cURL object for request
- curl = self._curl
- curl.setopt(pycurl.CUSTOMREQUEST, str(method))
- curl.setopt(pycurl.URL, url)
- curl.setopt(pycurl.POSTFIELDS, post_data)
- curl.setopt(pycurl.WRITEFUNCTION, self._resp_buffer.write)
- curl.setopt(pycurl.HTTPHEADER, headers)
-
- if req.read_timeout is None:
- curl.setopt(pycurl.TIMEOUT, 0)
- else:
- curl.setopt(pycurl.TIMEOUT, int(req.read_timeout))
-
- # Pass cURL object to external config function
- if req.curl_config_fn:
- req.curl_config_fn(curl)
-
def Done(self, errmsg):
"""Finishes a request.
@param errmsg: Error message if request failed
"""
+ curl = self._curl
req = self._req
- assert req, "No request"
- logging.debug("Request %s finished, errmsg=%s", req, errmsg)
+ assert req.success is None, "Request has already been finalized"
- curl = self._curl
+ logging.debug("Request %s finished, errmsg=%s", req, errmsg)
req.success = not bool(errmsg)
req.error = errmsg
# Get HTTP response code
req.resp_status_code = curl.getinfo(pycurl.RESPONSE_CODE)
- req.resp_body = self._resp_buffer.getvalue()
-
- # Reset client object
- self._req = None
- self._resp_buffer = None
+ req.resp_body = self._resp_buffer_read()
# Ensure no potentially large variables are referenced
curl.setopt(pycurl.POSTFIELDS, "")
curl.setopt(pycurl.WRITEFUNCTION, lambda _: None)
+ if req.completion_cb:
+ req.completion_cb(req)
-class _PooledHttpClient:
- """Data structure for HTTP client pool.
-
- """
- def __init__(self, identity, client):
- """Initializes this class.
-
- @type identity: string
- @param identity: Client identifier for pool
- @type client: L{_HttpClient}
- @param client: HTTP client
-
- """
- self.identity = identity
- self.client = client
- self.lastused = 0
-
- def __repr__(self):
- status = ["%s.%s" % (self.__class__.__module__, self.__class__.__name__),
- "id=%s" % self.identity,
- "lastuse=%s" % self.lastused,
- repr(self.client)]
- return "<%s at %#x>" % (" ".join(status), id(self))
+class _NoOpRequestMonitor: # pylint: disable=W0232
+ """No-op request monitor.
+ """
+ @staticmethod
+ def acquire(*args, **kwargs):
+ pass
-class HttpClientPool:
- """A simple HTTP client pool.
+ release = acquire
+ Disable = acquire
- Supports one pooled connection per identity (see
- L{HttpClientRequest.identity}).
- """
- #: After how many generations to drop unused clients
- _MAX_GENERATIONS_DROP = 25
+class _PendingRequestMonitor:
+ _LOCK = "_lock"
- def __init__(self, curl_config_fn):
+ def __init__(self, owner, pending_fn):
"""Initializes this class.
- @type curl_config_fn: callable
- @param curl_config_fn: Function to configure cURL object after
- initialization
-
"""
- self._curl_config_fn = curl_config_fn
- self._generation = 0
- self._pool = {}
+ self._owner = owner
+ self._pending_fn = pending_fn
- # Create custom logger for HTTP client pool. Change logging level to
- # C{logging.NOTSET} to get more details.
- self._logger = logging.getLogger(self.__class__.__name__)
- self._logger.setLevel(logging.INFO)
+ # The lock monitor runs in another thread, hence locking is necessary
+ self._lock = locking.SharedLock("PendingHttpRequests")
+ self.acquire = self._lock.acquire
+ self.release = self._lock.release
- @staticmethod
- def _GetHttpClientCreator():
- """Returns callable to create HTTP client.
+ @locking.ssynchronized(_LOCK)
+ def Disable(self):
+ """Disable monitor.
"""
- return _HttpClient
+ self._pending_fn = None
- def _Get(self, identity):
- """Gets an HTTP client from the pool.
+ @locking.ssynchronized(_LOCK, shared=1)
+ def GetLockInfo(self, requested): # pylint: disable=W0613
+ """Retrieves information about pending requests.
- @type identity: string
- @param identity: Client identifier
+ @type requested: set
+ @param requested: Requested information, see C{query.LQ_*}
"""
- try:
- pclient = self._pool.pop(identity)
- except KeyError:
- # Need to create new client
- client = self._GetHttpClientCreator()(self._curl_config_fn)
- pclient = _PooledHttpClient(identity, client)
- self._logger.debug("Created new client %s", pclient)
- else:
- self._logger.debug("Reusing client %s", pclient)
+ # No need to sort here, that's being done by the lock manager and query
+ # library. There are no priorities for requests, hence all show up as
+ # one item under "pending".
+ result = []
- assert pclient.identity == identity
+ if self._pending_fn:
+ owner_name = self._owner.getName()
- return pclient
+ for client in self._pending_fn():
+ req = client.GetCurrentRequest()
+ if req:
+ if req.nicename is None:
+ name = "%s%s" % (req.host, req.path)
+ else:
+ name = req.nicename
+ result.append(("rpc/%s" % name, None, [owner_name], None))
- def _StartRequest(self, req):
- """Starts a request.
+ return result
- @type req: L{HttpClientRequest}
- @param req: HTTP request
- """
- pclient = self._Get(req.identity)
+def _ProcessCurlRequests(multi, requests):
+ """cURL request processor.
- assert req.identity not in self._pool
+ This generator yields a tuple once for every completed request, successful or
+ not. The first value in the tuple is the handle, the second an error message
+ or C{None} for successful requests.
- pclient.client.StartRequest(req)
- pclient.lastused = self._generation
+ @type multi: C{pycurl.CurlMulti}
+ @param multi: cURL multi object
+ @type requests: sequence
+ @param requests: cURL request handles
- return pclient
-
- def _Return(self, pclients):
- """Returns HTTP clients to the pool.
+ """
+ for curl in requests:
+ multi.add_handle(curl)
- """
- for pc in pclients:
- self._logger.debug("Returning client %s to pool", pc)
- assert pc.identity not in self._pool
- assert pc not in self._pool.values()
- self._pool[pc.identity] = pc
-
- # Check for unused clients
- for pc in self._pool.values():
- if (pc.lastused + self._MAX_GENERATIONS_DROP) < self._generation:
- self._logger.debug("Removing client %s which hasn't been used"
- " for %s generations",
- pc, self._MAX_GENERATIONS_DROP)
- self._pool.pop(pc.identity, None)
-
- assert compat.all(pc.lastused >= (self._generation -
- self._MAX_GENERATIONS_DROP)
- for pc in self._pool.values())
+ while True:
+ (ret, active) = multi.perform()
+ assert ret in (pycurl.E_MULTI_OK, pycurl.E_CALL_MULTI_PERFORM)
- @staticmethod
- def _CreateCurlMultiHandle():
- """Creates new cURL multi handle.
+ if ret == pycurl.E_CALL_MULTI_PERFORM:
+ # cURL wants to be called again
+ continue
- """
- return pycurl.CurlMulti()
+ while True:
+ (remaining_messages, successful, failed) = multi.info_read()
- def ProcessRequests(self, requests):
- """Processes any number of HTTP client requests using pooled objects.
+ for curl in successful:
+ multi.remove_handle(curl)
+ yield (curl, None)
- @type requests: list of L{HttpClientRequest}
- @param requests: List of all requests
+ for curl, errnum, errmsg in failed:
+ multi.remove_handle(curl)
+ yield (curl, "Error %s: %s" % (errnum, errmsg))
- """
- multi = self._CreateCurlMultiHandle()
+ if remaining_messages == 0:
+ break
- # For client cleanup
- self._generation += 1
+ if active == 0:
+ # No active handles anymore
+ break
- assert compat.all((req.error is None and
- req.success is None and
- req.resp_status_code is None and
- req.resp_body is None)
- for req in requests)
+ # Wait for I/O. The I/O timeout shouldn't be too long so that HTTP
+ # timeouts, which are only evaluated in multi.perform, aren't
+ # unnecessarily delayed.
+ multi.select(1.0)
- curl_to_pclient = {}
- for req in requests:
- pclient = self._StartRequest(req)
- curl = pclient.client.GetCurlHandle()
- curl_to_pclient[curl] = pclient
- multi.add_handle(curl)
- assert pclient.client.GetCurrentRequest() == req
- assert pclient.lastused >= 0
- assert len(curl_to_pclient) == len(requests)
+def ProcessRequests(requests, lock_monitor_cb=None, _curl=pycurl.Curl,
+ _curl_multi=pycurl.CurlMulti,
+ _curl_process=_ProcessCurlRequests):
+ """Processes any number of HTTP client requests.
- done_count = 0
- while True:
- (ret, _) = multi.perform()
- assert ret in (pycurl.E_MULTI_OK, pycurl.E_CALL_MULTI_PERFORM)
-
- if ret == pycurl.E_CALL_MULTI_PERFORM:
- # cURL wants to be called again
- continue
-
- while True:
- (remaining_messages, successful, failed) = multi.info_read()
-
- for curl in successful:
- multi.remove_handle(curl)
- done_count += 1
- pclient = curl_to_pclient[curl]
- req = pclient.client.GetCurrentRequest()
- pclient.client.Done(None)
- assert req.success
- assert not pclient.client.GetCurrentRequest()
-
- for curl, errnum, errmsg in failed:
- multi.remove_handle(curl)
- done_count += 1
- pclient = curl_to_pclient[curl]
- req = pclient.client.GetCurrentRequest()
- pclient.client.Done("Error %s: %s" % (errnum, errmsg))
- assert req.error
- assert not pclient.client.GetCurrentRequest()
-
- if remaining_messages == 0:
- break
-
- assert done_count <= len(requests)
-
- if done_count == len(requests):
- break
+ @type requests: list of L{HttpClientRequest}
+ @param requests: List of all requests
+ @param lock_monitor_cb: Callable for registering with lock monitor
- # Wait for I/O. The I/O timeout shouldn't be too long so that HTTP
- # timeouts, which are only evaluated in multi.perform, aren't
- # unnecessarily delayed.
- multi.select(1.0)
+ """
+ assert compat.all((req.error is None and
+ req.success is None and
+ req.resp_status_code is None and
+ req.resp_body is None)
+ for req in requests)
+
+ # Prepare all requests
+ curl_to_client = \
+ dict((client.GetCurlHandle(), client)
+ for client in map(lambda req: _StartRequest(_curl(), req), requests))
+
+ assert len(curl_to_client) == len(requests)
+
+ if lock_monitor_cb:
+ monitor = _PendingRequestMonitor(threading.currentThread(),
+ curl_to_client.values)
+ lock_monitor_cb(monitor)
+ else:
+ monitor = _NoOpRequestMonitor
+
+ # Process all requests and act based on the returned values
+ for (curl, msg) in _curl_process(_curl_multi(), curl_to_client.keys()):
+ monitor.acquire(shared=0)
+ try:
+ curl_to_client.pop(curl).Done(msg)
+ finally:
+ monitor.release()
- assert compat.all(pclient.client.GetCurrentRequest() is None
- for pclient in curl_to_pclient.values())
+ assert not curl_to_client, "Not all requests were processed"
- # Return clients to pool
- self._Return(curl_to_pclient.values())
+ # Don't try to read information anymore as all requests have been processed
+ monitor.Disable()
- assert done_count == len(requests)
- assert compat.all(req.error is not None or
- (req.success and
- req.resp_status_code is not None and
- req.resp_body is not None)
- for req in requests)
+ assert compat.all(req.error is not None or
+ (req.success and
+ req.resp_status_code is not None and
+ req.resp_body is not None)
+ for req in requests)
def _IsCpuMaskWellFormed(cpu_mask):
+ """Verifies if the given single CPU mask is valid
+
+ The single CPU mask should be in the form "a,b,c,d", where each
+ entry is a non-negative number or a range of numbers.
+
+ """
try:
cpu_list = utils.ParseCpuMask(cpu_mask)
except errors.ParseError, _:
return isinstance(cpu_list, list) and len(cpu_list) > 0
+def _IsMultiCpuMaskWellFormed(cpu_mask):
+ """Verifies if the given multiple CPU mask is valid
+
+ A valid multiple CPU mask is in the form "a:b:c:d", where each
+ entry is a single CPU mask.
+
+ """
+ try:
+ utils.ParseMultiCpuMask(cpu_mask)
+ except errors.ParseError, _:
+ return False
+
+ return True
+
+
# Read the BaseHypervisor.PARAMETERS docstring for the syntax of the
# _CHECK values
"CPU mask definition is not well-formed",
None, None)
+# Multiple CPU mask must be well-formed
+_MULTI_CPU_MASK_CHECK = (_IsMultiCpuMaskWellFormed,
+ "Multiple CPU mask definition is not well-formed",
+ None, None)
+
# Check for validity of port number
_NET_PORT_CHECK = (lambda x: 0 < x < 65535, "invalid port number",
None, None)
OPT_NET_PORT_CHECK = (False, ) + _NET_PORT_CHECK
REQ_CPU_MASK_CHECK = (True, ) + _CPU_MASK_CHECK
OPT_CPU_MASK_CHECK = (False, ) + _CPU_MASK_CHECK
+REQ_MULTI_CPU_MASK_CHECK = (True, ) + _MULTI_CPU_MASK_CHECK
+OPT_MULTI_CPU_MASK_CHECK = (False, ) + _MULTI_CPU_MASK_CHECK
# no checks at all
NO_CHECK = (False, None, None, None, None)
"""
pass
- def FinalizeMigration(self, instance, info, success):
- """Finalized an instance migration.
+ def FinalizeMigrationDst(self, instance, info, success):
+ """Finalize the instance migration on the target node.
Should finalize or revert any preparation done to accept the instance.
Since by default we do no preparation, we also don't have anything to do
"""
raise NotImplementedError
+ def FinalizeMigrationSource(self, instance, success, live):
+ """Finalize the instance migration on the source node.
+
+ @type instance: L{objects.Instance}
+ @param instance: the instance that was migrated
+ @type success: bool
+ @param success: whether the migration succeeded or not
+ @type live: bool
+ @param live: whether the user requested a live migration or not
+
+ """
+ pass
+
+ def GetMigrationStatus(self, instance):
+ """Get the migration status
+
+ @type instance: L{objects.Instance}
+ @param instance: the instance that is being migrated
+ @rtype: L{objects.MigrationStatus}
+ @return: the status of the current migration (one of
+ L{constants.HV_MIGRATION_VALID_STATUSES}), plus any additional
+ progress info that can be retrieved from the hypervisor
+
+ """
+ raise NotImplementedError
+
@classmethod
def CheckParameterSyntax(cls, hvparams):
"""Check the given parameters for validity.
"""
raise HypervisorError("Migration not supported by the chroot hypervisor")
+
+ def GetMigrationStatus(self, instance):
+ """Get the migration status
+
+ @type instance: L{objects.Instance}
+ @param instance: the instance that is being migrated
+ @rtype: L{objects.MigrationStatus}
+ @return: the status of the current migration (one of
+ L{constants.HV_MIGRATION_VALID_STATUSES}), plus any additional
+ progress info that can be retrieved from the hypervisor
+
+ """
+ raise HypervisorError("Migration not supported by the chroot hypervisor")
fh = file(file_name, "w")
try:
fh.write("0\n%d\n%d\n" %
- (instance.beparams[constants.BE_MEMORY],
+ (instance.beparams[constants.BE_MAXMEM],
instance.beparams[constants.BE_VCPUS]))
finally:
fh.close()
logging.debug("Fake hypervisor migrating %s to %s (live=%s)",
instance, target, live)
- self._MarkDown(instance.name)
-
- def FinalizeMigration(self, instance, info, success):
- """Finalize an instance migration.
+ def FinalizeMigrationDst(self, instance, info, success):
+ """Finalize the instance migration on the target node.
For the fake hv, this just marks the instance up.
@type instance: L{objects.Instance}
@param instance: instance whose migration is being finalized
+ @type info: string/data (opaque)
+ @param info: migration information, from the source node
+ @type success: boolean
+ @param success: whether the migration was a success or a failure
"""
if success:
else:
# ensure it's down
self._MarkDown(instance.name)
+
+ def PostMigrationCleanup(self, instance):
+ """Clean-up after a migration.
+
+ To be executed on the source node.
+
+ @type instance: L{objects.Instance}
+ @param instance: the instance that was migrated
+
+ """
+ pass
+
+ def FinalizeMigrationSource(self, instance, success, live):
+ """Finalize the instance migration on the source node.
+
+ @type instance: L{objects.Instance}
+ @param instance: the instance that was migrated
+ @type success: bool
+ @param success: whether the migration succeeded or not
+ @type live: bool
+ @param live: whether the user requested a live migration or not
+
+ """
+ # pylint: disable=W0613
+ if success:
+ self._MarkDown(instance.name)
+
+ def GetMigrationStatus(self, instance):
+ """Get the migration status
+
+ The fake hypervisor migration always succeeds.
+
+ @type instance: L{objects.Instance}
+ @param instance: the instance that is being migrated
+ @rtype: L{objects.MigrationStatus}
+ @return: the status of the current migration (one of
+ L{constants.HV_MIGRATION_VALID_STATUSES}), plus any additional
+ progress info that can be retrieved from the hypervisor
+
+ """
+ return objects.MigrationStatus(status=constants.HV_MIGRATION_COMPLETED)
import fcntl
import shutil
import socket
+import stat
import StringIO
+try:
+ import affinity # pylint: disable=F0401
+except ImportError:
+ affinity = None
from ganeti import utils
from ganeti import constants
_KVM_NETWORK_SCRIPT = constants.SYSCONFDIR + "/ganeti/kvm-vif-bridge"
+_KVM_START_PAUSED_FLAG = "-S"
# TUN/TAP driver constants, taken from <linux/if_tun.h>
# They are architecture-independent and already hardcoded in qemu-kvm source,
"""QEMU Messaging Protocol (QMP) message.
"""
-
def __init__(self, data):
"""Creates a new QMP message based on the passed data.
is not contained in the message
"""
-
- if field_name in self.data:
- return self.data[field_name]
-
- return None
+ return self.data.get(field_name, None)
def __setitem__(self, field_name, field_value):
"""Set the value of the required field_name to field_value.
return QmpMessage(data)
def __str__(self):
- # The protocol expects the JSON object to be sent as a single
- # line, hence the need for indent=False.
- return serializer.DumpJson(self.data, indent=False)
+ # The protocol expects the JSON object to be sent as a single line.
+ return serializer.DumpJson(self.data)
def __eq__(self, other):
# When comparing two QmpMessages, we are interested in comparing
_FIRST_MESSAGE_KEY = "QMP"
_EVENT_KEY = "event"
_ERROR_KEY = "error"
+ _RETURN_KEY = RETURN_KEY = "return"
+ _ACTUAL_KEY = ACTUAL_KEY = "actual"
_ERROR_CLASS_KEY = "class"
_ERROR_DATA_KEY = "data"
_ERROR_DESC_KEY = "desc"
self._connected = False
self._buf = ""
+ def _check_socket(self):
+ sock_stat = None
+ try:
+ sock_stat = os.stat(self.monitor_filename)
+ except EnvironmentError, err:
+ if err.errno == errno.ENOENT:
+ raise errors.HypervisorError("No qmp socket found")
+ else:
+ raise errors.HypervisorError("Error checking qmp socket: %s",
+ utils.ErrnoOrStr(err))
+ if not stat.S_ISSOCK(sock_stat.st_mode):
+ raise errors.HypervisorError("Qmp socket is not a socket")
+
def _check_connection(self):
"""Make sure that the connection is established.
@raise errors.ProgrammerError: when there are data serialization errors
"""
- self.sock.connect(self.monitor_filename)
+ if self._connected:
+ raise errors.ProgrammerError("Cannot connect twice")
+
+ self._check_socket()
+
+    # Verify the QMP socket path exists and is actually a socket
+ try:
+ self.sock.connect(self.monitor_filename)
+ except EnvironmentError:
+ raise errors.HypervisorError("Can't connect to qmp socket")
self._connected = True
# Check if we receive a correct greeting message from the server
class KVMHypervisor(hv_base.BaseHypervisor):
- """KVM hypervisor interface"""
+ """KVM hypervisor interface
+
+ """
CAN_MIGRATE = True
_ROOT_DIR = constants.RUN_GANETI_DIR + "/kvm-hypervisor"
hv_base.ParamInSet(False,
constants.HT_KVM_SPICE_VALID_VIDEO_STREAM_DETECTION_OPTIONS),
constants.HV_KVM_SPICE_AUDIO_COMPR: hv_base.NO_CHECK,
+ constants.HV_KVM_SPICE_USE_TLS: hv_base.NO_CHECK,
+ constants.HV_KVM_SPICE_TLS_CIPHERS: hv_base.NO_CHECK,
+ constants.HV_KVM_SPICE_USE_VDAGENT: hv_base.NO_CHECK,
constants.HV_KVM_FLOPPY_IMAGE_PATH: hv_base.OPT_FILE_CHECK,
constants.HV_CDROM_IMAGE_PATH: hv_base.OPT_FILE_CHECK,
constants.HV_KVM_CDROM2_IMAGE_PATH: hv_base.OPT_FILE_CHECK,
constants.HV_KVM_USE_CHROOT: hv_base.NO_CHECK,
constants.HV_MEM_PATH: hv_base.OPT_DIR_CHECK,
constants.HV_REBOOT_BEHAVIOR:
- hv_base.ParamInSet(True, constants.REBOOT_BEHAVIORS)
+ hv_base.ParamInSet(True, constants.REBOOT_BEHAVIORS),
+ constants.HV_CPU_MASK: hv_base.OPT_MULTI_CPU_MASK_CHECK,
}
_MIGRATION_STATUS_RE = re.compile("Migration\s+status:\s+(\w+)",
re.M | re.I)
+ _MIGRATION_PROGRESS_RE = \
+ re.compile(r"\s*transferred\s+ram:\s+(?P<transferred>\d+)\s+kbytes\s*\n"
+ r"\s*remaining\s+ram:\s+(?P<remaining>\d+)\s+kbytes\s*\n"
+ r"\s*total\s+ram:\s+(?P<total>\d+)\s+kbytes\s*\n", re.I)
+
_MIGRATION_INFO_MAX_BAD_ANSWERS = 5
_MIGRATION_INFO_RETRY_DELAY = 2
_VERSION_RE = re.compile(r"\b(\d+)\.(\d+)(\.(\d+))?\b")
+ _CPU_INFO_RE = re.compile(r"cpu\s+\#(\d+).*thread_id\s*=\s*(\d+)", re.I)
+ _CPU_INFO_CMD = "info cpus"
+ _CONT_CMD = "cont"
+
ANCILLARY_FILES = [
_KVM_NETWORK_SCRIPT,
]
@type tap: str
"""
-
if instance.tags:
tags = " ".join(instance.tags)
else:
" Network configuration script output: %s" %
(tap, result.fail_reason, result.output))
+ @staticmethod
+ def _VerifyAffinityPackage():
+ if affinity is None:
+ raise errors.HypervisorError("affinity Python package not"
+ " found; cannot use CPU pinning under KVM")
+
+ @staticmethod
+ def _BuildAffinityCpuMask(cpu_list):
+ """Create a CPU mask suitable for sched_setaffinity from a list of
+ CPUs.
+
+ See man taskset for more info on sched_setaffinity masks.
+    For example: [ 0, 2, 5, 6 ] will return 101 (0x65, binary 01100101).
+
+ @type cpu_list: list of int
+ @param cpu_list: list of physical CPU numbers to map to vCPUs in order
+ @rtype: int
+ @return: a bit mask of CPU affinities
+
+ """
+ if cpu_list == constants.CPU_PINNING_OFF:
+ return constants.CPU_PINNING_ALL_KVM
+ else:
+ return sum(2 ** cpu for cpu in cpu_list)
+
+ @classmethod
+ def _AssignCpuAffinity(cls, cpu_mask, process_id, thread_dict):
+ """Change CPU affinity for running VM according to given CPU mask.
+
+ @param cpu_mask: CPU mask as given by the user. e.g. "0-2,4:all:1,3"
+ @type cpu_mask: string
+ @param process_id: process ID of KVM process. Used to pin entire VM
+ to physical CPUs.
+ @type process_id: int
+ @param thread_dict: map of virtual CPUs to KVM thread IDs
+ @type thread_dict: dict int:int
+
+ """
+ # Convert the string CPU mask to a list of list of int's
+ cpu_list = utils.ParseMultiCpuMask(cpu_mask)
+
+ if len(cpu_list) == 1:
+ all_cpu_mapping = cpu_list[0]
+ if all_cpu_mapping == constants.CPU_PINNING_OFF:
+ # If CPU pinning has 1 entry that's "all", then do nothing
+ pass
+ else:
+ # If CPU pinning has one non-all entry, map the entire VM to
+ # one set of physical CPUs
+ cls._VerifyAffinityPackage()
+ affinity.set_process_affinity_mask(process_id,
+ cls._BuildAffinityCpuMask(all_cpu_mapping))
+ else:
+ # The number of vCPUs mapped should match the number of vCPUs
+ # reported by KVM. This was already verified earlier, so
+ # here only as a sanity check.
+ assert len(thread_dict) == len(cpu_list)
+ cls._VerifyAffinityPackage()
+
+ # For each vCPU, map it to the proper list of physical CPUs
+ for vcpu, i in zip(cpu_list, range(len(cpu_list))):
+ affinity.set_process_affinity_mask(thread_dict[i],
+ cls._BuildAffinityCpuMask(vcpu))
+
+ def _GetVcpuThreadIds(self, instance_name):
+ """Get a mapping of vCPU no. to thread IDs for the instance
+
+ @type instance_name: string
+ @param instance_name: instance in question
+ @rtype: dictionary of int:int
+ @return: a dictionary mapping vCPU numbers to thread IDs
+
+ """
+ result = {}
+ output = self._CallMonitorCommand(instance_name, self._CPU_INFO_CMD)
+ for line in output.stdout.splitlines():
+ match = self._CPU_INFO_RE.search(line)
+ if not match:
+ continue
+ grp = map(int, match.groups())
+ result[grp[0]] = grp[1]
+
+ return result
+
+ def _ExecuteCpuAffinity(self, instance_name, cpu_mask):
+ """Complete CPU pinning.
+
+ @type instance_name: string
+ @param instance_name: name of instance
+ @type cpu_mask: string
+ @param cpu_mask: CPU pinning mask as entered by user
+
+ """
+ # Get KVM process ID, to be used if need to pin entire VM
+ _, pid, _ = self._InstancePidAlive(instance_name)
+ # Get vCPU thread IDs, to be used if need to pin vCPUs separately
+ thread_dict = self._GetVcpuThreadIds(instance_name)
+ # Run CPU pinning, based on configured mask
+ self._AssignCpuAffinity(cpu_mask, pid, thread_dict)
+
def ListInstances(self):
"""Get the list of running instances.
return None
_, memory, vcpus = self._InstancePidInfo(pid)
- stat = "---b-"
+ istat = "---b-"
times = "0"
- return (instance_name, pid, memory, vcpus, stat, times)
+ try:
+ qmp = QmpConnection(self._InstanceQmpMonitor(instance_name))
+ qmp.connect()
+ vcpus = len(qmp.Execute("query-cpus")[qmp.RETURN_KEY])
+ # Will fail if ballooning is not enabled, but we can then just resort to
+ # the value above.
+ mem_bytes = qmp.Execute("query-balloon")[qmp.RETURN_KEY][qmp.ACTUAL_KEY]
+ memory = mem_bytes / 1048576
+ except errors.HypervisorError:
+ pass
+
+ return (instance_name, pid, memory, vcpus, istat, times)
def GetAllInstancesInfo(self):
"""Get properties of all instances.
"""Generate KVM information to start an instance.
"""
- # pylint: disable=R0914
+ # pylint: disable=R0914,R0915
_, v_major, v_min, _ = self._GetKVMVersion()
pidfile = self._InstancePidFile(instance.name)
kvm_cmd = [kvm]
# used just by the vnc server, if enabled
kvm_cmd.extend(["-name", instance.name])
- kvm_cmd.extend(["-m", instance.beparams[constants.BE_MEMORY]])
+ kvm_cmd.extend(["-m", instance.beparams[constants.BE_MAXMEM]])
kvm_cmd.extend(["-smp", instance.beparams[constants.BE_VCPUS]])
kvm_cmd.extend(["-pidfile", pidfile])
+ kvm_cmd.extend(["-balloon", "virtio"])
kvm_cmd.extend(["-daemonize"])
if not instance.hvparams[constants.HV_ACPI]:
kvm_cmd.extend(["-no-acpi"])
- if startup_paused:
- kvm_cmd.extend(["-S"])
if instance.hvparams[constants.HV_REBOOT_BEHAVIOR] == \
constants.INSTANCE_REBOOT_EXIT:
kvm_cmd.extend(["-no-reboot"])
self.ValidateParameters(hvp)
+ if startup_paused:
+ kvm_cmd.extend([_KVM_START_PAUSED_FLAG])
+
if hvp[constants.HV_KVM_FLAG] == constants.HT_KVM_ENABLED:
kvm_cmd.extend(["-enable-kvm"])
elif hvp[constants.HV_KVM_FLAG] == constants.HT_KVM_DISABLED:
# we have both ipv4 and ipv6, let's use the cluster default IP
# version
cluster_family = ssconf.SimpleStore().GetPrimaryIPFamily()
- spice_ip_version = netutils.IPAddress.GetVersionFromAddressFamily(
- cluster_family)
+ spice_ip_version = \
+ netutils.IPAddress.GetVersionFromAddressFamily(cluster_family)
elif addresses[constants.IP4_VERSION]:
spice_ip_version = constants.IP4_VERSION
elif addresses[constants.IP6_VERSION]:
# ValidateParameters checked it.
spice_address = spice_bind
- spice_arg = "addr=%s,port=%s" % (spice_address, instance.network_port)
+ spice_arg = "addr=%s" % spice_address
+ if hvp[constants.HV_KVM_SPICE_USE_TLS]:
+ spice_arg = "%s,tls-port=%s,x509-cacert-file=%s" % (spice_arg,
+ instance.network_port, constants.SPICE_CACERT_FILE)
+ spice_arg = "%s,x509-key-file=%s,x509-cert-file=%s" % (spice_arg,
+ constants.SPICE_CERT_FILE, constants.SPICE_CERT_FILE)
+ tls_ciphers = hvp[constants.HV_KVM_SPICE_TLS_CIPHERS]
+ if tls_ciphers:
+ spice_arg = "%s,tls-ciphers=%s" % (spice_arg, tls_ciphers)
+ else:
+ spice_arg = "%s,port=%s" % (spice_arg, instance.network_port)
+
if not hvp[constants.HV_KVM_SPICE_PASSWORD_FILE]:
spice_arg = "%s,disable-ticketing" % spice_arg
# Audio compression, by default in qemu-kvm it is on
if not hvp[constants.HV_KVM_SPICE_AUDIO_COMPR]:
spice_arg = "%s,playback-compression=off" % spice_arg
+ if not hvp[constants.HV_KVM_SPICE_USE_VDAGENT]:
+ spice_arg = "%s,agent-mouse=off" % spice_arg
logging.info("KVM: SPICE will listen on port %s", instance.network_port)
kvm_cmd.extend(["-spice", spice_arg])
continue
self._ConfigureNIC(instance, nic_seq, nic, taps[nic_seq])
+ # CPU affinity requires kvm to start paused, so we set this flag if the
+ # instance is not already paused and if we are not going to accept a
+ # migrating instance. In the latter case, pausing is not needed.
+ start_kvm_paused = not (_KVM_START_PAUSED_FLAG in kvm_cmd) and not incoming
+
+ # Note: CPU pinning is using up_hvp since changes take effect
+ # during instance startup anyway, and to avoid problems when soft
+ # rebooting the instance.
+ cpu_pinning = False
+ if up_hvp.get(constants.HV_CPU_MASK, None):
+ cpu_pinning = True
+ if start_kvm_paused:
+ kvm_cmd.extend([_KVM_START_PAUSED_FLAG])
+
if security_model == constants.HT_SM_POOL:
ss = ssconf.SimpleStore()
uid_pool = uidpool.ParseUidPool(ss.GetUidPool(), separator="\n")
# for connection.
spice_password_file = conf_hvp[constants.HV_KVM_SPICE_PASSWORD_FILE]
if spice_password_file:
+ spice_pwd = ""
try:
spice_pwd = utils.ReadOneLineFile(spice_password_file, strict=True)
- qmp = QmpConnection(self._InstanceQmpMonitor(instance.name))
- qmp.connect()
- arguments = {
- "protocol": "spice",
- "password": spice_pwd,
- }
- qmp.Execute("set_password", arguments)
except EnvironmentError, err:
raise errors.HypervisorError("Failed to open SPICE password file %s: %s"
% (spice_password_file, err))
+ qmp = QmpConnection(self._InstanceQmpMonitor(instance.name))
+ qmp.connect()
+ arguments = {
+ "protocol": "spice",
+ "password": spice_pwd,
+ }
+ qmp.Execute("set_password", arguments)
+
for filename in temp_files:
utils.RemoveFile(filename)
+ # If requested, set CPU affinity and resume instance execution
+ if cpu_pinning:
+ try:
+ self._ExecuteCpuAffinity(instance.name, up_hvp[constants.HV_CPU_MASK])
+ finally:
+ if start_kvm_paused:
+ # To control CPU pinning, the VM was started frozen, so we need
+ # to resume its execution, but only if freezing was not
+ # explicitly requested.
+ # Note: this is done even when an exception occurred so the VM
+ # is not unintentionally frozen.
+ self._CallMonitorCommand(instance.name, self._CONT_CMD)
+
def StartInstance(self, instance, block_devices, startup_paused):
"""Start an instance.
incoming_address = (target, instance.hvparams[constants.HV_MIGRATION_PORT])
self._ExecuteKVMRuntime(instance, kvm_runtime, incoming=incoming_address)
- def FinalizeMigration(self, instance, info, success):
- """Finalize an instance migration.
+ def FinalizeMigrationDst(self, instance, info, success):
+ """Finalize the instance migration on the target node.
Stop the incoming mode KVM.
"""
instance_name = instance.name
port = instance.hvparams[constants.HV_MIGRATION_PORT]
- pidfile, pid, alive = self._InstancePidAlive(instance_name)
+ _, _, alive = self._InstancePidAlive(instance_name)
if not alive:
raise errors.HypervisorError("Instance not running, cannot migrate")
migrate_command = "migrate -d tcp:%s:%s" % (target, port)
self._CallMonitorCommand(instance_name, migrate_command)
+ def FinalizeMigrationSource(self, instance, success, live):
+ """Finalize the instance migration on the source node.
+
+ @type instance: L{objects.Instance}
+ @param instance: the instance that was migrated
+ @type success: bool
+ @param success: whether the migration succeeded or not
+ @type live: bool
+ @param live: whether the user requested a live migration or not
+
+ """
+ if success:
+ pidfile, pid, _ = self._InstancePidAlive(instance.name)
+ utils.KillProcess(pid)
+ self._RemoveInstanceRuntimeFiles(pidfile, instance.name)
+ elif live:
+ self._CallMonitorCommand(instance.name, self._CONT_CMD)
+
+ def GetMigrationStatus(self, instance):
+ """Get the migration status
+
+ @type instance: L{objects.Instance}
+ @param instance: the instance that is being migrated
+ @rtype: L{objects.MigrationStatus}
+ @return: the status of the current migration (one of
+ L{constants.HV_MIGRATION_VALID_STATUSES}), plus any additional
+ progress info that can be retrieved from the hypervisor
+
+ """
info_command = "info migrate"
- done = False
- broken_answers = 0
- while not done:
- result = self._CallMonitorCommand(instance_name, info_command)
+ for _ in range(self._MIGRATION_INFO_MAX_BAD_ANSWERS):
+ result = self._CallMonitorCommand(instance.name, info_command)
match = self._MIGRATION_STATUS_RE.search(result.stdout)
if not match:
- broken_answers += 1
if not result.stdout:
logging.info("KVM: empty 'info migrate' result")
else:
logging.warning("KVM: unknown 'info migrate' result: %s",
result.stdout)
- time.sleep(self._MIGRATION_INFO_RETRY_DELAY)
else:
status = match.group(1)
- if status == "completed":
- done = True
- elif status == "active":
- # reset the broken answers count
- broken_answers = 0
- time.sleep(self._MIGRATION_INFO_RETRY_DELAY)
- elif status == "failed" or status == "cancelled":
- if not live:
- self._CallMonitorCommand(instance_name, 'cont')
- raise errors.HypervisorError("Migration %s at the kvm level" %
- status)
- else:
- logging.warning("KVM: unknown migration status '%s'", status)
- broken_answers += 1
- time.sleep(self._MIGRATION_INFO_RETRY_DELAY)
- if broken_answers >= self._MIGRATION_INFO_MAX_BAD_ANSWERS:
- raise errors.HypervisorError("Too many 'info migrate' broken answers")
+ if status in constants.HV_KVM_MIGRATION_VALID_STATUSES:
+ migration_status = objects.MigrationStatus(status=status)
+ match = self._MIGRATION_PROGRESS_RE.search(result.stdout)
+ if match:
+ migration_status.transferred_ram = match.group("transferred")
+ migration_status.total_ram = match.group("total")
- utils.KillProcess(pid)
- self._RemoveInstanceRuntimeFiles(pidfile, instance_name)
+ return migration_status
+
+ logging.warning("KVM: unknown migration status '%s'", status)
+
+ time.sleep(self._MIGRATION_INFO_RETRY_DELAY)
+
+ return objects.MigrationStatus(status=constants.HV_MIGRATION_FAILED,
+ info="Too many 'info migrate' broken answers")
def GetNodeInfo(self):
"""Return information about the node.
constants.HV_KVM_SPICE_JPEG_IMG_COMPR,
constants.HV_KVM_SPICE_ZLIB_GLZ_IMG_COMPR,
constants.HV_KVM_SPICE_STREAMING_VIDEO_DETECTION,
+ constants.HV_KVM_SPICE_USE_TLS,
])
for param in spice_additional_params:
if hvparams[param]:
"""
raise HypervisorError("Migration is not supported by the LXC hypervisor")
+
+ def GetMigrationStatus(self, instance):
+ """Get the migration status
+
+ @type instance: L{objects.Instance}
+ @param instance: the instance that is being migrated
+ @rtype: L{objects.MigrationStatus}
+ @return: the status of the current migration (one of
+ L{constants.HV_MIGRATION_VALID_STATUSES}), plus any additional
+ progress info that can be retrieved from the hypervisor
+
+ """
+ raise HypervisorError("Migration is not supported by the LXC hypervisor")
XEND_CONFIG_FILE = "/etc/xen/xend-config.sxp"
XL_CONFIG_FILE = "/etc/xen/xl.conf"
VIF_BRIDGE_SCRIPT = "/etc/xen/scripts/vif-bridge"
+_DOM0_NAME = "Domain-0"
class XenHypervisor(hv_base.BaseHypervisor):
"""
utils.RemoveFile(XenHypervisor._ConfigFileName(instance_name))
+ @classmethod
+ def _CreateConfigCpus(cls, cpu_mask):
+ """Create a CPU config string that's compatible with Xen's
+ configuration file.
+
+ """
+ # Convert the string CPU mask to a list of list of int's
+ cpu_list = utils.ParseMultiCpuMask(cpu_mask)
+
+ if len(cpu_list) == 1:
+ all_cpu_mapping = cpu_list[0]
+ if all_cpu_mapping == constants.CPU_PINNING_OFF:
+ # If CPU pinning has 1 entry that's "all", then remove the
+ # parameter from the config file
+ return None
+ else:
+ # If CPU pinning has one non-all entry, mapping all vCPUS (the entire
+ # VM) to one physical CPU, using format 'cpu = "C"'
+ return "cpu = \"%s\"" % ",".join(map(str, all_cpu_mapping))
+ else:
+ def _GetCPUMap(vcpu):
+ if vcpu[0] == constants.CPU_PINNING_ALL_VAL:
+ cpu_map = constants.CPU_PINNING_ALL_XEN
+ else:
+ cpu_map = ",".join(map(str, vcpu))
+ return "\"%s\"" % cpu_map
+
+ # build the result string in format 'cpus = [ "c", "c", "c" ]',
+ # where each c is a physical CPU number, a range, a list, or any
+ # combination
+ return "cpus = [ %s ]" % ", ".join(map(_GetCPUMap, cpu_list))
+
@staticmethod
def _RunXmList(xmlist_errors):
"""Helper function for L{_GetXMList} to run "xm list".
" line: %s, error: %s" % (line, err))
# skip the Domain-0 (optional)
- if include_node or data[0] != "Domain-0":
+ if include_node or data[0] != _DOM0_NAME:
result.append(data)
return result
@return: tuple (name, id, memory, vcpus, stat, times)
"""
- xm_list = self._GetXMList(instance_name == "Domain-0")
+ xm_list = self._GetXMList(instance_name == _DOM0_NAME)
result = None
for data in xm_list:
if data[0] == instance_name:
- hv_version: the hypervisor version in the form (major, minor)
"""
- # note: in xen 3, memory has changed to total_memory
result = utils.RunCmd([constants.XEN_CMD, "info"])
if result.failed:
logging.error("Can't run 'xm info' (%s): %s", result.fail_reason,
result = {}
cores_per_socket = threads_per_core = nr_cpus = None
xen_major, xen_minor = None, None
+ memory_total = None
+ memory_free = None
+
for line in xmoutput:
splitfields = line.split(":", 1)
if len(splitfields) > 1:
key = splitfields[0].strip()
val = splitfields[1].strip()
+
+ # note: in xen 3, memory has changed to total_memory
if key == "memory" or key == "total_memory":
- result["memory_total"] = int(val)
+ memory_total = int(val)
elif key == "free_memory":
- result["memory_free"] = int(val)
+ memory_free = int(val)
elif key == "nr_cpus":
nr_cpus = result["cpu_total"] = int(val)
elif key == "nr_nodes":
elif key == "xen_minor":
xen_minor = int(val)
- if (cores_per_socket is not None and
- threads_per_core is not None and nr_cpus is not None):
+ if None not in [cores_per_socket, threads_per_core, nr_cpus]:
result["cpu_sockets"] = nr_cpus / (cores_per_socket * threads_per_core)
- dom0_info = self.GetInstanceInfo("Domain-0")
- if dom0_info is not None:
- result["memory_dom0"] = dom0_info[2]
+ total_instmem = 0
+ for (name, _, mem, vcpus, _, _) in self._GetXMList(True):
+ if name == _DOM0_NAME:
+ result["memory_dom0"] = mem
+ result["dom0_cpus"] = vcpus
+
+ # Include Dom0 in total memory usage
+ total_instmem += mem
+
+ if memory_free is not None:
+ result["memory_free"] = memory_free
+
+ if memory_total is not None:
+ result["memory_total"] = memory_total
+
+ # Calculate memory used by hypervisor
+ if None not in [memory_total, memory_free, total_instmem]:
+ result["memory_hv"] = memory_total - memory_free - total_instmem
if not (xen_major is None or xen_minor is None):
result[constants.HV_NODEINFO_KEY_VERSION] = (xen_major, xen_minor)
"""
pass
- def FinalizeMigration(self, instance, info, success):
+ def FinalizeMigrationDst(self, instance, info, success):
"""Finalize an instance migration.
After a successful migration we write the xen config file.
if result.failed:
raise errors.HypervisorError("Failed to migrate instance %s: %s" %
(instance.name, result.output))
- # remove old xen file after migration succeeded
- try:
- self._RemoveConfigFile(instance.name)
- except EnvironmentError:
- logging.exception("Failure while removing instance config file")
+
+ def FinalizeMigrationSource(self, instance, success, live):
+ """Finalize the instance migration on the source node.
+
+ @type instance: L{objects.Instance}
+ @param instance: the instance that was migrated
+ @type success: bool
+ @param success: whether the migration succeeded or not
+ @type live: bool
+ @param live: whether the user requested a live migration or not
+
+ """
+ # pylint: disable=W0613
+ if success:
+ # remove old xen file after migration succeeded
+ try:
+ self._RemoveConfigFile(instance.name)
+ except EnvironmentError:
+ logging.exception("Failure while removing instance config file")
+
+ def GetMigrationStatus(self, instance):
+ """Get the migration status
+
+ As MigrateInstance for Xen is still blocking, if this method is called it
+ means that MigrateInstance has completed successfully. So we can safely
+ assume that the migration was successful and notify this fact to the client.
+
+ @type instance: L{objects.Instance}
+ @param instance: the instance that is being migrated
+ @rtype: L{objects.MigrationStatus}
+ @return: the status of the current migration (one of
+ L{constants.HV_MIGRATION_VALID_STATUSES}), plus any additional
+ progress info that can be retrieved from the hypervisor
+
+ """
+ return objects.MigrationStatus(status=constants.HV_MIGRATION_COMPLETED)
@classmethod
def PowercycleNode(cls):
# TODO: Add a check for the blockdev prefix (matching [a-z:] or similar).
constants.HV_BLOCKDEV_PREFIX: hv_base.NO_CHECK,
constants.HV_REBOOT_BEHAVIOR:
- hv_base.ParamInSet(True, constants.REBOOT_BEHAVIORS)
+ hv_base.ParamInSet(True, constants.REBOOT_BEHAVIORS),
+ constants.HV_CPU_MASK: hv_base.OPT_MULTI_CPU_MASK_CHECK,
}
@classmethod
config.write("ramdisk = '%s'\n" % initrd_path)
# rest of the settings
- config.write("memory = %d\n" % instance.beparams[constants.BE_MEMORY])
+ config.write("memory = %d\n" % instance.beparams[constants.BE_MAXMEM])
config.write("vcpus = %d\n" % instance.beparams[constants.BE_VCPUS])
+ cpu_pinning = cls._CreateConfigCpus(hvp[constants.HV_CPU_MASK])
+ if cpu_pinning:
+ config.write("%s\n" % cpu_pinning)
+
config.write("name = '%s'\n" % instance.name)
vif_data = []
# TODO: Add a check for the blockdev prefix (matching [a-z:] or similar).
constants.HV_BLOCKDEV_PREFIX: hv_base.NO_CHECK,
constants.HV_REBOOT_BEHAVIOR:
- hv_base.ParamInSet(True, constants.REBOOT_BEHAVIORS)
+ hv_base.ParamInSet(True, constants.REBOOT_BEHAVIORS),
+ constants.HV_CPU_MASK: hv_base.OPT_MULTI_CPU_MASK_CHECK,
}
@classmethod
config.write("kernel = '%s'\n" % kpath)
config.write("builder = 'hvm'\n")
- config.write("memory = %d\n" % instance.beparams[constants.BE_MEMORY])
+ config.write("memory = %d\n" % instance.beparams[constants.BE_MAXMEM])
config.write("vcpus = %d\n" % instance.beparams[constants.BE_VCPUS])
+ cpu_pinning = cls._CreateConfigCpus(hvp[constants.HV_CPU_MASK])
+ if cpu_pinning:
+ config.write("%s\n" % cpu_pinning)
+
config.write("name = '%s'\n" % instance.name)
if hvp[constants.HV_PAE]:
config.write("pae = 1\n")
return wrapper
+def _RequireNonDrainedQueue(fn):
+ """Decorator checking for a non-drained queue.
+
+ To be used with functions submitting new jobs.
+
+ """
+ def wrapper(self, *args, **kwargs):
+ """Wrapper function.
+
+ @raise errors.JobQueueDrainError: if the job queue is marked for draining
+
+ """
+ # Ok when sharing the big job queue lock, as the drain file is created when
+ # the lock is exclusive.
+ # Needs access to protected member, pylint: disable=W0212
+ if self._drained:
+ raise errors.JobQueueDrainError("Job queue is drained, refusing job")
+
+ if not self._accepting_jobs:
+ raise errors.JobQueueError("Job queue is shutting down, refusing job")
+
+ return fn(self, *args, **kwargs)
+ return wrapper
+
+
class JobQueue(object):
"""Queue used to manage the jobs.
self.acquire = self._lock.acquire
self.release = self._lock.release
+ # Accept jobs by default
+ self._accepting_jobs = True
+
# Initialize the queue, and acquire the filelock.
# This ensures no other process is working on the job queue.
self._queue_filelock = jstore.InitAndVerifyQueue(must_lock=True)
# TODO: Check consistency across nodes
- self._queue_size = 0
+ self._queue_size = None
self._UpdateQueueSizeUnlocked()
+ assert ht.TInt(self._queue_size)
self._drained = jstore.CheckDrainFlag()
# Job dependencies
logging.info("Job queue inspection finished")
+ def _GetRpc(self, address_list):
+ """Gets RPC runner with context.
+
+ """
+ return rpc.JobQueueRunner(self.context, address_list)
+
@locking.ssynchronized(_LOCK)
@_RequireOpenQueue
def AddNode(self, node):
assert node_name != self._my_hostname
# Clean queue directory on added node
- result = rpc.RpcRunner.call_jobqueue_purge(node_name)
+ result = self._GetRpc(None).call_jobqueue_purge(node_name)
msg = result.fail_msg
if msg:
logging.warning("Cannot cleanup queue directory on node %s: %s",
# Upload current serial file
files.append(constants.JOB_QUEUE_SERIAL_FILE)
+ # Static address list
+ addrs = [node.primary_ip]
+
for file_name in files:
# Read file content
content = utils.ReadFile(file_name)
- result = rpc.RpcRunner.call_jobqueue_update([node_name],
- [node.primary_ip],
- file_name, content)
+ result = self._GetRpc(addrs).call_jobqueue_update([node_name], file_name,
+ content)
msg = result[node_name].fail_msg
if msg:
logging.error("Failed to upload file %s to node %s: %s",
if replicate:
names, addrs = self._GetNodeIp()
- result = rpc.RpcRunner.call_jobqueue_update(names, addrs, file_name, data)
+ result = self._GetRpc(addrs).call_jobqueue_update(names, file_name, data)
self._CheckRpcResult(result, self._nodes, "Updating %s" % file_name)
def _RenameFilesUnlocked(self, rename):
# ... and on all nodes
names, addrs = self._GetNodeIp()
- result = rpc.RpcRunner.call_jobqueue_rename(names, addrs, rename)
+ result = self._GetRpc(addrs).call_jobqueue_rename(names, rename)
self._CheckRpcResult(result, self._nodes, "Renaming files (%r)" % rename)
@staticmethod
@param ops: The list of OpCodes that will become the new job.
@rtype: L{_QueuedJob}
@return: the job object to be queued
- @raise errors.JobQueueDrainError: if the job queue is marked for draining
@raise errors.JobQueueFull: if the job queue has too many jobs in it
@raise errors.GenericError: If an opcode is not valid
"""
- # Ok when sharing the big job queue lock, as the drain file is created when
- # the lock is exclusive.
- if self._drained:
- raise errors.JobQueueDrainError("Job queue is drained, refusing job")
-
if self._queue_size >= constants.JOB_QUEUE_SIZE_HARD_LIMIT:
raise errors.JobQueueFull()
@locking.ssynchronized(_LOCK)
@_RequireOpenQueue
+ @_RequireNonDrainedQueue
def SubmitJob(self, ops):
"""Create and store a new job.
@locking.ssynchronized(_LOCK)
@_RequireOpenQueue
+ @_RequireNonDrainedQueue
def SubmitManyJobs(self, jobs):
"""Create and store multiple jobs.
assert job.writable, "Can't update read-only job"
filename = self._GetJobPath(job.id)
- data = serializer.DumpJson(job.Serialize(), indent=False)
+ data = serializer.DumpJson(job.Serialize())
logging.debug("Writing job %s to %s", job.id, filename)
self._UpdateJobQueueFile(filename, data, replicate)
return jobs
@locking.ssynchronized(_LOCK)
+ def PrepareShutdown(self):
+ """Prepare to stop the job queue.
+
+ Disables execution of jobs in the workerpool and returns whether there are
+ any jobs currently running. If the latter is the case, the job queue is not
+ yet ready for shutdown. Once this function returns C{True} L{Shutdown} can
+ be called without interfering with any job. Queued and unfinished jobs will
+ be resumed next time.
+
+ Once this function has been called no new job submissions will be accepted
+ (see L{_RequireNonDrainedQueue}).
+
+ @rtype: bool
+ @return: Whether there are any running jobs
+
+ """
+ if self._accepting_jobs:
+ self._accepting_jobs = False
+
+ # Tell worker pool to stop processing pending tasks
+ self._wpool.SetActive(False)
+
+ return self._wpool.HasRunningTasks()
+
+ @locking.ssynchronized(_LOCK)
@_RequireOpenQueue
def Shutdown(self):
"""Stops the job queue.
except AttributeError:
self._acquire_restore = self._base_acquire_restore
try:
- self._is_owned = lock._is_owned
+ self._is_owned = lock.is_owned
except AttributeError:
self._is_owned = self._base_is_owned
else:
return self.__is_exclusive()
- def _is_owned(self, shared=-1):
+ def is_owned(self, shared=-1):
"""Is the current thread somehow owning the lock at this time?
@param shared:
finally:
self.__lock.release()
- is_owned = _is_owned
+ #: Necessary to remain compatible with threading.Condition, which tries to
+ #: retrieve a locks' "_is_owned" attribute
+ _is_owned = is_owned
def _count_pending(self):
"""Returns the number of pending acquires.
"""
return self.__lockdict
- def _is_owned(self):
- """Is the current thread a current level owner?"""
+ def is_owned(self):
+ """Is the current thread a current level owner?
+
+ @note: Use L{check_owned} to check if a specific lock is held
+
+ """
return threading.currentThread() in self.__owners
+ def check_owned(self, names, shared=-1):
+ """Check if locks are owned in a specific mode.
+
+ @type names: sequence or string
+ @param names: Lock names (or a single lock name)
+ @param shared: See L{SharedLock.is_owned}
+ @rtype: bool
+ @note: Use L{is_owned} to check if the current thread holds I{any} lock and
+ L{list_owned} to get the names of all owned locks
+
+ """
+ if isinstance(names, basestring):
+ names = [names]
+
+ # Avoid check if no locks are owned anyway
+ if names and self.is_owned():
+ candidates = []
+
+ # Gather references to all locks (in case they're deleted in the meantime)
+ for lname in names:
+ try:
+ lock = self.__lockdict[lname]
+ except KeyError:
+ raise errors.LockError("Non-existing lock '%s' in set '%s' (it may"
+ " have been removed)" % (lname, self.name))
+ else:
+ candidates.append(lock)
+
+ return compat.all(lock.is_owned(shared=shared) for lock in candidates)
+ else:
+ return False
+
def _add_owned(self, name=None):
"""Note the current thread owns the given lock"""
if name is None:
- if not self._is_owned():
+ if not self.is_owned():
self.__owners[threading.currentThread()] = set()
else:
- if self._is_owned():
+ if self.is_owned():
self.__owners[threading.currentThread()].add(name)
else:
self.__owners[threading.currentThread()] = set([name])
def _del_owned(self, name=None):
"""Note the current thread owns the given lock"""
- assert not (name is None and self.__lock._is_owned()), \
+ assert not (name is None and self.__lock.is_owned()), \
"Cannot hold internal lock when deleting owner status"
if name is not None:
self.__owners[threading.currentThread()].remove(name)
# Only remove the key if we don't hold the set-lock as well
- if (not self.__lock._is_owned() and
+ if (not self.__lock.is_owned() and
not self.__owners[threading.currentThread()]):
del self.__owners[threading.currentThread()]
- def _list_owned(self):
+ def list_owned(self):
"""Get the set of resource names owned by the current thread"""
- if self._is_owned():
+ if self.is_owned():
return self.__owners[threading.currentThread()].copy()
else:
return set()
def _release_and_delete_owned(self):
"""Release and delete all resources owned by the current thread"""
- for lname in self._list_owned():
+ for lname in self.list_owned():
lock = self.__lockdict[lname]
- if lock._is_owned():
+ if lock.is_owned():
lock.release()
self._del_owned(name=lname)
# If we don't already own the set-level lock acquired
# we'll get it and note we need to release it later.
release_lock = False
- if not self.__lock._is_owned():
+ if not self.__lock.is_owned():
release_lock = True
self.__lock.acquire(shared=1)
try:
assert timeout is None or timeout >= 0.0
# Check we don't already own locks at this level
- assert not self._is_owned(), ("Cannot acquire locks in the same set twice"
- " (lockset %s)" % self.name)
+ assert not self.is_owned(), ("Cannot acquire locks in the same set twice"
+ " (lockset %s)" % self.name)
if priority is None:
priority = _DEFAULT_PRIORITY
# We shouldn't have problems adding the lock to the owners list, but
# if we did we'll try to release this lock and re-raise exception.
# Of course something is going to be really wrong after this.
- if lock._is_owned():
+ if lock.is_owned():
lock.release()
raise
The locks must have been acquired in exclusive mode.
"""
- assert self._is_owned(), ("downgrade on lockset %s while not owning any"
- " lock" % self.name)
+ assert self.is_owned(), ("downgrade on lockset %s while not owning any"
+ " lock" % self.name)
# Support passing in a single resource to downgrade rather than many
if isinstance(names, basestring):
names = [names]
- owned = self._list_owned()
+ owned = self.list_owned()
if names is None:
names = owned
self.__lockdict[lockname].downgrade()
# Do we own the lockset in exclusive mode?
- if self.__lock._is_owned(shared=0):
+ if self.__lock.is_owned(shared=0):
# Have all locks been downgraded?
- if not compat.any(lock._is_owned(shared=0)
+ if not compat.any(lock.is_owned(shared=0)
for lock in self.__lockdict.values()):
self.__lock.downgrade()
- assert self.__lock._is_owned(shared=1)
+ assert self.__lock.is_owned(shared=1)
return True
(defaults to all the locks acquired at that level).
"""
- assert self._is_owned(), ("release() on lock set %s while not owner" %
- self.name)
+ assert self.is_owned(), ("release() on lock set %s while not owner" %
+ self.name)
# Support passing in a single resource to release rather than many
if isinstance(names, basestring):
names = [names]
if names is None:
- names = self._list_owned()
+ names = self.list_owned()
else:
names = set(names)
- assert self._list_owned().issuperset(names), (
+ assert self.list_owned().issuperset(names), (
"release() on unheld resources %s (set %s)" %
- (names.difference(self._list_owned()), self.name))
+ (names.difference(self.list_owned()), self.name))
# First of all let's release the "all elements" lock, if set.
# After this 'add' can work again
- if self.__lock._is_owned():
+ if self.__lock.is_owned():
self.__lock.release()
self._del_owned()
"""
# Check we don't already own locks at this level
- assert not self._is_owned() or self.__lock._is_owned(shared=0), \
+ assert not self.is_owned() or self.__lock.is_owned(shared=0), \
("Cannot add locks if the set %s is only partially owned, or shared" %
self.name)
# If we don't already own the set-level lock acquired in an exclusive way
# we'll get it and note we need to release it later.
release_lock = False
- if not self.__lock._is_owned():
+ if not self.__lock.is_owned():
release_lock = True
self.__lock.acquire()
# If we own any subset of this lock it must be a superset of what we want
# to delete. The ownership must also be exclusive, but that will be checked
# by the lock itself.
- assert not self._is_owned() or self._list_owned().issuperset(names), (
+ assert not self.is_owned() or self.list_owned().issuperset(names), (
"remove() on acquired lockset %s while not owning all elements" %
self.name)
removed.append(lname)
except (KeyError, errors.LockError):
# This cannot happen if we were already holding it, verify:
- assert not self._is_owned(), ("remove failed while holding lockset %s"
- % self.name)
+ assert not self.is_owned(), ("remove failed while holding lockset %s" %
+ self.name)
else:
# If no LockError was raised we are the ones who deleted the lock.
# This means we can safely remove it from lockdict, as any further or
# it's the job of the one who actually deleted it.
del self.__lockdict[lname]
# And let's remove it from our private list if we owned it.
- if self._is_owned():
+ if self.is_owned():
self._del_owned(name=lname)
return removed
LEVEL_INSTANCE = 1
LEVEL_NODEGROUP = 2
LEVEL_NODE = 3
+LEVEL_NODE_RES = 4
-LEVELS = [LEVEL_CLUSTER,
- LEVEL_INSTANCE,
- LEVEL_NODEGROUP,
- LEVEL_NODE]
+LEVELS = [
+ LEVEL_CLUSTER,
+ LEVEL_INSTANCE,
+ LEVEL_NODEGROUP,
+ LEVEL_NODE,
+ LEVEL_NODE_RES,
+ ]
# Lock levels which are modifiable
-LEVELS_MOD = [LEVEL_NODE, LEVEL_NODEGROUP, LEVEL_INSTANCE]
-
+LEVELS_MOD = frozenset([
+ LEVEL_NODE_RES,
+ LEVEL_NODE,
+ LEVEL_NODEGROUP,
+ LEVEL_INSTANCE,
+ ])
+
+#: Lock level names (make sure to use singular form)
LEVEL_NAMES = {
LEVEL_CLUSTER: "cluster",
LEVEL_INSTANCE: "instance",
LEVEL_NODEGROUP: "nodegroup",
LEVEL_NODE: "node",
+ LEVEL_NODE_RES: "node-res",
}
# Constant for the big ganeti lock
# The keyring contains all the locks, at their level and in the correct
# locking order.
self.__keyring = {
- LEVEL_CLUSTER: LockSet([BGL], "BGL", monitor=self._monitor),
- LEVEL_NODE: LockSet(nodes, "nodes", monitor=self._monitor),
- LEVEL_NODEGROUP: LockSet(nodegroups, "nodegroups", monitor=self._monitor),
- LEVEL_INSTANCE: LockSet(instances, "instances",
+ LEVEL_CLUSTER: LockSet([BGL], "cluster", monitor=self._monitor),
+ LEVEL_NODE: LockSet(nodes, "node", monitor=self._monitor),
+ LEVEL_NODE_RES: LockSet(nodes, "node-res", monitor=self._monitor),
+ LEVEL_NODEGROUP: LockSet(nodegroups, "nodegroup", monitor=self._monitor),
+ LEVEL_INSTANCE: LockSet(instances, "instance",
monitor=self._monitor),
}
+ assert compat.all(ls.name == LEVEL_NAMES[level]
+ for (level, ls) in self.__keyring.items())
+
def AddToLockMonitor(self, provider):
"""Registers a new lock with the monitor.
assert level in LEVELS, "Invalid locking level %s" % level
return self.__keyring[level]._names()
- def _is_owned(self, level):
+ def is_owned(self, level):
"""Check whether we are owning locks at the given level
"""
- return self.__keyring[level]._is_owned()
-
- is_owned = _is_owned
+ return self.__keyring[level].is_owned()
- def _list_owned(self, level):
+ def list_owned(self, level):
"""Get the set of owned locks at the given level
"""
- return self.__keyring[level]._list_owned()
+ return self.__keyring[level].list_owned()
- list_owned = _list_owned
+ def check_owned(self, level, names, shared=-1):
+ """Check if locks at a certain level are owned in a specific mode.
+
+ @see: L{LockSet.check_owned}
+
+ """
+ return self.__keyring[level].check_owned(names, shared=shared)
def _upper_owned(self, level):
"""Check that we don't own any lock at a level greater than the given one.
"""
# This way of checking only works if LEVELS[i] = i, which we check for in
# the test cases.
- return compat.any((self._is_owned(l) for l in LEVELS[level + 1:]))
+ return compat.any((self.is_owned(l) for l in LEVELS[level + 1:]))
def _BGL_owned(self): # pylint: disable=C0103
"""Check if the current thread owns the BGL.
Both an exclusive or a shared acquisition work.
"""
- return BGL in self.__keyring[LEVEL_CLUSTER]._list_owned()
+ return BGL in self.__keyring[LEVEL_CLUSTER].list_owned()
@staticmethod
def _contains_BGL(level, names): # pylint: disable=C0103
not self._upper_owned(LEVEL_CLUSTER)), (
"Cannot release the Big Ganeti Lock while holding something"
" at upper levels (%r)" %
- (utils.CommaJoin(["%s=%r" % (LEVEL_NAMES[i], self._list_owned(i))
+ (utils.CommaJoin(["%s=%r" % (LEVEL_NAMES[i], self.list_owned(i))
for i in self.__keyring.keys()]), ))
# Release will complain if we don't own the locks already
# Check we either own the level or don't own anything from here
# up. LockSet.remove() will check the case in which we don't own
# all the needed resources, or we have a shared ownership.
- assert self._is_owned(level) or not self._upper_owned(level), (
+ assert self.is_owned(level) or not self._upper_owned(level), (
"Cannot remove locks at a level while not owning it or"
" owning some at a greater one")
return self.__keyring[level].remove(names)
request[KEY_VERSION] = version
# Serialize the request
- return serializer.DumpJson(request, indent=False)
+ return serializer.DumpJson(request)
def CallLuxiMethod(transport_cb, method, args, version=None):
"""Send a generic request and return the response.
"""
+ if not isinstance(args, (list, tuple)):
+ raise errors.ProgrammerError("Invalid parameter passed to CallMethod:"
+ " expected list, got %s" % type(args))
return CallLuxiMethod(self._SendMethodCall, method, args,
version=constants.LUXI_VERSION)
def SetQueueDrainFlag(self, drain_flag):
- return self.CallMethod(REQ_QUEUE_SET_DRAIN_FLAG, drain_flag)
+ return self.CallMethod(REQ_QUEUE_SET_DRAIN_FLAG, (drain_flag, ))
def SetWatcherPause(self, until):
- return self.CallMethod(REQ_SET_WATCHER_PAUSE, [until])
+ return self.CallMethod(REQ_SET_WATCHER_PAUSE, (until, ))
def SubmitJob(self, ops):
ops_state = map(lambda op: op.__getstate__(), ops)
return self.CallMethod(REQ_SUBMIT_MANY_JOBS, jobs_state)
def CancelJob(self, job_id):
- return self.CallMethod(REQ_CANCEL_JOB, job_id)
+ return self.CallMethod(REQ_CANCEL_JOB, (job_id, ))
def ArchiveJob(self, job_id):
- return self.CallMethod(REQ_ARCHIVE_JOB, job_id)
+ return self.CallMethod(REQ_ARCHIVE_JOB, (job_id, ))
def AutoArchiveJobs(self, age):
timeout = (DEF_RWTO - 1) / 2
break
return result
- def Query(self, what, fields, filter_):
+ def Query(self, what, fields, qfilter):
"""Query for resources/items.
@param what: One of L{constants.QR_VIA_LUXI}
@type fields: List of strings
@param fields: List of requested fields
- @type filter_: None or list
- @param filter_: Query filter
+ @type qfilter: None or list
+ @param qfilter: Query filter
@rtype: L{objects.QueryResponse}
"""
- req = objects.QueryRequest(what=what, fields=fields, filter=filter_)
- result = self.CallMethod(REQ_QUERY, req.ToDict())
+ result = self.CallMethod(REQ_QUERY, (what, fields, qfilter))
return objects.QueryResponse.FromDict(result)
def QueryFields(self, what, fields):
@rtype: L{objects.QueryFieldsResponse}
"""
- req = objects.QueryFieldsRequest(what=what, fields=fields)
- result = self.CallMethod(REQ_QUERY_FIELDS, req.ToDict())
+ result = self.CallMethod(REQ_QUERY_FIELDS, (what, fields))
return objects.QueryFieldsResponse.FromDict(result)
def QueryJobs(self, job_ids, fields):
return self.CallMethod(REQ_QUERY_CLUSTER_INFO, ())
def QueryConfigValues(self, fields):
- return self.CallMethod(REQ_QUERY_CONFIG_VALUES, fields)
+ return self.CallMethod(REQ_QUERY_CONFIG_VALUES, (fields, ))
def QueryTags(self, kind, name):
return self.CallMethod(REQ_QUERY_TAGS, (kind, name))
"""
-def _TimeoutExpired(epoch, timeout, _time_fn=time.time):
- """Checks whether a timeout has expired.
-
- """
- return _time_fn() > (epoch + timeout)
-
-
class _DiskImportExportBase(object):
MODE_TEXT = None
assert self._ts_begin is not None
if not data:
- if _TimeoutExpired(self._ts_begin, self._timeouts.ready):
+ if utils.TimeoutExpired(self._ts_begin, self._timeouts.ready):
raise _ImportExportError("Didn't become ready after %s seconds" %
self._timeouts.ready)
if self._ts_last_error is None:
self._ts_last_error = time.time()
- elif _TimeoutExpired(self._ts_last_error, self._timeouts.error):
+ elif utils.TimeoutExpired(self._ts_last_error, self._timeouts.error):
raise _ImportExportError("Too many errors while updating data")
return False
return True
- if _TimeoutExpired(self._GetConnectedCheckEpoch(), self._timeouts.connect):
+ if utils.TimeoutExpired(self._GetConnectedCheckEpoch(),
+ self._timeouts.connect):
raise _ImportExportError("Not connected after %s seconds" %
self._timeouts.connect)
"""
if ((self._ts_last_progress is None or
- _TimeoutExpired(self._ts_last_progress, self._timeouts.progress)) and
+ utils.TimeoutExpired(self._ts_last_progress,
+ self._timeouts.progress)) and
self._daemon and
self._daemon.progress_mbytes is not None and
self._daemon.progress_throughput is not None):
"""
return self._lu.rpc.call_import_start(self.node_name, self._opts,
self._instance, self._component,
- self._dest, self._dest_args)
+ (self._dest, self._dest_args))
def CheckListening(self):
"""Checks whether the daemon is listening.
return True
- if _TimeoutExpired(self._ts_begin, self._timeouts.listen):
+ if utils.TimeoutExpired(self._ts_begin, self._timeouts.listen):
raise _ImportExportError("Not listening after %s seconds" %
self._timeouts.listen)
return self._lu.rpc.call_export_start(self.node_name, self._opts,
self._dest_host, self._dest_port,
self._instance, self._component,
- self._source, self._source_args)
+ (self._source, self._source_args))
def CheckListening(self):
"""Checks whether the daemon is listening.
" result '%s'", idx, src_node, result.payload)
else:
disk_id = tuple(result.payload)
+ disk_params = constants.DISK_LD_DEFAULTS[constants.LD_LV].copy()
new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
logical_id=disk_id, physical_id=disk_id,
- iv_name=disk.iv_name)
+ iv_name=disk.iv_name,
+ params=disk_params)
self._snap_disks.append(new_dev)
from ganeti import opcodes
from ganeti import constants
from ganeti import errors
-from ganeti import rpc
from ganeti import cmdlib
from ganeti import locking
from ganeti import utils
self.context = context
self._ec_id = ec_id
self._cbs = None
- self.rpc = rpc.RpcRunner(context.cfg)
+ self.rpc = context.rpc
self.hmclass = HooksMaster
def _AcquireLocks(self, level, names, shared, timeout, priority):
return False
@classmethod
+ def ValidateNetmask(cls, netmask):
+ """Validate a netmask suffix in CIDR notation.
+
+ @type netmask: int
+ @param netmask: netmask suffix to validate
+ @rtype: bool
+ @return: True if valid, False otherwise
+
+ """
+ assert (isinstance(netmask, (int, long)))
+
+ return 0 < netmask <= cls.iplen
+
+ @classmethod
def Own(cls, address):
"""Check if the current host has the the given IP address.
raise errors.ProgrammerError("%s is not a valid IP version" % version)
+ @staticmethod
+ def GetClassFromIpVersion(version):
+ """Return the IPAddress subclass for the given IP version.
+
+ @type version: int
+ @param version: IP version, one of L{constants.IP4_VERSION} or
+ L{constants.IP6_VERSION}
+ @return: a subclass of L{netutils.IPAddress}
+ @raise errors.ProgrammerError: for unknown IP versions
+
+ """
+ if version == constants.IP4_VERSION:
+ return IP4Address
+ elif version == constants.IP6_VERSION:
+ return IP6Address
+
+ raise errors.ProgrammerError("%s is not a valid IP version" % version)
+
+ @staticmethod
+ def GetClassFromIpFamily(family):
+ """Return the IPAddress subclass for the given IP family.
+
+ @param family: IP family (one of C{socket.AF_INET} or C{socket.AF_INET6})
+ @return: a subclass of L{netutils.IPAddress}
+ @raise errors.ProgrammerError: for unknown IP versions
+
+ """
+ return IPAddress.GetClassFromIpVersion(
+ IPAddress.GetVersionFromAddressFamily(family))
+
@classmethod
def IsLoopback(cls, address):
"""Determine whether it is a loopback address.
"""
-# pylint: disable=E0203,W0201
+# pylint: disable=E0203,W0201,R0902
# E0203: Access to member %r before its definition, since we use
# objects.py which doesn't explicitely initialise its members
# W0201: Attribute '%s' defined outside __init__
+# R0902: Allow instances of these objects to have more than 20 attributes
+
import ConfigParser
import re
import copy
from ganeti import errors
from ganeti import constants
+from ganeti import netutils
+from ganeti import utils
from socket import AF_INET
_TIMESTAMPS = ["ctime", "mtime"]
_UUID = ["uuid"]
+# constants used to create InstancePolicy dictionary
+TISPECS_GROUP_TYPES = {
+ constants.ISPECS_MIN: constants.VTYPE_INT,
+ constants.ISPECS_MAX: constants.VTYPE_INT,
+}
+
+TISPECS_CLUSTER_TYPES = {
+ constants.ISPECS_MIN: constants.VTYPE_INT,
+ constants.ISPECS_MAX: constants.VTYPE_INT,
+ constants.ISPECS_STD: constants.VTYPE_INT,
+ }
+
def FillDict(defaults_dict, custom_dict, skip_keys=None):
"""Basic function to apply settings on top a default dict.
return ret_dict
+def FillDictOfDicts(defaults_dict, custom_dict, skip_keys=None):
+ """Run FillDict for each key in dictionary.
+
+ """
+ ret_dict = {}
+ for key in defaults_dict.keys():
+ ret_dict[key] = FillDict(defaults_dict[key],
+ custom_dict.get(key, {}),
+ skip_keys=skip_keys)
+ return ret_dict
+
+
def UpgradeGroupedParams(target, defaults):
"""Update all groups for the target parameter.
return target
+def UpgradeBeParams(target):
+ """Update the be parameters dict to the new format.
+
+ @type target: dict
+ @param target: "be" parameters dict
+
+ """
+ if constants.BE_MEMORY in target:
+ memory = target[constants.BE_MEMORY]
+ target[constants.BE_MAXMEM] = memory
+ target[constants.BE_MINMEM] = memory
+ del target[constants.BE_MEMORY]
+
+
+def UpgradeDiskParams(diskparams):
+ """Upgrade the disk parameters.
+
+ @type diskparams: dict
+ @param diskparams: disk parameters to upgrade
+ @rtype: dict
+ @return: the upgraded disk parameters dict
+
+ """
+ result = dict()
+ if diskparams is None:
+ result = constants.DISK_DT_DEFAULTS.copy()
+ else:
+ # Update the disk parameter values for each disk template.
+ # The code iterates over constants.DISK_TEMPLATES because new templates
+ # might have been added.
+ for template in constants.DISK_TEMPLATES:
+ if template not in diskparams:
+ result[template] = constants.DISK_DT_DEFAULTS[template].copy()
+ else:
+ result[template] = FillDict(constants.DISK_DT_DEFAULTS[template],
+ diskparams[template])
+
+ return result
+
+
+def MakeEmptyIPolicy():
+ """Create empty IPolicy dictionary.
+
+ """
+ return dict([
+ (constants.ISPECS_MIN, dict()),
+ (constants.ISPECS_MAX, dict()),
+ (constants.ISPECS_STD, dict()),
+ ])
+
+
+def CreateIPolicyFromOpts(ispecs_mem_size=None,
+ ispecs_cpu_count=None,
+ ispecs_disk_count=None,
+ ispecs_disk_size=None,
+ ispecs_nic_count=None,
+ group_ipolicy=False,
+ allowed_values=None):
+ """Creation of instance policy based on command line options.
+
+
+ """
+ # prepare ipolicy dict
+ ipolicy_transposed = {
+ constants.ISPEC_MEM_SIZE: ispecs_mem_size,
+ constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
+ constants.ISPEC_DISK_COUNT: ispecs_disk_count,
+ constants.ISPEC_DISK_SIZE: ispecs_disk_size,
+ constants.ISPEC_NIC_COUNT: ispecs_nic_count,
+ }
+
+ # first, check that the values given are correct
+ if group_ipolicy:
+ forced_type = TISPECS_GROUP_TYPES
+ else:
+ forced_type = TISPECS_CLUSTER_TYPES
+
+ for specs in ipolicy_transposed.values():
+ utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)
+
+ # then transpose
+ ipolicy_out = MakeEmptyIPolicy()
+ for name, specs in ipolicy_transposed.iteritems():
+ assert name in constants.ISPECS_PARAMETERS
+ for key, val in specs.items(): # {min: .. ,max: .., std: ..}
+ ipolicy_out[key][name] = val
+
+ return ipolicy_out
+
+
class ConfigObject(object):
"""A generic config object.
return obj
+class MasterNetworkParameters(ConfigObject):
+ """Network configuration parameters for the master
+
+ @ivar name: master name
+ @ivar ip: master IP
+ @ivar netmask: master netmask
+ @ivar netdev: master network device
+ @ivar ip_family: master IP family
+
+ """
+ __slots__ = [
+ "name",
+ "ip",
+ "netmask",
+ "netdev",
+ "ip_family"
+ ]
+
+
class ConfigData(ConfigObject):
"""Top-level config object."""
__slots__ = [
@raise errors.ConfigurationError: when a parameter is not valid
"""
- if nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES:
+ if (nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES and
+ nicparams[constants.NIC_MODE] != constants.VALUE_AUTO):
err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE]
raise errors.ConfigurationError(err)
class Disk(ConfigObject):
"""Config object representing a block device."""
__slots__ = ["dev_type", "logical_id", "physical_id",
- "children", "iv_name", "size", "mode"]
+ "children", "iv_name", "size", "mode", "params"]
def CreateOnSecondary(self):
"""Test if this device needs to be created on a secondary node."""
if self.children:
for child in self.children:
child.UpgradeConfig()
+
+ if not self.params:
+ self.params = constants.DISK_LD_DEFAULTS[self.dev_type].copy()
+ else:
+ self.params = FillDict(constants.DISK_LD_DEFAULTS[self.dev_type],
+ self.params)
# add here config upgrade for this disk
+class InstancePolicy(ConfigObject):
+ """Config object representing instance policy limits dictionary."""
+ __slots__ = ["min", "max", "std"]
+
+ @classmethod
+ def CheckParameterSyntax(cls, ipolicy):
+ """ Check the instance policy for validity.
+
+ """
+ for param in constants.ISPECS_PARAMETERS:
+ InstancePolicy.CheckISpecSyntax(ipolicy, param)
+
+ @classmethod
+ def CheckISpecSyntax(cls, ipolicy, name):
+ """Check the instance policy for validity on a given key.
+
+ We check if the instance policy makes sense for a given key, that is
+ if ipolicy[min][name] <= ipolicy[std][name] <= ipolicy[max][name].
+
+ @type ipolicy: dict
+ @param ipolicy: dictionary with min, max, std specs
+ @type name: string
+ @param name: what are the limits for
+ @raise errors.ConfigurationError: when specs for given name are not valid
+
+ """
+ min_v = ipolicy[constants.ISPECS_MIN].get(name, 0)
+ std_v = ipolicy[constants.ISPECS_STD].get(name, min_v)
+ max_v = ipolicy[constants.ISPECS_MAX].get(name, std_v)
+ err = ("Invalid specification of min/max/std values for %s: %s/%s/%s" %
+ (name,
+ ipolicy[constants.ISPECS_MIN].get(name, "-"),
+ ipolicy[constants.ISPECS_MAX].get(name, "-"),
+ ipolicy[constants.ISPECS_STD].get(name, "-")))
+ if min_v > std_v or std_v > max_v:
+ raise errors.ConfigurationError(err)
+
+
class Instance(TaggableObject):
"""Config object representing an instance."""
__slots__ = [
"hvparams",
"beparams",
"osparams",
- "admin_up",
+ "admin_state",
"nics",
"disks",
"disk_template",
"""Custom function for instances.
"""
+ if "admin_state" not in val:
+ if val.get("admin_up", False):
+ val["admin_state"] = constants.ADMINST_UP
+ else:
+ val["admin_state"] = constants.ADMINST_DOWN
+ if "admin_up" in val:
+ del val["admin_up"]
obj = super(Instance, cls).FromDict(val)
obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
pass
if self.osparams is None:
self.osparams = {}
+ UpgradeBeParams(self.beparams)
class OS(ConfigObject):
return cls.SplitNameVariant(name)[1]
+class NodeHvState(ConfigObject):
+ """Hypervisor state on a node.
+
+ @ivar mem_total: Total amount of memory
+ @ivar mem_node: Memory used by, or reserved for, the node itself (not always
+ available)
+ @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
+ rounding
+ @ivar mem_inst: Memory used by instances living on node
+ @ivar cpu_total: Total node CPU core count
+ @ivar cpu_node: Number of CPU cores reserved for the node itself
+
+ """
+ __slots__ = [
+ "mem_total",
+ "mem_node",
+ "mem_hv",
+ "mem_inst",
+ "cpu_total",
+ "cpu_node",
+ ] + _TIMESTAMPS
+
+
+class NodeDiskState(ConfigObject):
+ """Disk state on a node.
+
+ """
+ __slots__ = [
+ "total",
+ "reserved",
+ "overhead",
+ ] + _TIMESTAMPS
+
+
class Node(TaggableObject):
- """Config object representing a node."""
+ """Config object representing a node.
+
+ @ivar hv_state: Hypervisor state (e.g. number of CPUs)
+ @ivar hv_state_static: Hypervisor state overridden by user
+ @ivar disk_state: Disk state (e.g. free space)
+ @ivar disk_state_static: Disk state overridden by user
+
+ """
__slots__ = [
"name",
"primary_ip",
"vm_capable",
"ndparams",
"powered",
+ "hv_state",
+ "hv_state_static",
+ "disk_state",
+ "disk_state_static",
] + _TIMESTAMPS + _UUID
def UpgradeConfig(self):
if self.powered is None:
self.powered = True
+ def ToDict(self):
+ """Custom function for serializing.
+
+ """
+ data = super(Node, self).ToDict()
+
+ hv_state = data.get("hv_state", None)
+ if hv_state is not None:
+ data["hv_state"] = self._ContainerToDicts(hv_state)
+
+ disk_state = data.get("disk_state", None)
+ if disk_state is not None:
+ data["disk_state"] = \
+ dict((key, self._ContainerToDicts(value))
+ for (key, value) in disk_state.items())
+
+ return data
+
+ @classmethod
+ def FromDict(cls, val):
+ """Custom function for deserializing.
+
+ """
+ obj = super(Node, cls).FromDict(val)
+
+ if obj.hv_state is not None:
+ obj.hv_state = cls._ContainerFromDicts(obj.hv_state, dict, NodeHvState)
+
+ if obj.disk_state is not None:
+ obj.disk_state = \
+ dict((key, cls._ContainerFromDicts(value, dict, NodeDiskState))
+ for (key, value) in obj.disk_state.items())
+
+ return obj
+
class NodeGroup(TaggableObject):
"""Config object representing a node group."""
"name",
"members",
"ndparams",
+ "diskparams",
+ "ipolicy",
"serial_no",
+ "hv_state_static",
+ "disk_state_static",
"alloc_policy",
] + _TIMESTAMPS + _UUID
if self.mtime is None:
self.mtime = time.time()
+ self.diskparams = UpgradeDiskParams(self.diskparams)
+ if self.ipolicy is None:
+ self.ipolicy = MakeEmptyIPolicy()
+
def FillND(self, node):
"""Return filled out ndparams for L{objects.Node}
"master_node",
"master_ip",
"master_netdev",
+ "master_netmask",
+ "use_external_mip_script",
"cluster_name",
"file_storage_dir",
"shared_file_storage_dir",
"enabled_hypervisors",
"hvparams",
+ "ipolicy",
"os_hvp",
"beparams",
"osparams",
"nicparams",
"ndparams",
+ "diskparams",
"candidate_pool_size",
"modify_etc_hosts",
"modify_ssh_setup",
"blacklisted_os",
"primary_ip_family",
"prealloc_wipe_disks",
+ "hv_state_static",
+ "disk_state_static",
] + _TIMESTAMPS + _UUID
def UpgradeConfig(self):
self.beparams = UpgradeGroupedParams(self.beparams,
constants.BEC_DEFAULTS)
+ for beparams_group in self.beparams:
+ UpgradeBeParams(self.beparams[beparams_group])
+
migrate_default_bridge = not self.nicparams
self.nicparams = UpgradeGroupedParams(self.nicparams,
constants.NICC_DEFAULTS)
if self.primary_ip_family is None:
self.primary_ip_family = AF_INET
+ if self.master_netmask is None:
+ ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
+ self.master_netmask = ipcls.iplen
+
if self.prealloc_wipe_disks is None:
self.prealloc_wipe_disks = False
if self.shared_file_storage_dir is None:
self.shared_file_storage_dir = ""
+ if self.use_external_mip_script is None:
+ self.use_external_mip_script = False
+
+ self.diskparams = UpgradeDiskParams(self.diskparams)
+
+ # instance policy added before 2.6
+ if self.ipolicy is None:
+ self.ipolicy = MakeEmptyIPolicy()
+
+ @property
+ def primary_hypervisor(self):
+ """The first hypervisor is the primary.
+
+ Useful, for example, for L{Node}'s hv/disk state.
+
+ """
+ return self.enabled_hypervisors[0]
+
def ToDict(self):
"""Custom function for cluster.
# specified params
return FillDict(result, os_params)
+ @staticmethod
+ def SimpleFillHvState(hv_state):
+ """Fill an hv_state sub dict with cluster defaults.
+
+ """
+ return FillDict(constants.HVST_DEFAULTS, hv_state)
+
+ @staticmethod
+ def SimpleFillDiskState(disk_state):
+ """Fill a disk_state sub dict with cluster defaults.
+
+ """
+ return FillDict(constants.DS_DEFAULTS, disk_state)
+
def FillND(self, node, nodegroup):
"""Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}
"""
return FillDict(self.ndparams, ndparams)
+ def SimpleFillIPolicy(self, ipolicy):
+ """ Fill instance policy dict with defaults.
+
+ @type ipolicy: dict
+ @param ipolicy: the dict to fill
+ @rtype: dict
+ @return: a copy of passed ipolicy with missing keys filled from
+ the cluster defaults
+
+ """
+ return FillDictOfDicts(self.ipolicy, ipolicy)
+
class BlockDevStatus(ConfigObject):
"""Config object representing the status of a block device."""
__slots__ = [
"what",
"fields",
- "filter",
+ "qfilter",
]
]
+class MigrationStatus(ConfigObject):
+ """Object holding the status of a migration.
+
+ """
+ __slots__ = [
+ "status",
+ "transferred_ram",
+ "total_ram",
+ ]
+
+
class InstanceConsole(ConfigObject):
"""Object describing how to access the console of an instance.
_PSkipChecks = ("skip_checks", ht.EmptyList,
ht.TListOf(ht.TElemOf(constants.VERIFY_OPTIONAL_CHECKS)),
"Which checks to skip")
+_PIgnoreErrors = ("ignore_errors", ht.EmptyList,
+ ht.TListOf(ht.TElemOf(constants.CV_ALL_ECODES_STRINGS)),
+ "List of error codes that should be treated as warnings")
+
+# Disk parameters
+_PDiskParams = ("diskparams", None,
+ ht.TOr(
+ ht.TDictOf(ht.TElemOf(constants.DISK_TEMPLATES), ht.TDict),
+ ht.TNone),
+ "Disk templates' parameter defaults")
+
+# Parameters for node resource model
+_PHvState = ("hv_state", None, ht.TMaybeDict, "Set hypervisor states")
+_PDiskState = ("disk_state", None, ht.TMaybeDict, "Set disk states")
#: OP_ID conversion regular expression
_OPID_RE = re.compile("([a-z])([A-Z])")
return True
-_CheckDiskTemplate = ht.TAnd(ht.TElemOf(constants.DISK_TEMPLATES),
- _CheckFileStorage)
+def _BuildDiskTemplateCheck(accept_none):
+ """Builds check for disk template.
+
+ @type accept_none: bool
+ @param accept_none: whether to accept None as a correct value
+ @rtype: callable
+
+ """
+ template_check = ht.TElemOf(constants.DISK_TEMPLATES)
+
+ if accept_none:
+ template_check = ht.TOr(template_check, ht.TNone)
+
+ return ht.TAnd(template_check, _CheckFileStorage)
def _CheckStorageType(storage_type):
ht.TElemOf(constants.OP_PRIO_SUBMIT_VALID), "Opcode priority"),
(DEPEND_ATTR, None, _BuildJobDepCheck(True),
"Job dependencies; if used through ``SubmitManyJobs`` relative (negative)"
- " job IDs can be used"),
+ " job IDs can be used; see :doc:`design document <design-chained-jobs>`"
+ " for details"),
(COMMENT_ATTR, None, ht.TMaybeString,
"Comment describing the purpose of the opcode"),
]
_PDebugSimulateErrors,
_PErrorCodes,
_PSkipChecks,
+ _PIgnoreErrors,
_PVerbose,
("group_name", None, ht.TMaybeString, "Group to verify")
]
OP_PARAMS = [
_PDebugSimulateErrors,
_PErrorCodes,
+ _PIgnoreErrors,
_PVerbose,
]
OP_RESULT = ht.TBool
_PDebugSimulateErrors,
_PErrorCodes,
_PSkipChecks,
+ _PIgnoreErrors,
_PVerbose,
]
OP_RESULT = ht.TBool
"""
OP_PARAMS = [
+ _PHvState,
+ _PDiskState,
("vg_name", None, ht.TMaybeString, "Volume group name"),
("enabled_hypervisors", None,
ht.TOr(ht.TAnd(ht.TListOf(ht.TElemOf(constants.HYPER_TYPES)), ht.TTrue),
("osparams", None, ht.TOr(ht.TDictOf(ht.TNonEmptyString, ht.TDict),
ht.TNone),
"Cluster-wide OS parameter defaults"),
+ _PDiskParams,
("candidate_pool_size", None, ht.TOr(ht.TStrictPositiveInt, ht.TNone),
"Master candidate pool size"),
("uid_pool", None, ht.NoType,
"Whether to wipe disks before allocating them to instances"),
("nicparams", None, ht.TMaybeDict, "Cluster-wide NIC parameter defaults"),
("ndparams", None, ht.TMaybeDict, "Cluster-wide node parameter defaults"),
+ ("ipolicy", None, ht.TMaybeDict, "Cluster-wide instance policy specs"),
("drbd_helper", None, ht.TOr(ht.TString, ht.TNone), "DRBD helper program"),
("default_iallocator", None, ht.TOr(ht.TString, ht.TNone),
"Default iallocator for cluster"),
("master_netdev", None, ht.TOr(ht.TString, ht.TNone),
"Master network device"),
+ ("master_netmask", None, ht.TOr(ht.TInt, ht.TNone),
+ "Netmask of the master IP"),
("reserved_lvs", None, ht.TOr(ht.TListOf(ht.TNonEmptyString), ht.TNone),
"List of reserved LVs"),
("hidden_os", None, _TestClusterOsList,
"Modify list of blacklisted operating systems. Each modification must have"
" two items, the operation and the OS name. The operation can be"
" ``%s`` or ``%s``." % (constants.DDM_ADD, constants.DDM_REMOVE)),
+ ("use_external_mip_script", None, ht.TMaybeBool,
+ "Whether to use an external master IP address setup script"),
]
@ivar what: Resources to query for, must be one of L{constants.QR_VIA_OP}
@ivar fields: List of fields to retrieve
- @ivar filter: Query filter
+ @ivar qfilter: Query filter
"""
OP_DSC_FIELD = "what"
_PUseLocking,
("fields", ht.NoDefault, ht.TListOf(ht.TNonEmptyString),
"Requested fields"),
- ("filter", None, ht.TOr(ht.TNone, ht.TListOf),
+ ("qfilter", None, ht.TOr(ht.TNone, ht.TListOf),
"Query filter"),
]
OP_DSC_FIELD = "node_name"
OP_PARAMS = [
_PNodeName,
+ _PHvState,
+ _PDiskState,
("primary_ip", None, ht.NoType, "Primary IP address"),
("secondary_ip", None, ht.TMaybeString, "Secondary IP address"),
("readd", False, ht.TBool, "Whether node is re-added to cluster"),
OP_PARAMS = [
_PNodeName,
_PForce,
+ _PHvState,
+ _PDiskState,
("master_candidate", None, ht.TMaybeBool,
"Whether the node should become a master candidate"),
("offline", None, ht.TMaybeBool,
(constants.IDISK_SIZE, constants.IDISK_SIZE, constants.IDISK_SIZE,
constants.IDISK_MODE,
" or ".join("``%s``" % i for i in sorted(constants.DISK_ACCESS_SET)))),
- ("disk_template", ht.NoDefault, _CheckDiskTemplate, "Disk template"),
+ ("disk_template", ht.NoDefault, _BuildDiskTemplateCheck(True),
+ "Disk template"),
("file_driver", None, ht.TOr(ht.TNone, ht.TElemOf(constants.FILE_DRIVER)),
"Driver for file-backed disks"),
("file_storage_dir", None, ht.TMaybeString,
class OpInstanceRecreateDisks(OpCode):
- """Deactivate an instance's disks."""
+ """Recreate an instance's disks."""
OP_DSC_FIELD = "instance_name"
OP_PARAMS = [
_PInstanceName,
("beparams", ht.EmptyDict, ht.TDict, "Per-instance backend parameters"),
("hvparams", ht.EmptyDict, ht.TDict,
"Per-instance hypervisor parameters, hypervisor-dependent"),
- ("disk_template", None, ht.TOr(ht.TNone, _CheckDiskTemplate),
+ ("disk_template", None, ht.TOr(ht.TNone, _BuildDiskTemplateCheck(False)),
"Disk template for instance"),
("remote_node", None, ht.TMaybeString,
"Secondary node (used when changing disk template)"),
("osparams", None, ht.TMaybeDict, "Per-instance OS parameters"),
("wait_for_sync", True, ht.TBool,
"Whether to wait for the disk to synchronize, when changing template"),
+ ("offline_inst", False, ht.TBool,
+ "Whether to turn off the down instance completely"),
+ ("online_inst", False, ht.TBool,
+ "Whether to enable the offline instance"),
]
OP_RESULT = _TSetParamsResult
_PGroupName,
_PNodeGroupAllocPolicy,
_PGroupNodeParams,
+ _PDiskParams,
+ _PHvState,
+ _PDiskState,
+ ("ipolicy", None, ht.TMaybeDict, "Group-wide instance policy specs"),
]
_PGroupName,
_PNodeGroupAllocPolicy,
_PGroupNodeParams,
+ _PDiskParams,
+ _PHvState,
+ _PDiskState,
+ ("ipolicy", None, ht.TMaybeDict, "Group-wide instance policy specs"),
]
OP_RESULT = _TSetParamsResult
--- /dev/null
+#!/usr/bin/python
+#
+
+# Copyright (C) 2011 Google Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA.
+
+
+"""Converter tools between ovf and ganeti config file
+
+"""
+
+# pylint: disable=F0401, E1101
+
+# F0401 because ElementTree is not default for python 2.4
+# E1101 makes no sense - pylint assumes that ElementTree object is a tuple
+
+
+import ConfigParser
+import errno
+import logging
+import os
+import os.path
+import re
+import shutil
+import tarfile
+import tempfile
+import xml.dom.minidom
+import xml.parsers.expat
+try:
+ import xml.etree.ElementTree as ET
+except ImportError:
+ import elementtree.ElementTree as ET
+
+try:
+ ParseError = ET.ParseError # pylint: disable=E1103
+except AttributeError:
+ ParseError = None
+
+from ganeti import constants
+from ganeti import errors
+from ganeti import utils
+
+
+# Schemas used in OVF format
+GANETI_SCHEMA = "http://ganeti"
+OVF_SCHEMA = "http://schemas.dmtf.org/ovf/envelope/1"
+RASD_SCHEMA = ("http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/"
+ "CIM_ResourceAllocationSettingData")
+VSSD_SCHEMA = ("http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/"
+ "CIM_VirtualSystemSettingData")
+XML_SCHEMA = "http://www.w3.org/2001/XMLSchema-instance"
+
+# File extensions in OVF package
+OVA_EXT = ".ova"
+OVF_EXT = ".ovf"
+MF_EXT = ".mf"
+CERT_EXT = ".cert"
+COMPRESSION_EXT = ".gz"
+FILE_EXTENSIONS = [
+ OVF_EXT,
+ MF_EXT,
+ CERT_EXT,
+]
+
+COMPRESSION_TYPE = "gzip"
+NO_COMPRESSION = [None, "identity"]
+COMPRESS = "compression"
+DECOMPRESS = "decompression"
+ALLOWED_ACTIONS = [COMPRESS, DECOMPRESS]
+
+VMDK = "vmdk"
+RAW = "raw"
+COW = "cow"
+ALLOWED_FORMATS = [RAW, COW, VMDK]
+
+# ResourceType values
+RASD_TYPE = {
+ "vcpus": "3",
+ "memory": "4",
+ "scsi-controller": "6",
+ "ethernet-adapter": "10",
+ "disk": "17",
+}
+
+SCSI_SUBTYPE = "lsilogic"
+VS_TYPE = {
+ "ganeti": "ganeti-ovf",
+ "external": "vmx-04",
+}
+
+# AllocationUnits values and conversion
+ALLOCATION_UNITS = {
+ 'b': ["bytes", "b"],
+ 'kb': ["kilobytes", "kb", "byte * 2^10", "kibibytes", "kib"],
+ 'mb': ["megabytes", "mb", "byte * 2^20", "mebibytes", "mib"],
+ 'gb': ["gigabytes", "gb", "byte * 2^30", "gibibytes", "gib"],
+}
+CONVERT_UNITS_TO_MB = {
+ 'b': lambda x: x / (1024 * 1024),
+ 'kb': lambda x: x / 1024,
+ 'mb': lambda x: x,
+ 'gb': lambda x: x * 1024,
+}
+
+# Names of the config fields
+NAME = "name"
+OS = "os"
+HYPERV = "hypervisor"
+VCPUS = "vcpus"
+MEMORY = "memory"
+AUTO_BALANCE = "auto_balance"
+DISK_TEMPLATE = "disk_template"
+TAGS = "tags"
+VERSION = "version"
+
+# Instance IDs of System and SCSI controller
+INSTANCE_ID = {
+ "system": 0,
+ "vcpus": 1,
+ "memory": 2,
+ "scsi": 3,
+}
+
+# Disk format descriptions
+DISK_FORMAT = {
+ RAW: "http://en.wikipedia.org/wiki/Byte",
+ VMDK: "http://www.vmware.com/interfaces/specifications/vmdk.html"
+ "#monolithicSparse",
+ COW: "http://www.gnome.org/~markmc/qcow-image-format.html",
+}
+
+
+def CheckQemuImg():
+ """ Make sure that qemu-img is present before performing operations.
+
+ @raise errors.OpPrereqError: when qemu-img was not found in the system
+
+ """
+ if not constants.QEMUIMG_PATH:
+ raise errors.OpPrereqError("qemu-img not found at build time, unable"
+ " to continue")
+
+
+def LinkFile(old_path, prefix=None, suffix=None, directory=None):
+ """Create link with a given prefix and suffix.
+
+ This is a wrapper over os.link. It tries to create a hard link for given file,
+ but instead of rising error when file exists, the function changes the name
+ a little bit.
+
+ @type old_path: string
+ @param old_path: path to the file that is to be linked
+ @type prefix: string
+ @param prefix: prefix of filename for the link
+ @type suffix: string
+ @param suffix: suffix of the filename for the link
+ @type directory: string
+ @param directory: directory of the link
+
+ @raise errors.OpPrereqError: when error on linking is different than
+ "File exists"
+
+ """
+ assert(prefix is not None or suffix is not None)
+ if directory is None:
+ directory = os.getcwd()
+ new_path = utils.PathJoin(directory, "%s%s" % (prefix, suffix))
+ counter = 1
+ while True:
+ try:
+ os.link(old_path, new_path)
+ break
+ except OSError, err:
+ if err.errno == errno.EEXIST:
+ new_path = utils.PathJoin(directory,
+ "%s_%s%s" % (prefix, counter, suffix))
+ counter += 1
+ else:
+ raise errors.OpPrereqError("Error moving the file %s to %s location:"
+ " %s" % (old_path, new_path, err))
+ return new_path
+
+
+class OVFReader(object):
+ """Reader class for OVF files.
+
+ @type files_list: list
+ @ivar files_list: list of files in the OVF package
+ @type tree: ET.ElementTree
+ @ivar tree: XML tree of the .ovf file
+ @type schema_name: string
+ @ivar schema_name: name of the .ovf file
+ @type input_dir: string
+ @ivar input_dir: directory in which the .ovf file resides
+
+ """
+ def __init__(self, input_path):
+ """Initialize the reader - load the .ovf file to XML parser.
+
+ It is assumed that names of manifest (.mf), certificate (.cert) and ovf
+ files are the same. In order to account any other files as part of the ovf
+ package, they have to be explicitly mentioned in the Resources section
+ of the .ovf file.
+
+ @type input_path: string
+ @param input_path: absolute path to the .ovf file
+
+ @raise errors.OpPrereqError: when .ovf file is not a proper XML file or some
+ of the files mentioned in Resources section do not exist
+
+ """
+ self.tree = ET.ElementTree()
+ try:
+ self.tree.parse(input_path)
+ except (ParseError, xml.parsers.expat.ExpatError), err:
+ raise errors.OpPrereqError("Error while reading %s file: %s" %
+ (OVF_EXT, err))
+
+ # Create a list of all files in the OVF package
+ (input_dir, input_file) = os.path.split(input_path)
+ (input_name, _) = os.path.splitext(input_file)
+ files_directory = utils.ListVisibleFiles(input_dir)
+ files_list = []
+ for file_name in files_directory:
+ (name, extension) = os.path.splitext(file_name)
+ if extension in FILE_EXTENSIONS and name == input_name:
+ files_list.append(file_name)
+ files_list += self._GetAttributes("{%s}References/{%s}File" %
+ (OVF_SCHEMA, OVF_SCHEMA),
+ "{%s}href" % OVF_SCHEMA)
+ for file_name in files_list:
+ file_path = utils.PathJoin(input_dir, file_name)
+ if not os.path.exists(file_path):
+ raise errors.OpPrereqError("File does not exist: %s" % file_path)
+ logging.info("Files in the OVF package: %s", " ".join(files_list))
+ self.files_list = files_list
+ self.input_dir = input_dir
+ self.schema_name = input_name
+
+ def _GetAttributes(self, path, attribute):
+ """Get specified attribute from all nodes accessible using given path.
+
+ Function follows the path from root node to the desired tags using path,
+ then reads the appropriate attribute values.
+
+ @type path: string
+ @param path: path of nodes to visit
+ @type attribute: string
+ @param attribute: attribute for which we gather the information
+ @rtype: list
+ @return: for each accessible tag with the attribute value set, value of the
+ attribute
+
+ """
+ current_list = self.tree.findall(path)
+ results = [x.get(attribute) for x in current_list]
+ return filter(None, results)
+
+ def _GetElementMatchingAttr(self, path, match_attr):
+ """Searches for element on a path that matches certain attribute value.
+
+ Function follows the path from root node to the desired tags using path,
+ then searches for the first one matching the attribute value.
+
+ @type path: string
+ @param path: path of nodes to visit
+ @type match_attr: tuple
+ @param match_attr: pair (attribute, value) for which we search
+ @rtype: ET.ElementTree or None
+ @return: first element matching match_attr or None if nothing matches
+
+ """
+ potential_elements = self.tree.findall(path)
+ (attr, val) = match_attr
+ for elem in potential_elements:
+ if elem.get(attr) == val:
+ return elem
+ return None
+
+ def _GetElementMatchingText(self, path, match_text):
+ """Searches for element on a path that matches certain text value.
+
+ Function follows the path from root node to the desired tags using path,
+ then searches for the first one matching the text value.
+
+ @type path: string
+ @param path: path of nodes to visit
+ @type match_text: tuple
+ @param match_text: pair (node, text) for which we search
+ @rtype: ET.ElementTree or None
+ @return: first element matching match_text or None if nothing matches
+
+ """
+ potential_elements = self.tree.findall(path)
+ (node, text) = match_text
+ for elem in potential_elements:
+ if elem.findtext(node) == text:
+ return elem
+ return None
+
+ @staticmethod
+ def _GetDictParameters(root, schema):
+ """Reads text in all children and creates the dictionary from the contents.
+
+ @type root: ET.ElementTree or None
+ @param root: father of the nodes we want to collect data about
+ @type schema: string
+ @param schema: schema name to be removed from the tag
+ @rtype: dict
+ @return: dictionary containing tags and their text contents, tags have their
+ schema fragment removed or empty dictionary, when root is None
+
+ """
+ if not root:
+ return {}
+ results = {}
+ for element in list(root):
+ pref_len = len("{%s}" % schema)
+ assert(schema in element.tag)
+ tag = element.tag[pref_len:]
+ results[tag] = element.text
+ return results
+
+ def VerifyManifest(self):
+ """Verifies manifest for the OVF package, if one is given.
+
+ @raise errors.OpPrereqError: if SHA1 checksums do not match
+
+ """
+ if "%s%s" % (self.schema_name, MF_EXT) in self.files_list:
+ logging.warning("Verifying SHA1 checksums, this may take a while")
+ manifest_filename = "%s%s" % (self.schema_name, MF_EXT)
+ manifest_path = utils.PathJoin(self.input_dir, manifest_filename)
+ manifest_content = utils.ReadFile(manifest_path).splitlines()
+ manifest_files = {}
+ regexp = r"SHA1\((\S+)\)= (\S+)"
+ for line in manifest_content:
+ match = re.match(regexp, line)
+ if match:
+ file_name = match.group(1)
+ sha1_sum = match.group(2)
+ manifest_files[file_name] = sha1_sum
+ files_with_paths = [utils.PathJoin(self.input_dir, file_name)
+ for file_name in self.files_list]
+ sha1_sums = utils.FingerprintFiles(files_with_paths)
+ for file_name, value in manifest_files.iteritems():
+ if sha1_sums.get(utils.PathJoin(self.input_dir, file_name)) != value:
+ raise errors.OpPrereqError("SHA1 checksum of %s does not match the"
+ " value in manifest file" % file_name)
+ logging.info("SHA1 checksums verified")
+
+ def GetInstanceName(self):
+ """Provides information about instance name.
+
+ @rtype: string
+ @return: instance name string
+
+ """
+ find_name = "{%s}VirtualSystem/{%s}Name" % (OVF_SCHEMA, OVF_SCHEMA)
+ return self.tree.findtext(find_name)
+
+ def GetDiskTemplate(self):
+ """Returns disk template from .ovf file
+
+ @rtype: string or None
+ @return: name of the template
+ """
+ find_template = ("{%s}GanetiSection/{%s}DiskTemplate" %
+ (GANETI_SCHEMA, GANETI_SCHEMA))
+ return self.tree.findtext(find_template)
+
+ def GetHypervisorData(self):
+ """Provides hypervisor information - hypervisor name and options.
+
+ @rtype: dict
+ @return: dictionary containing name of the used hypervisor and all the
+ specified options
+
+ """
+ hypervisor_search = ("{%s}GanetiSection/{%s}Hypervisor" %
+ (GANETI_SCHEMA, GANETI_SCHEMA))
+ hypervisor_data = self.tree.find(hypervisor_search)
+ if not hypervisor_data:
+ return {"hypervisor_name": constants.VALUE_AUTO}
+ results = {
+ "hypervisor_name": hypervisor_data.findtext("{%s}Name" % GANETI_SCHEMA,
+ default=constants.VALUE_AUTO),
+ }
+ parameters = hypervisor_data.find("{%s}Parameters" % GANETI_SCHEMA)
+ results.update(self._GetDictParameters(parameters, GANETI_SCHEMA))
+ return results
+
+ def GetOSData(self):
+ """ Provides operating system information - os name and options.
+
+ @rtype: dict
+ @return: dictionary containing name and options for the chosen OS
+
+ """
+ results = {}
+ os_search = ("{%s}GanetiSection/{%s}OperatingSystem" %
+ (GANETI_SCHEMA, GANETI_SCHEMA))
+ os_data = self.tree.find(os_search)
+ if os_data:
+ results["os_name"] = os_data.findtext("{%s}Name" % GANETI_SCHEMA)
+ parameters = os_data.find("{%s}Parameters" % GANETI_SCHEMA)
+ results.update(self._GetDictParameters(parameters, GANETI_SCHEMA))
+ return results
+
def GetBackendData(self):
  """Provides backend information - vcpus, memory, auto balancing options.

  @rtype: dict
  @return: dictionary containing options for vcpus, memory and auto balance
    settings

  """
  results = {}

  find_vcpus = ("{%s}VirtualSystem/{%s}VirtualHardwareSection/{%s}Item" %
                (OVF_SCHEMA, OVF_SCHEMA, OVF_SCHEMA))
  match_vcpus = ("{%s}ResourceType" % RASD_SCHEMA, RASD_TYPE["vcpus"])
  vcpus = self._GetElementMatchingText(find_vcpus, match_vcpus)
  # Explicit None tests below: a childless ElementTree Element evaluates as
  # False, so a bare truth test would discard a present-but-empty Item.
  if vcpus is not None:
    vcpus_count = vcpus.findtext("{%s}VirtualQuantity" % RASD_SCHEMA,
                                 default=constants.VALUE_AUTO)
  else:
    vcpus_count = constants.VALUE_AUTO
  results["vcpus"] = str(vcpus_count)

  find_memory = find_vcpus
  match_memory = ("{%s}ResourceType" % RASD_SCHEMA, RASD_TYPE["memory"])
  memory = self._GetElementMatchingText(find_memory, match_memory)
  memory_raw = None
  if memory is not None:
    alloc_units = memory.findtext("{%s}AllocationUnits" % RASD_SCHEMA)
    matching_units = [units for units, variants in
                      ALLOCATION_UNITS.iteritems()
                      if alloc_units.lower() in variants]
    if matching_units == []:
      # Interpolate the unit into the message with "%"; passing it as a
      # separate argument would leave the "%s" placeholder unexpanded.
      raise errors.OpPrereqError("Unit %s for RAM memory unknown" %
                                 alloc_units)
    units = matching_units[0]
    memory_raw = int(memory.findtext("{%s}VirtualQuantity" % RASD_SCHEMA,
                                     default=constants.VALUE_AUTO))
    memory_count = CONVERT_UNITS_TO_MB[units](memory_raw)
  else:
    memory_count = constants.VALUE_AUTO
  results["memory"] = str(memory_count)

  find_balance = ("{%s}GanetiSection/{%s}AutoBalance" %
                  (GANETI_SCHEMA, GANETI_SCHEMA))
  balance = self.tree.findtext(find_balance, default=constants.VALUE_AUTO)
  results["auto_balance"] = balance

  return results
+
def GetTagsData(self):
  """Provides tags information for instance.

  @rtype: string or None
  @return: string of comma-separated tags for the instance

  """
  tags_path = "{%s}GanetiSection/{%s}Tags" % (GANETI_SCHEMA, GANETI_SCHEMA)
  tags = self.tree.findtext(tags_path)
  # findtext may yield None or an empty string; normalize both to None
  return tags or None
+
def GetVersionData(self):
  """Provides version number read from .ovf file

  @rtype: string
  @return: string containing the version number

  """
  version_path = ("{%s}GanetiSection/{%s}Version" %
                  (GANETI_SCHEMA, GANETI_SCHEMA))
  # Returns None when the Ganeti section carries no version element
  return self.tree.findtext(version_path)
+
+ def GetNetworkData(self):
+ """Provides data about the network in the OVF instance.
+
+ The method gathers the data about networks used by OVF instance. It assumes
+ that 'name' tag means something - in essence, if it contains one of the
+ words 'bridged' or 'routed' then that will be the mode of this network in
+ Ganeti. The information about the network can be either in GanetiSection or
+ VirtualHardwareSection.
+
+ @rtype: dict
+ @return: dictionary containing all the network information
+
+ """
+ results = {}
+ networks_search = ("{%s}NetworkSection/{%s}Network" %
+ (OVF_SCHEMA, OVF_SCHEMA))
+ network_names = self._GetAttributes(networks_search,
+ "{%s}name" % OVF_SCHEMA)
+ required = ["ip", "mac", "link", "mode"]
+ # Each network produces a set of "nic<N>_<option>" keys in the result
+ for (counter, network_name) in enumerate(network_names):
+ network_search = ("{%s}VirtualSystem/{%s}VirtualHardwareSection/{%s}Item"
+ % (OVF_SCHEMA, OVF_SCHEMA, OVF_SCHEMA))
+ ganeti_search = ("{%s}GanetiSection/{%s}Network/{%s}Nic" %
+ (GANETI_SCHEMA, GANETI_SCHEMA, GANETI_SCHEMA))
+ network_match = ("{%s}Connection" % RASD_SCHEMA, network_name)
+ ganeti_match = ("{%s}name" % OVF_SCHEMA, network_name)
+ network_data = self._GetElementMatchingText(network_search, network_match)
+ network_ganeti_data = self._GetElementMatchingAttr(ganeti_search,
+ ganeti_match)
+
+ ganeti_data = {}
+ # NOTE(review): a childless ElementTree Element is falsy, so this truth
+ # test may skip a present-but-empty <Nic>; "is not None" would be the
+ # safer check -- TODO confirm intended behavior
+ if network_ganeti_data:
+ ganeti_data["mode"] = network_ganeti_data.findtext("{%s}Mode" %
+ GANETI_SCHEMA)
+ ganeti_data["mac"] = network_ganeti_data.findtext("{%s}MACAddress" %
+ GANETI_SCHEMA)
+ ganeti_data["ip"] = network_ganeti_data.findtext("{%s}IPAddress" %
+ GANETI_SCHEMA)
+ ganeti_data["link"] = network_ganeti_data.findtext("{%s}Link" %
+ GANETI_SCHEMA)
+ mac_data = None
+ # NOTE(review): same Element-truthiness caveat applies here
+ if network_data:
+ mac_data = network_data.findtext("{%s}Address" % RASD_SCHEMA)
+
+ network_name = network_name.lower()
+
+ # First, some not Ganeti-specific information is collected
+ if constants.NIC_MODE_BRIDGED in network_name:
+ results["nic%s_mode" % counter] = "bridged"
+ elif constants.NIC_MODE_ROUTED in network_name:
+ results["nic%s_mode" % counter] = "routed"
+ results["nic%s_mac" % counter] = mac_data
+
+ # GanetiSection data overrides 'manually' collected data
+ for name, value in ganeti_data.iteritems():
+ results["nic%s_%s" % (counter, name)] = value
+
+ # Bridged network has no IP - unless specifically stated otherwise
+ if (results.get("nic%s_mode" % counter) == "bridged" and
+ not results.get("nic%s_ip" % counter)):
+ results["nic%s_ip" % counter] = constants.VALUE_NONE
+
+ # Any option still unset falls back to "auto"
+ for option in required:
+ if not results.get("nic%s_%s" % (counter, option)):
+ results["nic%s_%s" % (counter, option)] = constants.VALUE_AUTO
+
+ if network_names:
+ results["nic_count"] = str(len(network_names))
+ return results
+
def GetDisksNames(self):
  """Provides list of file names for the disks used by the instance.

  @rtype: list
  @return: list of file names, as referenced in .ovf file

  """
  names = []
  disks_search = "{%s}DiskSection/{%s}Disk" % (OVF_SCHEMA, OVF_SCHEMA)
  file_refs = self._GetAttributes(disks_search, "{%s}fileRef" % OVF_SCHEMA)
  reference_search = "{%s}References/{%s}File" % (OVF_SCHEMA, OVF_SCHEMA)
  for file_ref in file_refs:
    # Resolve the fileRef id against the <References> section
    reference_match = ("{%s}id" % OVF_SCHEMA, file_ref)
    file_elem = self._GetElementMatchingAttr(reference_search,
                                             reference_match)
    if file_elem is None:
      raise errors.OpPrereqError("%s file corrupted - disk %s not found in"
                                 " references" % (OVF_EXT, file_ref))
    file_name = file_elem.get("{%s}href" % OVF_SCHEMA)
    compression = file_elem.get("{%s}compression" % OVF_SCHEMA)
    names.append((file_name, compression))
  return names
+
+
def SubElementText(parent, tag, text, attrib=None, **extra):
  """This is just a wrapper on ET.SubElement that always has text content.

  @param parent: parent element
  @param tag: tag for the new sub-element
  @param text: text content; if None, no element is created at all
  @param attrib: optional dictionary of element attributes
  @return: the created element, or None if C{text} was None

  """
  if text is None:
    return None
  # Use a None sentinel instead of a mutable "{}" default argument: a shared
  # default dict would leak state between calls if it were ever modified.
  if attrib is None:
    attrib = {}
  elem = ET.SubElement(parent, tag, attrib=attrib, **extra)
  elem.text = str(text)
  return elem
+
+
+class OVFWriter(object):
+ """Writer class for OVF files.
+
+ @type tree: ET.ElementTree
+ @ivar tree: XML tree that we are constructing
+ @type virtual_system_type: string
+ @ivar virtual_system_type: value of vssd:VirtualSystemType, for external usage
+ in VMWare this requires to be vmx
+ @type hardware_list: list
+ @ivar hardware_list: list of items prepared for VirtualHardwareSection
+ @type next_instance_id: int
+ @ivar next_instance_id: next instance id to be used when creating elements on
+ hardware_list
+
+ """
+ def __init__(self, has_gnt_section):
+ """Initialize the writer - set the top element.
+
+ @type has_gnt_section: bool
+ @param has_gnt_section: if the Ganeti schema should be added - i.e. this
+ means that Ganeti section will be present
+
+ """
+ env_attribs = {
+ "xmlns:xsi": XML_SCHEMA,
+ "xmlns:vssd": VSSD_SCHEMA,
+ "xmlns:rasd": RASD_SCHEMA,
+ "xmlns:ovf": OVF_SCHEMA,
+ "xmlns": OVF_SCHEMA,
+ "xml:lang": "en-US",
+ }
+ if has_gnt_section:
+ env_attribs["xmlns:gnt"] = GANETI_SCHEMA
+ self.virtual_system_type = VS_TYPE["ganeti"]
+ else:
+ self.virtual_system_type = VS_TYPE["external"]
+ self.tree = ET.Element("Envelope", attrib=env_attribs)
+ self.hardware_list = []
+ # INSTANCE_ID contains statically assigned IDs, starting from 0
+ self.next_instance_id = len(INSTANCE_ID) # FIXME: hackish
+
+ def SaveDisksData(self, disks):
+ """Convert disk information to certain OVF sections.
+
+ @type disks: list
+ @param disks: list of dictionaries of disk options from config.ini
+
+ """
+ references = ET.SubElement(self.tree, "References")
+ disk_section = ET.SubElement(self.tree, "DiskSection")
+ SubElementText(disk_section, "Info", "Virtual disk information")
+ # Each disk produces a <File> in References, a <Disk> in DiskSection and
+ # an <Item> queued for the VirtualHardwareSection
+ for counter, disk in enumerate(disks):
+ file_id = "file%s" % counter
+ disk_id = "disk%s" % counter
+ file_attribs = {
+ "ovf:href": disk["path"],
+ "ovf:size": str(disk["real-size"]),
+ "ovf:id": file_id,
+ }
+ disk_attribs = {
+ "ovf:capacity": str(disk["virt-size"]),
+ "ovf:diskId": disk_id,
+ "ovf:fileRef": file_id,
+ "ovf:format": DISK_FORMAT.get(disk["format"], disk["format"]),
+ }
+ if "compression" in disk:
+ file_attribs["ovf:compression"] = disk["compression"]
+ ET.SubElement(references, "File", attrib=file_attribs)
+ ET.SubElement(disk_section, "Disk", attrib=disk_attribs)
+
+ # Item in VirtualHardwareSection creation
+ disk_item = ET.Element("Item")
+ SubElementText(disk_item, "rasd:ElementName", disk_id)
+ SubElementText(disk_item, "rasd:HostResource", "ovf:/disk/%s" % disk_id)
+ SubElementText(disk_item, "rasd:InstanceID", self.next_instance_id)
+ SubElementText(disk_item, "rasd:Parent", INSTANCE_ID["scsi"])
+ SubElementText(disk_item, "rasd:ResourceType", RASD_TYPE["disk"])
+ self.hardware_list.append(disk_item)
+ self.next_instance_id += 1
+
+ def SaveNetworksData(self, networks):
+ """Convert network information to NetworkSection.
+
+ @type networks: list
+ @param networks: list of dictionaries of network options form config.ini
+
+ """
+ network_section = ET.SubElement(self.tree, "NetworkSection")
+ SubElementText(network_section, "Info", "List of logical networks")
+ # Network names encode the mode plus a counter, e.g. "bridged0"
+ for counter, network in enumerate(networks):
+ network_name = "%s%s" % (network["mode"], counter)
+ network_attrib = {"ovf:name": network_name}
+ ET.SubElement(network_section, "Network", attrib=network_attrib)
+
+ # Item in VirtualHardwareSection creation
+ network_item = ET.Element("Item")
+ SubElementText(network_item, "rasd:Address", network["mac"])
+ SubElementText(network_item, "rasd:Connection", network_name)
+ SubElementText(network_item, "rasd:ElementName", network_name)
+ SubElementText(network_item, "rasd:InstanceID", self.next_instance_id)
+ SubElementText(network_item, "rasd:ResourceType",
+ RASD_TYPE["ethernet-adapter"])
+ self.hardware_list.append(network_item)
+ self.next_instance_id += 1
+
+ @staticmethod
+ def _SaveNameAndParams(root, data):
+ """Save name and parameters information under root using data.
+
+ @type root: ET.Element
+ @param root: root element for the Name and Parameters
+ @type data: dict
+ @param data: data from which we gather the values
+
+ """
+ assert(data.get("name"))
+ # NOTE(review): the assignment result is never used and "name" is
+ # immediately rebound as the loop variable below
+ name = SubElementText(root, "gnt:Name", data["name"])
+ params = ET.SubElement(root, "gnt:Parameters")
+ for name, value in data.iteritems():
+ if name != "name":
+ SubElementText(params, "gnt:%s" % name, value)
+
+ def SaveGanetiData(self, ganeti, networks):
+ """Convert Ganeti-specific information to GanetiSection.
+
+ @type ganeti: dict
+ @param ganeti: dictionary of Ganeti-specific options from config.ini
+ @type networks: list
+ @param networks: list of dictionaries of network options form config.ini
+
+ """
+ ganeti_section = ET.SubElement(self.tree, "gnt:GanetiSection")
+
+ SubElementText(ganeti_section, "gnt:Version", ganeti.get("version"))
+ SubElementText(ganeti_section, "gnt:DiskTemplate",
+ ganeti.get("disk_template"))
+ SubElementText(ganeti_section, "gnt:AutoBalance",
+ ganeti.get("auto_balance"))
+ SubElementText(ganeti_section, "gnt:Tags", ganeti.get("tags"))
+
+ osys = ET.SubElement(ganeti_section, "gnt:OperatingSystem")
+ self._SaveNameAndParams(osys, ganeti["os"])
+
+ hypervisor = ET.SubElement(ganeti_section, "gnt:Hypervisor")
+ self._SaveNameAndParams(hypervisor, ganeti["hypervisor"])
+
+ # NIC names must match those emitted by SaveNetworksData
+ network_section = ET.SubElement(ganeti_section, "gnt:Network")
+ for counter, network in enumerate(networks):
+ network_name = "%s%s" % (network["mode"], counter)
+ nic_attrib = {"ovf:name": network_name}
+ nic = ET.SubElement(network_section, "gnt:Nic", attrib=nic_attrib)
+ SubElementText(nic, "gnt:Mode", network["mode"])
+ SubElementText(nic, "gnt:MACAddress", network["mac"])
+ SubElementText(nic, "gnt:IPAddress", network["ip"])
+ SubElementText(nic, "gnt:Link", network["link"])
+
+ def SaveVirtualSystemData(self, name, vcpus, memory):
+ """Convert virtual system information to OVF sections.
+
+ @type name: string
+ @param name: name of the instance
+ @type vcpus: int
+ @param vcpus: number of VCPUs
+ @type memory: int
+ @param memory: RAM memory in MB
+
+ """
+ assert(vcpus > 0)
+ assert(memory > 0)
+ vs_attrib = {"ovf:id": name}
+ virtual_system = ET.SubElement(self.tree, "VirtualSystem", attrib=vs_attrib)
+ SubElementText(virtual_system, "Info", "A virtual machine")
+
+ name_section = ET.SubElement(virtual_system, "Name")
+ name_section.text = name
+ os_attrib = {"ovf:id": "0"}
+ os_section = ET.SubElement(virtual_system, "OperatingSystemSection",
+ attrib=os_attrib)
+ SubElementText(os_section, "Info", "Installed guest operating system")
+ hardware_section = ET.SubElement(virtual_system, "VirtualHardwareSection")
+ SubElementText(hardware_section, "Info", "Virtual hardware requirements")
+
+ # System description
+ system = ET.SubElement(hardware_section, "System")
+ SubElementText(system, "vssd:ElementName", "Virtual Hardware Family")
+ SubElementText(system, "vssd:InstanceID", INSTANCE_ID["system"])
+ SubElementText(system, "vssd:VirtualSystemIdentifier", name)
+ SubElementText(system, "vssd:VirtualSystemType", self.virtual_system_type)
+
+ # Item for vcpus
+ vcpus_item = ET.SubElement(hardware_section, "Item")
+ SubElementText(vcpus_item, "rasd:ElementName",
+ "%s virtual CPU(s)" % vcpus)
+ SubElementText(vcpus_item, "rasd:InstanceID", INSTANCE_ID["vcpus"])
+ SubElementText(vcpus_item, "rasd:ResourceType", RASD_TYPE["vcpus"])
+ SubElementText(vcpus_item, "rasd:VirtualQuantity", vcpus)
+
+ # Item for memory
+ memory_item = ET.SubElement(hardware_section, "Item")
+ SubElementText(memory_item, "rasd:AllocationUnits", "byte * 2^20")
+ SubElementText(memory_item, "rasd:ElementName", "%sMB of memory" % memory)
+ SubElementText(memory_item, "rasd:InstanceID", INSTANCE_ID["memory"])
+ SubElementText(memory_item, "rasd:ResourceType", RASD_TYPE["memory"])
+ SubElementText(memory_item, "rasd:VirtualQuantity", memory)
+
+ # Item for scsi controller
+ scsi_item = ET.SubElement(hardware_section, "Item")
+ SubElementText(scsi_item, "rasd:Address", INSTANCE_ID["system"])
+ SubElementText(scsi_item, "rasd:ElementName", "scsi_controller0")
+ SubElementText(scsi_item, "rasd:InstanceID", INSTANCE_ID["scsi"])
+ SubElementText(scsi_item, "rasd:ResourceSubType", SCSI_SUBTYPE)
+ SubElementText(scsi_item, "rasd:ResourceType", RASD_TYPE["scsi-controller"])
+
+ # Other items - from self.hardware_list
+ for item in self.hardware_list:
+ hardware_section.append(item)
+
+ def PrettyXmlDump(self):
+ """Formatter of the XML file.
+
+ @rtype: string
+ @return: XML tree in the form of nicely-formatted string
+
+ """
+ raw_string = ET.tostring(self.tree)
+ parsed_xml = xml.dom.minidom.parseString(raw_string)
+ xml_string = parsed_xml.toprettyxml(indent=" ")
+ # Collapse single-line text nodes back onto one line after prettyprinting.
+ # NOTE(review): the pattern is a non-raw string containing "\s"; it works
+ # but a raw string literal would avoid the anomalous-escape warning
+ text_re = re.compile(">\n\s+([^<>\s].*?)\n\s+</", re.DOTALL)
+ return text_re.sub(">\g<1></", xml_string)
+
+
+class Converter(object):
+ """Converter class for OVF packages.
+
+ Converter is a class above both ImporterOVF and ExporterOVF. Its purpose is
+ to provide a common interface for the two.
+
+ @type options: optparse.Values
+ @ivar options: options parsed from the command line
+ @type output_dir: string
+ @ivar output_dir: directory to which the results of conversion shall be
+ written
+ @type temp_file_manager: L{utils.TemporaryFileManager}
+ @ivar temp_file_manager: container for temporary files created during
+ conversion
+ @type temp_dir: string
+ @ivar temp_dir: temporary directory created when we deal with OVA
+
+ """
+ def __init__(self, input_path, options):
+ """Initialize the converter.
+
+ @type input_path: string
+ @param input_path: path to the Converter input file
+ @type options: optparse.Values
+ @param options: command line options
+
+ @raise errors.OpPrereqError: if file does not exist
+
+ """
+ input_path = os.path.abspath(input_path)
+ if not os.path.isfile(input_path):
+ raise errors.OpPrereqError("File does not exist: %s" % input_path)
+ self.options = options
+ self.temp_file_manager = utils.TemporaryFileManager()
+ self.temp_dir = None
+ self.output_dir = None
+ # Delegated to the subclass; sets input_dir/input_path/output_dir etc.
+ self._ReadInputData(input_path)
+
+ def _ReadInputData(self, input_path):
+ """Reads the data on which the conversion will take place.
+
+ @type input_path: string
+ @param input_path: absolute path to the Converter input file
+
+ """
+ raise NotImplementedError()
+
+ def _CompressDisk(self, disk_path, compression, action):
+ """Performs (de)compression on the disk and returns the new path
+
+ @type disk_path: string
+ @param disk_path: path to the disk
+ @type compression: string
+ @param compression: compression type
+ @type action: string
+ @param action: whether the action is compression or decompression
+ @rtype: string
+ @return: new disk path after (de)compression
+
+ @raise errors.OpPrereqError: disk (de)compression failed or "compression"
+ is not supported
+
+ """
+ assert(action in ALLOWED_ACTIONS)
+ # For now we only support gzip, as it is used in ovftool
+ if compression != COMPRESSION_TYPE:
+ raise errors.OpPrereqError("Unsupported compression type: %s"
+ % compression)
+ disk_file = os.path.basename(disk_path)
+ if action == DECOMPRESS:
+ (disk_name, _) = os.path.splitext(disk_file)
+ prefix = disk_name
+ elif action == COMPRESS:
+ prefix = disk_file
+ new_path = utils.GetClosedTempfile(suffix=COMPRESSION_EXT, prefix=prefix,
+ dir=self.output_dir)
+ self.temp_file_manager.Add(new_path)
+ # NOTE(review): "gzip -c" always compresses; for action == DECOMPRESS
+ # this presumably needs "gzip -dc" -- TODO confirm against upstream
+ args = ["gzip", "-c", disk_path]
+ run_result = utils.RunCmd(args, output=new_path)
+ if run_result.failed:
+ raise errors.OpPrereqError("Disk %s failed with output: %s"
+ % (action, run_result.stderr))
+ logging.info("The %s of the disk is completed", action)
+ return (COMPRESSION_EXT, new_path)
+
+ def _ConvertDisk(self, disk_format, disk_path):
+ """Performs conversion to specified format.
+
+ @type disk_format: string
+ @param disk_format: format to which the disk should be converted
+ @type disk_path: string
+ @param disk_path: path to the disk that should be converted
+ @rtype: string
+ @return: path to the output disk
+
+ @raise errors.OpPrereqError: conversion of the disk failed
+
+ """
+ CheckQemuImg()
+ disk_file = os.path.basename(disk_path)
+ (disk_name, disk_extension) = os.path.splitext(disk_file)
+ if disk_extension != disk_format:
+ logging.warning("Conversion of disk image to %s format, this may take"
+ " a while", disk_format)
+
+ new_disk_path = utils.GetClosedTempfile(suffix=".%s" % disk_format,
+ prefix=disk_name, dir=self.output_dir)
+ self.temp_file_manager.Add(new_disk_path)
+ args = [
+ constants.QEMUIMG_PATH,
+ "convert",
+ "-O",
+ disk_format,
+ disk_path,
+ new_disk_path,
+ ]
+ run_result = utils.RunCmd(args, cwd=os.getcwd())
+ if run_result.failed:
+ raise errors.OpPrereqError("Convertion to %s failed, qemu-img output was"
+ ": %s" % (disk_format, run_result.stderr))
+ return (".%s" % disk_format, new_disk_path)
+
+ @staticmethod
+ def _GetDiskQemuInfo(disk_path, regexp):
+ """Figures out some information of the disk using qemu-img.
+
+ @type disk_path: string
+ @param disk_path: path to the disk we want to know the format of
+ @type regexp: string
+ @param regexp: string that has to be matched, it has to contain one group
+ @rtype: string
+ @return: disk format
+
+ @raise errors.OpPrereqError: format information cannot be retrieved
+
+ """
+ CheckQemuImg()
+ args = [constants.QEMUIMG_PATH, "info", disk_path]
+ run_result = utils.RunCmd(args, cwd=os.getcwd())
+ if run_result.failed:
+ raise errors.OpPrereqError("Gathering info about the disk using qemu-img"
+ " failed, output was: %s" % run_result.stderr)
+ result = run_result.output
+ regexp = r"%s" % regexp
+ # The first capture group of the caller-supplied pattern is the answer
+ match = re.search(regexp, result)
+ if match:
+ disk_format = match.group(1)
+ else:
+ raise errors.OpPrereqError("No file information matching %s found in:"
+ " %s" % (regexp, result))
+ return disk_format
+
+ def Parse(self):
+ """Parses the data and creates a structure containing all required info.
+
+ """
+ raise NotImplementedError()
+
+ def Save(self):
+ """Saves the gathered configuration in an appropriate format.
+
+ """
+ raise NotImplementedError()
+
+ def Cleanup(self):
+ """Cleans the temporary directory, if one was created.
+
+ """
+ self.temp_file_manager.Cleanup()
+ if self.temp_dir:
+ shutil.rmtree(self.temp_dir)
+ self.temp_dir = None
+
+
+class OVFImporter(Converter):
+ """Converter from OVF to Ganeti config file.
+
+ @type input_dir: string
+ @ivar input_dir: directory in which the .ovf file resides
+ @type output_dir: string
+ @ivar output_dir: directory to which the results of conversion shall be
+ written
+ @type input_path: string
+ @ivar input_path: complete path to the .ovf file
+ @type ovf_reader: L{OVFReader}
+ @ivar ovf_reader: OVF reader instance collects data from .ovf file
+ @type results_name: string
+ @ivar results_name: name of imported instance
+ @type results_template: string
+ @ivar results_template: disk template read from .ovf file or command line
+ arguments
+ @type results_hypervisor: dict
+ @ivar results_hypervisor: hypervisor information gathered from .ovf file or
+ command line arguments
+ @type results_os: dict
+ @ivar results_os: operating system information gathered from .ovf file or
+ command line arguments
+ @type results_backend: dict
+ @ivar results_backend: backend information gathered from .ovf file or
+ command line arguments
+ @type results_tags: string
+ @ivar results_tags: string containing instance-specific tags
+ @type results_version: string
+ @ivar results_version: version as required by Ganeti import
+ @type results_network: dict
+ @ivar results_network: network information gathered from .ovf file or command
+ line arguments
+ @type results_disk: dict
+ @ivar results_disk: disk information gathered from .ovf file or command line
+ arguments
+
+ """
+ def _ReadInputData(self, input_path):
+ """Reads the data on which the conversion will take place.
+
+ @type input_path: string
+ @param input_path: absolute path to the .ovf or .ova input file
+
+ @raise errors.OpPrereqError: if input file is neither .ovf nor .ova
+
+ """
+ (input_dir, input_file) = os.path.split(input_path)
+ (_, input_extension) = os.path.splitext(input_file)
+
+ if input_extension == OVF_EXT:
+ logging.info("%s file extension found, no unpacking necessary", OVF_EXT)
+ self.input_dir = input_dir
+ self.input_path = input_path
+ self.temp_dir = None
+ elif input_extension == OVA_EXT:
+ logging.info("%s file extension found, proceeding to unpacking", OVA_EXT)
+ # Sets self.input_dir, self.input_path and self.temp_dir
+ self._UnpackOVA(input_path)
+ else:
+ raise errors.OpPrereqError("Unknown file extension; expected %s or %s"
+ " file" % (OVA_EXT, OVF_EXT))
+ assert ((input_extension == OVA_EXT and self.temp_dir) or
+ (input_extension == OVF_EXT and not self.temp_dir))
+ assert self.input_dir in self.input_path
+
+ if self.options.output_dir:
+ self.output_dir = os.path.abspath(self.options.output_dir)
+ # NOTE(review): commonprefix compares strings character-by-character,
+ # not path components, so this check can accept sibling directories
+ # sharing a name prefix -- TODO confirm this is acceptable
+ if (os.path.commonprefix([constants.EXPORT_DIR, self.output_dir]) !=
+ constants.EXPORT_DIR):
+ logging.warning("Export path is not under %s directory, import to"
+ " Ganeti using gnt-backup may fail",
+ constants.EXPORT_DIR)
+ else:
+ self.output_dir = constants.EXPORT_DIR
+
+ self.ovf_reader = OVFReader(self.input_path)
+ self.ovf_reader.VerifyManifest()
+
+ def _UnpackOVA(self, input_path):
+ """Unpacks the .ova package into temporary directory.
+
+ @type input_path: string
+ @param input_path: path to the .ova package file
+
+ @raise errors.OpPrereqError: if file is not a proper tarball, one of the
+ files in the archive seem malicious (e.g. path starts with '../') or
+ .ova package does not contain .ovf file
+
+ """
+ input_name = None
+ if not tarfile.is_tarfile(input_path):
+ # NOTE(review): the "%s" placeholder is never interpolated here; OVA_EXT
+ # is passed as a separate argument instead of via "%" formatting
+ raise errors.OpPrereqError("The provided %s file is not a proper tar"
+ " archive", OVA_EXT)
+ ova_content = tarfile.open(input_path)
+ temp_dir = tempfile.mkdtemp()
+ self.temp_dir = temp_dir
+ # Reject members whose normalized path would escape the temp directory
+ # (path traversal protection before extraction)
+ for file_name in ova_content.getnames():
+ file_normname = os.path.normpath(file_name)
+ try:
+ utils.PathJoin(temp_dir, file_normname)
+ # NOTE(review): "err" is bound but never used in the message below
+ except ValueError, err:
+ raise errors.OpPrereqError("File %s inside %s package is not safe" %
+ (file_name, OVA_EXT))
+ if file_name.endswith(OVF_EXT):
+ input_name = file_name
+ if not input_name:
+ raise errors.OpPrereqError("No %s file in %s package found" %
+ (OVF_EXT, OVA_EXT))
+ logging.warning("Unpacking the %s archive, this may take a while",
+ input_path)
+ self.input_dir = temp_dir
+ self.input_path = utils.PathJoin(self.temp_dir, input_name)
+ try:
+ try:
+ extract = ova_content.extractall
+ except AttributeError:
+ # This is a prehistorical case of using python < 2.5
+ for member in ova_content.getmembers():
+ ova_content.extract(member, path=self.temp_dir)
+ else:
+ extract(self.temp_dir)
+ except tarfile.TarError, err:
+ raise errors.OpPrereqError("Error while extracting %s archive: %s" %
+ (OVA_EXT, err))
+ logging.info("OVA package extracted to %s directory", self.temp_dir)
+
+ def Parse(self):
+ """Parses the data and creates a structure containing all required info.
+
+ The method reads the information given either as a command line option or as
+ a part of the OVF description.
+
+ @raise errors.OpPrereqError: if some required part of the description of
+ virtual instance is missing or unable to create output directory
+
+ """
+ # For every section: command-line options take precedence over the
+ # values found in the .ovf file (see _GetInfo)
+ self.results_name = self._GetInfo("instance name", self.options.name,
+ self._ParseNameOptions, self.ovf_reader.GetInstanceName)
+ if not self.results_name:
+ raise errors.OpPrereqError("Name of instance not provided")
+
+ # Results are written to a per-instance subdirectory of the export dir
+ self.output_dir = utils.PathJoin(self.output_dir, self.results_name)
+ try:
+ utils.Makedirs(self.output_dir)
+ except OSError, err:
+ raise errors.OpPrereqError("Failed to create directory %s: %s" %
+ (self.output_dir, err))
+
+ self.results_template = self._GetInfo("disk template",
+ self.options.disk_template, self._ParseTemplateOptions,
+ self.ovf_reader.GetDiskTemplate)
+ if not self.results_template:
+ logging.info("Disk template not given")
+
+ self.results_hypervisor = self._GetInfo("hypervisor",
+ self.options.hypervisor, self._ParseHypervisorOptions,
+ self.ovf_reader.GetHypervisorData)
+ assert self.results_hypervisor["hypervisor_name"]
+ if self.results_hypervisor["hypervisor_name"] == constants.VALUE_AUTO:
+ logging.debug("Default hypervisor settings from the cluster will be used")
+
+ self.results_os = self._GetInfo("OS", self.options.os,
+ self._ParseOSOptions, self.ovf_reader.GetOSData)
+ if not self.results_os.get("os_name"):
+ raise errors.OpPrereqError("OS name must be provided")
+
+ self.results_backend = self._GetInfo("backend", self.options.beparams,
+ self._ParseBackendOptions, self.ovf_reader.GetBackendData)
+ assert self.results_backend.get("vcpus")
+ assert self.results_backend.get("memory")
+ assert self.results_backend.get("auto_balance") is not None
+
+ self.results_tags = self._GetInfo("tags", self.options.tags,
+ self._ParseTags, self.ovf_reader.GetTagsData)
+
+ # Fall back to the current export format version if the file has none
+ ovf_version = self.ovf_reader.GetVersionData()
+ if ovf_version:
+ self.results_version = ovf_version
+ else:
+ self.results_version = constants.EXPORT_VERSION
+
+ self.results_network = self._GetInfo("network", self.options.nics,
+ self._ParseNicOptions, self.ovf_reader.GetNetworkData,
+ ignore_test=self.options.no_nics)
+
+ # Diskless instances skip disk parsing entirely
+ self.results_disk = self._GetInfo("disk", self.options.disks,
+ self._ParseDiskOptions, self._GetDiskInfo,
+ ignore_test=self.results_template == constants.DT_DISKLESS)
+
+ if not self.results_disk and not self.results_network:
+ raise errors.OpPrereqError("Either disk specification or network"
+ " description must be present")
+
+ @staticmethod
+ def _GetInfo(name, cmd_arg, cmd_function, nocmd_function,
+ ignore_test=False):
+ """Get information about some section - e.g. disk, network, hypervisor.
+
+ @type name: string
+ @param name: name of the section
+ @type cmd_arg: dict
+ @param cmd_arg: command line argument specific for section 'name'
+ @type cmd_function: callable
+ @param cmd_function: function to call if 'cmd_args' exists
+ @type nocmd_function: callable
+ @param nocmd_function: function to call if 'cmd_args' is not there
+
+ """
+ if ignore_test:
+ logging.info("Information for %s will be ignored", name)
+ return {}
+ if cmd_arg:
+ logging.info("Information for %s will be parsed from command line", name)
+ results = cmd_function()
+ else:
+ logging.info("Information for %s will be parsed from %s file",
+ name, OVF_EXT)
+ results = nocmd_function()
+ logging.info("Options for %s were succesfully read", name)
+ return results
+
+ def _ParseNameOptions(self):
+ """Returns name if one was given in command line.
+
+ @rtype: string
+ @return: name of an instance
+
+ """
+ # Trivial accessor; only called via _GetInfo when --name was supplied
+ return self.options.name
+
+ def _ParseTemplateOptions(self):
+ """Returns disk template if one was given in command line.
+
+ @rtype: string
+ @return: disk template name
+
+ """
+ # Trivial accessor; only called via _GetInfo when a template was supplied
+ return self.options.disk_template
+
def _ParseHypervisorOptions(self):
  """Parses hypervisor options given in a command line.

  @rtype: dict
  @return: dictionary containing name of the chosen hypervisor and all the
    options

  """
  assert type(self.options.hypervisor) is tuple
  assert len(self.options.hypervisor) == 2
  (hyp_name, hyp_params) = self.options.hypervisor
  # Missing name falls back to "auto"; all parameters are copied verbatim
  results = {"hypervisor_name": hyp_name or constants.VALUE_AUTO}
  results.update(hyp_params)
  return results
+
def _ParseOSOptions(self):
  """Parses OS options given in command line.

  @rtype: dict
  @return: dictionary containing name of chosen OS and all its options

  """
  assert self.options.os
  # OS name plus any osparams the user supplied
  results = {"os_name": self.options.os}
  results.update(self.options.osparams)
  return results
+
def _ParseBackendOptions(self):
  """Parses backend options given in command line.

  @rtype: dict
  @return: dictionary containing vcpus, memory and auto-balance options

  """
  assert self.options.beparams
  backend = dict(self.options.beparams)
  # Fill in any mandatory key the user did not supply with "auto"
  for option in ("vcpus", "memory", "auto_balance"):
    if backend.get(option) is None:
      backend[option] = constants.VALUE_AUTO
  return backend
+
+ def _ParseTags(self):
+ """Returns tags list given in command line.
+
+ @rtype: string
+ @return: string containing comma-separated tags
+
+ """
+ # Trivial accessor; only called via _GetInfo when --tags was supplied
+ return self.options.tags
+
def _ParseNicOptions(self):
  """Parses network options given in a command line or as a dictionary.

  @rtype: dict
  @return: dictionary of network-related options

  """
  assert self.options.nics
  results = {}
  for (nic_id, nic_desc) in self.options.nics:
    # Every NIC option missing from the description defaults to "auto"
    results["nic%s_mode" % nic_id] = \
      nic_desc.get("mode", constants.VALUE_AUTO)
    results["nic%s_mac" % nic_id] = nic_desc.get("mac", constants.VALUE_AUTO)
    results["nic%s_link" % nic_id] = \
      nic_desc.get("link", constants.VALUE_AUTO)
    # Bridged NICs carry no IP address by default
    if nic_desc.get("mode") == "bridged":
      results["nic%s_ip" % nic_id] = constants.VALUE_NONE
    else:
      results["nic%s_ip" % nic_id] = constants.VALUE_AUTO
  results["nic_count"] = str(len(self.options.nics))
  return results
+
+ def _ParseDiskOptions(self):
+ """Parses disk options given in a command line.
+
+ @rtype: dict
+ @return: dictionary of disk-related options
+
+ @raise errors.OpPrereqError: disk description does not contain size
+ information or size information is invalid or creation failed
+
+ """
+ CheckQemuImg()
+ assert self.options.disks
+ results = {}
+ for (disk_id, disk_desc) in self.options.disks:
+ results["disk%s_ivname" % disk_id] = "disk/%s" % disk_id
+ if disk_desc.get("size"):
+ try:
+ # ParseUnit turns e.g. "10G" into a size in mebibytes
+ disk_size = utils.ParseUnit(disk_desc["size"])
+ except ValueError:
+ raise errors.OpPrereqError("Invalid disk size for disk %s: %s" %
+ (disk_id, disk_desc["size"]))
+ new_path = utils.PathJoin(self.output_dir, str(disk_id))
+ # Create an empty raw-format disk image of the requested size
+ args = [
+ constants.QEMUIMG_PATH,
+ "create",
+ "-f",
+ "raw",
+ new_path,
+ disk_size,
+ ]
+ run_result = utils.RunCmd(args)
+ if run_result.failed:
+ raise errors.OpPrereqError("Creation of disk %s failed, output was:"
+ " %s" % (new_path, run_result.stderr))
+ results["disk%s_size" % disk_id] = str(disk_size)
+ results["disk%s_dump" % disk_id] = "disk%s.raw" % disk_id
+ else:
+ raise errors.OpPrereqError("Disks created for import must have their"
+ " size specified")
+ results["disk_count"] = str(len(self.options.disks))
+ return results
+
+ def _GetDiskInfo(self):
+ """Gathers information about disks used by instance, performs conversion.
+
+ @rtype: dict
+ @return: dictionary of disk-related options
+
+ @raise errors.OpPrereqError: disk is not in the same directory as .ovf file
+
+ """
+ results = {}
+ disks_list = self.ovf_reader.GetDisksNames()
+ for (counter, (disk_name, disk_compression)) in enumerate(disks_list):
+ if os.path.dirname(disk_name):
+ raise errors.OpPrereqError("Disks are not allowed to have absolute"
+ " paths or paths outside main OVF directory")
+ disk, _ = os.path.splitext(disk_name)
+ disk_path = utils.PathJoin(self.input_dir, disk_name)
+ if disk_compression not in NO_COMPRESSION:
+ _, disk_path = self._CompressDisk(disk_path, disk_compression,
+ DECOMPRESS)
+ # Second splitext strips the original extension now that the
+ # compression suffix has been removed by the first one
+ disk, _ = os.path.splitext(disk)
+ if self._GetDiskQemuInfo(disk_path, "file format: (\S+)") != "raw":
+ logging.info("Conversion to raw format is required")
+ # Convert unconditionally; qemu-img handles raw-to-raw as a copy
+ ext, new_disk_path = self._ConvertDisk("raw", disk_path)
+
+ final_disk_path = LinkFile(new_disk_path, prefix=disk, suffix=ext,
+ directory=self.output_dir)
+ final_name = os.path.basename(final_disk_path)
+ # Size is reported in MiB (integer division)
+ disk_size = os.path.getsize(final_disk_path) / (1024 * 1024)
+ results["disk%s_dump" % counter] = final_name
+ results["disk%s_size" % counter] = str(disk_size)
+ results["disk%s_ivname" % counter] = "disk/%s" % str(counter)
+ if disks_list:
+ results["disk_count"] = str(len(disks_list))
+ return results
+
+  def Save(self):
+    """Saves all the gathered information in a constant.EXPORT_CONF_FILE file.
+
+    Assembles the previously parsed results into the INI-style sections
+    expected by Ganeti's import machinery and writes them out.
+
+    @raise errors.OpPrereqError: when saving to config file failed
+
+    """
+    logging.info("Conversion was succesfull, saving %s in %s directory",
+                 constants.EXPORT_CONF_FILE, self.output_dir)
+    results = {
+      constants.INISECT_INS: {},
+      constants.INISECT_BEP: {},
+      constants.INISECT_EXP: {},
+      constants.INISECT_OSP: {},
+      constants.INISECT_HYP: {},
+    }
+
+    results[constants.INISECT_INS].update(self.results_disk)
+    results[constants.INISECT_INS].update(self.results_network)
+    results[constants.INISECT_INS]["hypervisor"] = \
+      self.results_hypervisor["hypervisor_name"]
+    results[constants.INISECT_INS]["name"] = self.results_name
+    if self.results_template:
+      results[constants.INISECT_INS]["disk_template"] = self.results_template
+    if self.results_tags:
+      results[constants.INISECT_INS]["tags"] = self.results_tags
+
+    results[constants.INISECT_BEP].update(self.results_backend)
+
+    results[constants.INISECT_EXP]["os"] = self.results_os["os_name"]
+    results[constants.INISECT_EXP]["version"] = self.results_version
+
+    # the name keys go to their own sections above; the remaining entries
+    # are dumped verbatim into the OS/hypervisor parameter sections
+    del self.results_os["os_name"]
+    results[constants.INISECT_OSP].update(self.results_os)
+
+    del self.results_hypervisor["hypervisor_name"]
+    results[constants.INISECT_HYP].update(self.results_hypervisor)
+
+    output_file_name = utils.PathJoin(self.output_dir,
+                                      constants.EXPORT_CONF_FILE)
+
+    # serialize the sections by hand in simple "[section]\nkey = value" form
+    output = []
+    for section, options in results.iteritems():
+      output.append("[%s]" % section)
+      for name, value in options.iteritems():
+        if value is None:
+          value = ""
+        output.append("%s = %s" % (name, value))
+      output.append("")
+    output_contents = "\n".join(output)
+
+    try:
+      utils.WriteFile(output_file_name, data=output_contents)
+    except errors.ProgrammerError, err:
+      raise errors.OpPrereqError("Saving the config file failed: %s" % err)
+
+    self.Cleanup()
+
+
+class ConfigParserWithDefaults(ConfigParser.SafeConfigParser):
+  """This is just a wrapper on SafeConfigParser, that uses default values
+
+  Missing options are reported as C{None} (L{get}) or C{0} (L{getint})
+  instead of raising L{ConfigParser.NoOptionError}.
+
+  """
+  def get(self, section, options, raw=None, vars=None): # pylint: disable=W0622
+    """Returns the option value, or None when the option is missing.
+
+    """
+    # NOTE(review): only NoOptionError is handled; a missing section
+    # still raises NoSectionError -- TODO confirm this is intended
+    try:
+      result = ConfigParser.SafeConfigParser.get(self, section, options, \
+        raw=raw, vars=vars)
+    except ConfigParser.NoOptionError:
+      result = None
+    return result
+
+  def getint(self, section, options):
+    """Returns the option value converted to int, or 0 when missing.
+
+    """
+    try:
+      result = ConfigParser.SafeConfigParser.get(self, section, options)
+    except ConfigParser.NoOptionError:
+      result = 0
+    return int(result)
+
+
+class OVFExporter(Converter):
+  """Converter from Ganeti config file to OVF
+
+  @type input_dir: string
+  @ivar input_dir: directory in which the config.ini file resides
+  @type output_dir: string
+  @ivar output_dir: directory to which the results of conversion shall be
+    written
+  @type packed_dir: string
+  @ivar packed_dir: if we want OVA package, this points to the real (i.e. not
+    temp) output directory
+  @type input_path: string
+  @ivar input_path: complete path to the config.ini file
+  @type output_path: string
+  @ivar output_path: complete path to .ovf file
+  @type config_parser: L{ConfigParserWithDefaults}
+  @ivar config_parser: parser for the config.ini file
+  @type references_files: list
+  @ivar references_files: files referenced in the ovf file
+  @type results_disk: list
+  @ivar results_disk: list of dictionaries of disk options from config.ini
+  @type results_network: list
+  @ivar results_network: list of dictionaries of network options from
+    config.ini
+  @type results_name: string
+  @ivar results_name: name of the instance
+  @type results_vcpus: string
+  @ivar results_vcpus: number of VCPUs
+  @type results_memory: string
+  @ivar results_memory: RAM memory in MB
+  @type results_ganeti: dict
+  @ivar results_ganeti: dictionary of Ganeti-specific options from config.ini
+
+  """
+  def _ReadInputData(self, input_path):
+    """Reads the data on which the conversion will take place.
+
+    @type input_path: string
+    @param input_path: absolute path to the config.ini input file
+
+    @raise errors.OpPrereqError: error when reading the config file
+
+    """
+    input_dir = os.path.dirname(input_path)
+    self.input_path = input_path
+    self.input_dir = input_dir
+    if self.options.output_dir:
+      self.output_dir = os.path.abspath(self.options.output_dir)
+    else:
+      # default to writing results next to the input file
+      self.output_dir = input_dir
+    self.config_parser = ConfigParserWithDefaults()
+    logging.info("Reading configuration from %s file", input_path)
+    try:
+      self.config_parser.read(input_path)
+    except ConfigParser.MissingSectionHeaderError, err:
+      raise errors.OpPrereqError("Error when trying to read %s: %s" %
+                                 (input_path, err))
+    if self.options.ova_package:
+      # for OVA output, assemble everything in a temporary directory
+      # first and pack it into the real output directory at Save() time
+      self.temp_dir = tempfile.mkdtemp()
+      self.packed_dir = self.output_dir
+      self.output_dir = self.temp_dir
+
+    self.ovf_writer = OVFWriter(not self.options.ext_usage)
+
+  def _ParseName(self):
+    """Parses name from command line options or config file.
+
+    The command-line option takes precedence over the config file value.
+
+    @rtype: string
+    @return: name of Ganeti instance
+
+    @raise errors.OpPrereqError: if name of the instance is not provided
+
+    """
+    if self.options.name:
+      name = self.options.name
+    else:
+      name = self.config_parser.get(constants.INISECT_INS, NAME)
+    if name is None:
+      raise errors.OpPrereqError("No instance name found")
+    return name
+
+  def _ParseVCPUs(self):
+    """Parses vcpus number from config file.
+
+    @rtype: int
+    @return: number of virtual CPUs
+
+    @raise errors.OpPrereqError: if number of VCPUs equals 0
+
+    """
+    vcpus = self.config_parser.getint(constants.INISECT_BEP, VCPUS)
+    # getint() returns 0 for a missing option, so 0 means "not found"
+    if vcpus == 0:
+      raise errors.OpPrereqError("No CPU information found")
+    return vcpus
+
+  def _ParseMemory(self):
+    """Parses memory amount from config file.
+
+    @rtype: int
+    @return: amount of memory in MB
+
+    @raise errors.OpPrereqError: if amount of memory equals 0
+
+    """
+    memory = self.config_parser.getint(constants.INISECT_BEP, MEMORY)
+    # getint() returns 0 for a missing option, so 0 means "not found"
+    if memory == 0:
+      raise errors.OpPrereqError("No memory information found")
+    return memory
+
+  def _ParseGaneti(self):
+    """Parses Ganeti data from config file.
+
+    @rtype: dictionary
+    @return: dictionary of Ganeti-specific options
+
+    @raise errors.OpPrereqError: if hypervisor or operating system
+      information is missing
+
+    """
+    results = {}
+    # hypervisor
+    results["hypervisor"] = {}
+    hyp_name = self.config_parser.get(constants.INISECT_INS, HYPERV)
+    if hyp_name is None:
+      raise errors.OpPrereqError("No hypervisor information found")
+    results["hypervisor"]["name"] = hyp_name
+    pairs = self.config_parser.items(constants.INISECT_HYP)
+    for (name, value) in pairs:
+      results["hypervisor"][name] = value
+    # os
+    results["os"] = {}
+    os_name = self.config_parser.get(constants.INISECT_EXP, OS)
+    if os_name is None:
+      raise errors.OpPrereqError("No operating system information found")
+    results["os"]["name"] = os_name
+    pairs = self.config_parser.items(constants.INISECT_OSP)
+    for (name, value) in pairs:
+      results["os"][name] = value
+    # other options are optional; missing ones come back as None
+    others = [
+      (constants.INISECT_INS, DISK_TEMPLATE, "disk_template"),
+      (constants.INISECT_BEP, AUTO_BALANCE, "auto_balance"),
+      (constants.INISECT_INS, TAGS, "tags"),
+      (constants.INISECT_EXP, VERSION, "version"),
+    ]
+    for (section, element, name) in others:
+      results[name] = self.config_parser.get(section, element)
+    return results
+
+  def _ParseNetworks(self):
+    """Parses network data from config file.
+
+    @rtype: list
+    @return: list of dictionaries of network options
+
+    @raise errors.OpPrereqError: when network mode is not recognized
+
+    """
+    results = []
+    counter = 0
+    # iterate nic0, nic1, ... until a nicN_link option is missing
+    while True:
+      data_link = \
+        self.config_parser.get(constants.INISECT_INS, "nic%s_link" % counter)
+      if data_link is None:
+        break
+      results.append({
+        "mode": self.config_parser.get(constants.INISECT_INS,
+                                       "nic%s_mode" % counter),
+        "mac": self.config_parser.get(constants.INISECT_INS,
+                                      "nic%s_mac" % counter),
+        "ip": self.config_parser.get(constants.INISECT_INS,
+                                     "nic%s_ip" % counter),
+        "link": data_link,
+      })
+      if results[counter]["mode"] not in constants.NIC_VALID_MODES:
+        raise errors.OpPrereqError("Network mode %s not recognized"
+                                   % results[counter]["mode"])
+      counter += 1
+    return results
+
+  def _GetDiskOptions(self, disk_file, compression):
+    """Convert the disk and gather disk info for .ovf file.
+
+    @type disk_file: string
+    @param disk_file: name of the disk (without the full path)
+    @type compression: bool
+    @param compression: whether the disk should be compressed or not
+
+    @rtype: dict
+    @return: disk description for the OVF file (format, sizes, path, ...)
+
+    @raise errors.OpPrereqError: when disk image does not exist
+
+    """
+    disk_path = utils.PathJoin(self.input_dir, disk_file)
+    results = {}
+    if not os.path.isfile(disk_path):
+      raise errors.OpPrereqError("Disk image does not exist: %s" % disk_path)
+    if os.path.dirname(disk_file):
+      raise errors.OpPrereqError("Path for the disk: %s contains a directory"
+                                 " name" % disk_path)
+    disk_name, _ = os.path.splitext(disk_file)
+    ext, new_disk_path = self._ConvertDisk(self.options.disk_format, disk_path)
+    results["format"] = self.options.disk_format
+    results["virt-size"] = self._GetDiskQemuInfo(new_disk_path,
+                                                 "virtual size: \S+ \((\d+) bytes\)")
+    if compression:
+      ext2, new_disk_path = self._CompressDisk(new_disk_path, "gzip",
+                                               COMPRESS)
+      # strip the format extension too, since a new combined one is built
+      disk_name, _ = os.path.splitext(disk_name)
+      results["compression"] = "gzip"
+      ext += ext2
+    final_disk_path = LinkFile(new_disk_path, prefix=disk_name, suffix=ext,
+                               directory=self.output_dir)
+    final_disk_name = os.path.basename(final_disk_path)
+    results["real-size"] = os.path.getsize(final_disk_path)
+    results["path"] = final_disk_name
+    # remember the file so it gets checksummed/packed later
+    self.references_files.append(final_disk_path)
+    return results
+
+  def _ParseDisks(self):
+    """Parses disk data from config file.
+
+    @rtype: list
+    @return: list of dictionaries of disk options
+
+    """
+    results = []
+    counter = 0
+    # iterate disk0, disk1, ... until a diskN_dump option is missing
+    while True:
+      disk_file = \
+        self.config_parser.get(constants.INISECT_INS, "disk%s_dump" % counter)
+      if disk_file is None:
+        break
+      results.append(self._GetDiskOptions(disk_file, self.options.compression))
+      counter += 1
+    return results
+
+  def Parse(self):
+    """Parses the data and creates a structure containing all required info.
+
+    Fills in the results_* attributes used later by L{Save}.
+
+    @raise errors.OpPrereqError: if the output directory cannot be created
+
+    """
+    try:
+      utils.Makedirs(self.output_dir)
+    except OSError, err:
+      raise errors.OpPrereqError("Failed to create directory %s: %s" %
+                                 (self.output_dir, err))
+
+    self.references_files = []
+    self.results_name = self._ParseName()
+    self.results_vcpus = self._ParseVCPUs()
+    self.results_memory = self._ParseMemory()
+    # Ganeti-specific data only makes sense for internal (non-external) use
+    if not self.options.ext_usage:
+      self.results_ganeti = self._ParseGaneti()
+    self.results_network = self._ParseNetworks()
+    self.results_disk = self._ParseDisks()
+
+  def _PrepareManifest(self, path):
+    """Creates manifest for all the files in OVF package.
+
+    @type path: string
+    @param path: path to the manifest file
+
+    @raise errors.OpPrereqError: if error occurs when writing file
+
+    """
+    logging.info("Preparing manifest for the OVF package")
+    lines = []
+    files_list = [self.output_path]
+    files_list.extend(self.references_files)
+    # warning level so the user sees why the tool appears to hang
+    logging.warning("Calculating SHA1 checksums, this may take a while")
+    sha1_sums = utils.FingerprintFiles(files_list)
+    for file_path, value in sha1_sums.iteritems():
+      file_name = os.path.basename(file_path)
+      lines.append("SHA1(%s)= %s" % (file_name, value))
+    lines.append("")
+    data = "\n".join(lines)
+    try:
+      utils.WriteFile(path, data=data)
+    except errors.ProgrammerError, err:
+      raise errors.OpPrereqError("Saving the manifest file failed: %s" % err)
+
+  @staticmethod
+  def _PrepareTarFile(tar_path, files_list):
+    """Creates tarfile from the files in OVF package.
+
+    File names inside the archive are flattened to their basenames.
+
+    @type tar_path: string
+    @param tar_path: path to the resulting file
+    @type files_list: list
+    @param files_list: list of files in the OVF package
+
+    """
+    logging.info("Preparing tarball for the OVF package")
+    # NOTE(review): this open/close pre-truncates the file; tarfile.open
+    # with mode "w" creates it anyway, so this looks redundant -- confirm
+    open(tar_path, mode="w").close()
+    ova_package = tarfile.open(name=tar_path, mode="w")
+    for file_path in files_list:
+      file_name = os.path.basename(file_path)
+      ova_package.add(file_path, arcname=file_name)
+    ova_package.close()
+
+  def Save(self):
+    """Saves the gathered configuration in an appropriate format.
+
+    Writes the .ovf file and its manifest; optionally packs everything
+    into a single .ova tarball.
+
+    @raise errors.OpPrereqError: if unable to create output directory
+
+    """
+    output_file = "%s%s" % (self.results_name, OVF_EXT)
+    output_path = utils.PathJoin(self.output_dir, output_file)
+    # NOTE(review): ovf_writer was already created in _ReadInputData;
+    # this re-initialization looks redundant -- confirm
+    self.ovf_writer = OVFWriter(not self.options.ext_usage)
+    logging.info("Saving read data to %s", output_path)
+
+    self.output_path = utils.PathJoin(self.output_dir, output_file)
+    files_list = [self.output_path]
+
+    self.ovf_writer.SaveDisksData(self.results_disk)
+    self.ovf_writer.SaveNetworksData(self.results_network)
+    if not self.options.ext_usage:
+      self.ovf_writer.SaveGanetiData(self.results_ganeti, self.results_network)
+
+    self.ovf_writer.SaveVirtualSystemData(self.results_name, self.results_vcpus,
+                                          self.results_memory)
+
+    data = self.ovf_writer.PrettyXmlDump()
+    utils.WriteFile(self.output_path, data=data)
+
+    manifest_file = "%s%s" % (self.results_name, MF_EXT)
+    manifest_path = utils.PathJoin(self.output_dir, manifest_file)
+    self._PrepareManifest(manifest_path)
+    files_list.append(manifest_path)
+
+    files_list.extend(self.references_files)
+
+    if self.options.ova_package:
+      ova_file = "%s%s" % (self.results_name, OVA_EXT)
+      packed_path = utils.PathJoin(self.packed_dir, ova_file)
+      try:
+        utils.Makedirs(self.packed_dir)
+      except OSError, err:
+        raise errors.OpPrereqError("Failed to create directory %s: %s" %
+                                   (self.packed_dir, err))
+      self._PrepareTarFile(packed_path, files_list)
+    logging.info("Creation of the OVF package was successfull")
+    self.Cleanup()
self._hints = None
self._op_handler = None
- def __call__(self, hints, filter_):
+ def __call__(self, hints, qfilter):
"""Converts a query filter into a callable function.
@type hints: L{_FilterHints} or None
@param hints: Callbacks doing analysis on filter
- @type filter_: list
- @param filter_: Filter structure
+ @type qfilter: list
+ @param qfilter: Filter structure
@rtype: callable
@return: Function receiving context and item as parameters, returning
boolean as to whether item matches filter
}
try:
- filter_fn = self._Compile(filter_, 0)
+ filter_fn = self._Compile(qfilter, 0)
finally:
self._op_handler = None
return filter_fn
- def _Compile(self, filter_, level):
+ def _Compile(self, qfilter, level):
"""Inner function for converting filters.
Calls the correct handler functions for the top-level operator. This
function is called recursively (e.g. for logic operators).
"""
- if not (isinstance(filter_, (list, tuple)) and filter_):
+ if not (isinstance(qfilter, (list, tuple)) and qfilter):
raise errors.ParameterError("Invalid filter on level %s" % level)
# Limit recursion
" nested too deep)" % self._LEVELS_MAX)
# Create copy to be modified
- operands = filter_[:]
+ operands = qfilter[:]
op = operands.pop(0)
try:
" (op '%s', flags %s)" % (op, field_flags))
-def _CompileFilter(fields, hints, filter_):
+def _CompileFilter(fields, hints, qfilter):
"""Converts a query filter into a callable function.
See L{_FilterCompilerHelper} for details.
@rtype: callable
"""
- return _FilterCompilerHelper(fields)(hints, filter_)
+ return _FilterCompilerHelper(fields)(hints, qfilter)
class Query:
- def __init__(self, fieldlist, selected, filter_=None, namefield=None):
+ def __init__(self, fieldlist, selected, qfilter=None, namefield=None):
"""Initializes this class.
The field definition is a dictionary with the field's name as a key and a
self._requested_names = None
self._filter_datakinds = frozenset()
- if filter_ is not None:
+ if qfilter is not None:
# Collect requested names if wanted
if namefield:
hints = _FilterHints(namefield)
hints = None
# Build filter function
- self._filter_fn = _CompileFilter(fieldlist, hints, filter_)
+ self._filter_fn = _CompileFilter(fieldlist, hints, qfilter)
if hints:
self._requested_names = hints.RequestedNames()
self._filter_datakinds = hints.ReferencedData()
elif value is not None:
errs.append("abnormal field %s has a non-None value" % fdef.name)
assert not errs, ("Failed validation: %s in row %s" %
- (utils.CommaJoin(errors), row))
+ (utils.CommaJoin(errs), row))
+
+
+def _FieldDictKey((fdef, _, flags, fn)):
+ """Generates key for field dictionary.
+
+ """
+ assert fdef.name and fdef.title, "Name and title are required"
+ assert FIELD_NAME_RE.match(fdef.name)
+ assert TITLE_RE.match(fdef.title)
+ assert (DOC_RE.match(fdef.doc) and len(fdef.doc.splitlines()) == 1 and
+ fdef.doc.strip() == fdef.doc), \
+ "Invalid description for field '%s'" % fdef.name
+ assert callable(fn)
+ assert (flags & ~QFF_ALL) == 0, "Unknown flags for field '%s'" % fdef.name
+
+ return fdef.name
def _PrepareFieldList(fields, aliases):
for (fdef, _, _, _) in fields)
assert not duplicates, "Duplicate title(s) found: %r" % duplicates
- result = {}
-
- for field in fields:
- (fdef, _, flags, fn) = field
-
- assert fdef.name and fdef.title, "Name and title are required"
- assert FIELD_NAME_RE.match(fdef.name)
- assert TITLE_RE.match(fdef.title)
- assert (DOC_RE.match(fdef.doc) and len(fdef.doc.splitlines()) == 1 and
- fdef.doc.strip() == fdef.doc), \
- "Invalid description for field '%s'" % fdef.name
- assert callable(fn)
- assert fdef.name not in result, \
- "Duplicate field name '%s' found" % fdef.name
- assert (flags & ~QFF_ALL) == 0, "Unknown flags for field '%s'" % fdef.name
-
- result[fdef.name] = field
+ result = utils.SequenceToDict(fields, key=_FieldDictKey)
for alias, target in aliases:
assert alias not in result, "Alias %s overrides an existing field" % alias
return _FS_UNAVAIL
+def _GetNodeHvState(_, node):
+  """Converts node's hypervisor state for query result.
+
+  The first (query context) parameter is unused.
+
+  @param node: node object providing the C{hv_state} attribute
+
+  """
+  hv_state = node.hv_state
+
+  if hv_state is None:
+    # no recorded hypervisor state for this node
+    return _FS_UNAVAIL
+
+  return dict((name, value.ToDict()) for (name, value) in hv_state.items())
+
+
+def _GetNodeDiskState(_, node):
+  """Converts node's disk state for query result.
+
+  The first (query context) parameter is unused.
+
+  @param node: node object providing the C{disk_state} attribute
+
+  """
+  disk_state = node.disk_state
+
+  if disk_state is None:
+    # no recorded disk state for this node
+    return _FS_UNAVAIL
+
+  # two-level dict: disk kind -> per-name serialized state
+  return dict((disk_kind, dict((name, value.ToDict())
+                               for (name, value) in kind_state.items()))
+              for (disk_kind, kind_state) in disk_state.items())
+
+
def _BuildNodeFields():
"""Builds list of fields for node queries.
(_MakeField("custom_ndparams", "CustomNodeParameters", QFT_OTHER,
"Custom node parameters"),
NQ_GROUP, 0, _GetItemAttr("ndparams")),
+ (_MakeField("hv_state", "HypervisorState", QFT_OTHER, "Hypervisor state"),
+ NQ_CONFIG, 0, _GetNodeHvState),
+ (_MakeField("disk_state", "DiskState", QFT_OTHER, "Disk state"),
+ NQ_CONFIG, 0, _GetNodeDiskState),
]
# Node role
if bool(ctx.live_data.get(inst.name)):
if inst.name in ctx.wrongnode_inst:
return constants.INSTST_WRONGNODE
- elif inst.admin_up:
+ elif inst.admin_state == constants.ADMINST_UP:
return constants.INSTST_RUNNING
else:
return constants.INSTST_ERRORUP
- if inst.admin_up:
+ if inst.admin_state == constants.ADMINST_UP:
return constants.INSTST_ERRORDOWN
+ elif inst.admin_state == constants.ADMINST_DOWN:
+ return constants.INSTST_ADMINDOWN
- return constants.INSTST_ADMINDOWN
+ return constants.INSTST_ADMINOFFLINE
def _GetInstDiskSize(index):
# TODO: Consider moving titles closer to constants
be_title = {
constants.BE_AUTO_BALANCE: "Auto_balance",
- constants.BE_MEMORY: "ConfigMemory",
+ constants.BE_MAXMEM: "ConfigMaxMem",
+ constants.BE_MINMEM: "ConfigMinMem",
constants.BE_VCPUS: "ConfigVCPUs",
}
IQ_NODES, 0,
lambda ctx, inst: map(compat.partial(_GetInstNodeGroup, ctx, None),
inst.secondary_nodes)),
- (_MakeField("admin_state", "Autostart", QFT_BOOL,
- "Desired state of instance (if set, the instance should be"
- " up)"),
- IQ_CONFIG, 0, _GetItemAttr("admin_up")),
+ (_MakeField("admin_state", "InstanceState", QFT_TEXT,
+ "Desired state of instance"),
+ IQ_CONFIG, 0, _GetItemAttr("admin_state")),
+ (_MakeField("admin_up", "Autostart", QFT_BOOL,
+ "Desired state of instance"),
+ IQ_CONFIG, 0, lambda ctx, inst: inst.admin_state == constants.ADMINST_UP),
(_MakeField("tags", "Tags", QFT_OTHER, "Tags"), IQ_CONFIG, 0,
lambda ctx, inst: list(inst.GetTags())),
(_MakeField("console", "Console", QFT_OTHER,
status_values = (constants.INSTST_RUNNING, constants.INSTST_ADMINDOWN,
constants.INSTST_WRONGNODE, constants.INSTST_ERRORUP,
constants.INSTST_ERRORDOWN, constants.INSTST_NODEDOWN,
- constants.INSTST_NODEOFFLINE)
+ constants.INSTST_NODEOFFLINE, constants.INSTST_ADMINOFFLINE)
status_doc = ("Instance status; \"%s\" if instance is set to be running"
" and actually is, \"%s\" if instance is stopped and"
" is not running, \"%s\" if instance running, but not on its"
" designated primary node, \"%s\" if instance should be"
" stopped, but is actually running, \"%s\" if instance should"
" run, but doesn't, \"%s\" if instance's primary node is down,"
- " \"%s\" if instance's primary node is marked offline" %
- status_values)
+ " \"%s\" if instance's primary node is marked offline,"
+ " \"%s\" if instance is offline and does not use dynamic"
+ " resources" % status_values)
fields.append((_MakeField("status", "Status", QFT_TEXT, status_doc),
IQ_LIVE, 0, _GetInstStatus))
assert set(status_values) == constants.INSTST_ALL, \
aliases = [
("vcpus", "be/vcpus"),
+ ("be/memory", "be/maxmem"),
("sda_size", "disk.size/0"),
("sdb_size", "disk.size/1"),
] + network_aliases
"""Data container for node group data queries.
"""
- def __init__(self, groups, group_to_nodes, group_to_instances):
+ def __init__(self, cluster, groups, group_to_nodes, group_to_instances):
"""Initializes this class.
+ @param cluster: Cluster object
@param groups: List of node group objects
@type group_to_nodes: dict; group UUID as key
@param group_to_nodes: Per-group list of nodes
self.groups = groups
self.group_to_nodes = group_to_nodes
self.group_to_instances = group_to_instances
+ self.cluster = cluster
+
+ # Used for individual rows
+ self.group_ipolicy = None
def __iter__(self):
"""Iterate over all node groups.
+ This function has side-effects and only one instance of the resulting
+ generator should be used at a time.
+
"""
- return iter(self.groups)
+ for group in self.groups:
+ self.group_ipolicy = self.cluster.SimpleFillIPolicy(group.ipolicy)
+ yield group
_GROUP_SIMPLE_FIELDS = {
fields.extend([
(_MakeField("tags", "Tags", QFT_OTHER, "Tags"), GQ_CONFIG, 0,
lambda ctx, group: list(group.GetTags())),
+ (_MakeField("ipolicy", "InstancePolicy", QFT_OTHER,
+ "Instance policy limitations (merged)"),
+ GQ_CONFIG, 0, lambda ctx, _: ctx.group_ipolicy),
+ (_MakeField("custom_ipolicy", "CustomInstancePolicy", QFT_OTHER,
+ "Custom instance policy limitations"),
+ GQ_CONFIG, 0, _GetItemAttr("ipolicy")),
])
fields.extend(_GetItemTimestampFields(GQ_CONFIG))
from ganeti import luxi
from ganeti import rapi
from ganeti import http
-from ganeti import ssconf
-from ganeti import constants
-from ganeti import opcodes
from ganeti import errors
+from ganeti import compat
# Dummy value to detect unchanged parameters
_DEFAULT = object()
+#: Supported HTTP methods
+_SUPPORTED_METHODS = frozenset([
+ http.HTTP_DELETE,
+ http.HTTP_GET,
+ http.HTTP_POST,
+ http.HTTP_PUT,
+ ])
+
+
+def _BuildOpcodeAttributes():
+  """Builds list of attributes used for per-handler opcodes.
+
+  @rtype: list of tuples
+  @return: per HTTP method: (method, "<METHOD>_OPCODE", "<METHOD>_RENAME",
+    "Get<Method>OpInput")
+
+  """
+  return [(method, "%s_OPCODE" % method, "%s_RENAME" % method,
+           "Get%sOpInput" % method.capitalize())
+          for method in _SUPPORTED_METHODS]
+
+
+_OPCODE_ATTRS = _BuildOpcodeAttributes()
+
def BuildUriList(ids, uri_format, uri_fields=("name", "uri")):
"""Builds a URI list as used by index resources.
return dict(zip(names, data))
-def _Tags_GET(kind, name):
- """Helper function to retrieve tags.
-
- """
- if kind in (constants.TAG_INSTANCE,
- constants.TAG_NODEGROUP,
- constants.TAG_NODE):
- if not name:
- raise http.HttpBadRequest("Missing name on tag request")
- cl = GetClient()
- if kind == constants.TAG_INSTANCE:
- fn = cl.QueryInstances
- elif kind == constants.TAG_NODEGROUP:
- fn = cl.QueryGroups
- else:
- fn = cl.QueryNodes
- result = fn(names=[name], fields=["tags"], use_locking=False)
- if not result or not result[0]:
- raise http.HttpBadGateway("Invalid response from tag query")
- tags = result[0][0]
- elif kind == constants.TAG_CLUSTER:
- ssc = ssconf.SimpleStore()
- tags = ssc.GetClusterTags()
-
- return list(tags)
-
-
-def _Tags_PUT(kind, tags, name, dry_run):
- """Helper function to set tags.
-
- """
- return SubmitJob([opcodes.OpTagsSet(kind=kind, name=name,
- tags=tags, dry_run=dry_run)])
-
-
-def _Tags_DELETE(kind, tags, name, dry_run):
- """Helper function to delete tags.
-
- """
- return SubmitJob([opcodes.OpTagsDel(kind=kind, name=name,
- tags=tags, dry_run=dry_run)])
-
-
def MapBulkFields(itemslist, fields):
"""Map value to field name in to one dictionary.
return op
-def SubmitJob(op, cl=None):
- """Generic wrapper for submit job, for better http compatibility.
-
- @type op: list
- @param op: the list of opcodes for the job
- @type cl: None or luxi.Client
- @param cl: optional luxi client to use
- @rtype: string
- @return: the job ID
-
- """
- try:
- if cl is None:
- cl = GetClient()
- return cl.SubmitJob(op)
- except errors.JobQueueFull:
- raise http.HttpServiceUnavailable("Job queue is full, needs archiving")
- except errors.JobQueueDrainError:
- raise http.HttpServiceUnavailable("Job queue is drained, cannot submit")
- except luxi.NoMasterError, err:
- raise http.HttpBadGateway("Master seems to be unreachable: %s" % str(err))
- except luxi.PermissionError:
- raise http.HttpInternalServerError("Internal error: no permission to"
- " connect to the master daemon")
- except luxi.TimeoutError, err:
- raise http.HttpGatewayTimeout("Timeout while talking to the master"
- " daemon. Error: %s" % str(err))
-
-
def HandleItemQueryErrors(fn, *args, **kwargs):
"""Converts errors when querying a single item.
raise
-def GetClient():
- """Geric wrapper for luxi.Client(), for better http compatiblity.
-
- """
- try:
- return luxi.Client()
- except luxi.NoMasterError, err:
- raise http.HttpBadGateway("Master seems to unreachable: %s" % str(err))
- except luxi.PermissionError:
- raise http.HttpInternalServerError("Internal error: no permission to"
- " connect to the master daemon")
-
-
def FeedbackFn(msg):
"""Feedback logging function for jobs.
return CheckType(value, exptype, "'%s' parameter" % name)
-class R_Generic(object):
+class ResourceBase(object):
"""Generic class for resources.
"""
POST_ACCESS = [rapi.RAPI_ACCESS_WRITE]
DELETE_ACCESS = [rapi.RAPI_ACCESS_WRITE]
- def __init__(self, items, queryargs, req):
+ def __init__(self, items, queryargs, req, _client_cls=luxi.Client):
"""Generic resource constructor.
@param items: a list with variables encoded in the URL
@param queryargs: a dictionary with additional options from URL
+ @param req: Request context
+ @param _client_cls: L{luxi} client class (unittests only)
"""
self.items = items
self.queryargs = queryargs
self._req = req
+ self._client_cls = _client_cls
def _GetRequestBody(self):
"""Returns the body data.
"""
return bool(self._checkIntVariable("dry-run"))
+
+  def GetClient(self):
+    """Wrapper for L{luxi.Client} with HTTP-specific error handling.
+
+    Translates luxi connection errors into the matching HTTP errors.
+
+    @return: a new luxi client instance (of the class given at
+      construction time, overridable for unittests)
+
+    """
+    # Could be a function, pylint: disable=R0201
+    try:
+      return self._client_cls()
+    except luxi.NoMasterError, err:
+      raise http.HttpBadGateway("Can't connect to master daemon: %s" % err)
+    except luxi.PermissionError:
+      raise http.HttpInternalServerError("Internal error: no permission to"
+                                         " connect to the master daemon")
+
+  def SubmitJob(self, op, cl=None):
+    """Generic wrapper for submit job, for better http compatibility.
+
+    Translates luxi/job-queue errors into the matching HTTP errors.
+
+    @type op: list
+    @param op: the list of opcodes for the job
+    @type cl: None or luxi.Client
+    @param cl: optional luxi client to use
+    @rtype: string
+    @return: the job ID
+
+    """
+    if cl is None:
+      cl = self.GetClient()
+    try:
+      return cl.SubmitJob(op)
+    except errors.JobQueueFull:
+      raise http.HttpServiceUnavailable("Job queue is full, needs archiving")
+    except errors.JobQueueDrainError:
+      raise http.HttpServiceUnavailable("Job queue is drained, cannot submit")
+    except luxi.NoMasterError, err:
+      raise http.HttpBadGateway("Master seems to be unreachable: %s" % err)
+    except luxi.PermissionError:
+      raise http.HttpInternalServerError("Internal error: no permission to"
+                                         " connect to the master daemon")
+    except luxi.TimeoutError, err:
+      raise http.HttpGatewayTimeout("Timeout while talking to the master"
+                                    " daemon: %s" % err)
+
+
+def GetResourceOpcodes(cls):
+  """Returns all opcodes used by a resource.
+
+  @param cls: resource class to inspect
+  @rtype: frozenset
+  @return: all non-None C{*_OPCODE} class attributes
+
+  """
+  return frozenset(filter(None, (getattr(cls, op_attr, None)
+                                 for (_, op_attr, _, _) in _OPCODE_ATTRS)))
+
+
+class _MetaOpcodeResource(type):
+  """Meta class for RAPI resources.
+
+  """
+  def __call__(mcs, *args, **kwargs):
+    """Instantiates class and patches it for use by the RAPI daemon.
+
+    For every HTTP method without an explicit handler but with a
+    C{<METHOD>_OPCODE} attribute, a generic handler is attached to the
+    instance.
+
+    """
+    # Access to private attributes of a client class, pylint: disable=W0212
+    obj = type.__call__(mcs, *args, **kwargs)
+
+    for (method, op_attr, rename_attr, fn_attr) in _OPCODE_ATTRS:
+      if hasattr(obj, method):
+        # If the method handler is already defined, "*_RENAME" or "Get*OpInput"
+        # shouldn't be (they're only used by the automatically generated
+        # handler)
+        assert not hasattr(obj, rename_attr)
+        assert not hasattr(obj, fn_attr)
+      else:
+        # Try to generate handler method on handler instance
+        try:
+          opcode = getattr(obj, op_attr)
+        except AttributeError:
+          pass
+        else:
+          # bind opcode class, rename map and input function to the
+          # shared generic handler
+          setattr(obj, method,
+                  compat.partial(obj._GenericHandler, opcode,
+                                 getattr(obj, rename_attr, None),
+                                 getattr(obj, fn_attr, obj._GetDefaultData)))
+
+    return obj
+
+
+class OpcodeResource(ResourceBase):
+  """Base class for opcode-based RAPI resources.
+
+  Instances of this class automatically gain handler functions through
+  L{_MetaOpcodeResource} for any method for which a C{$METHOD$_OPCODE} variable
+  is defined at class level. Subclasses can define a C{Get$Method$OpInput}
+  method to do their own opcode input processing (e.g. for static values). The
+  C{$METHOD$_RENAME} variable defines which values are renamed (see
+  L{FillOpcode}).
+
+  @cvar GET_OPCODE: Set this to a class derived from L{opcodes.OpCode} to
+    automatically generate a GET handler submitting the opcode
+  @cvar GET_RENAME: Set this to rename parameters in the GET handler (see
+    L{FillOpcode})
+  @ivar GetGetOpInput: Define this to override the default method for
+    getting opcode parameters (see L{baserlib.OpcodeResource._GetDefaultData})
+
+  @cvar PUT_OPCODE: Set this to a class derived from L{opcodes.OpCode} to
+    automatically generate a PUT handler submitting the opcode
+  @cvar PUT_RENAME: Set this to rename parameters in the PUT handler (see
+    L{FillOpcode})
+  @ivar GetPutOpInput: Define this to override the default method for
+    getting opcode parameters (see L{baserlib.OpcodeResource._GetDefaultData})
+
+  @cvar POST_OPCODE: Set this to a class derived from L{opcodes.OpCode} to
+    automatically generate a POST handler submitting the opcode
+  @cvar POST_RENAME: Set this to rename parameters in the POST handler (see
+    L{FillOpcode})
+  @ivar GetPostOpInput: Define this to override the default method for
+    getting opcode parameters (see L{baserlib.OpcodeResource._GetDefaultData})
+
+  @cvar DELETE_OPCODE: Set this to a class derived from L{opcodes.OpCode} to
+    automatically generate a DELETE handler submitting the opcode
+  @cvar DELETE_RENAME: Set this to rename parameters in the DELETE handler (see
+    L{FillOpcode})
+  @ivar GetDeleteOpInput: Define this to override the default method for
+    getting opcode parameters (see L{baserlib.OpcodeResource._GetDefaultData})
+
+  """
+  __metaclass__ = _MetaOpcodeResource
+
+  def _GetDefaultData(self):
+    # default opcode input: request body, no static overrides
+    return (self.request_body, None)
+
+  def _GenericHandler(self, opcode, rename, fn):
+    # builds the opcode from the input function's result and submits it
+    (body, static) = fn()
+    op = FillOpcode(opcode, body, static, rename=rename)
+    return self.SubmitJob([op])
# Internal constants
_REQ_DATA_VERSION_FIELD = "__version__"
-_INST_CREATE_REQV1 = "instance-create-reqv1"
-_INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"
-_NODE_MIGRATE_REQV1 = "node-migrate-reqv1"
-_NODE_EVAC_RES1 = "node-evac-res1"
_INST_NIC_PARAMS = frozenset(["mac", "ip", "mode", "link"])
_INST_CREATE_V0_DISK_PARAMS = frozenset(["size"])
_INST_CREATE_V0_PARAMS = frozenset([
"hypervisor", "file_storage_dir", "file_driver", "dry_run",
])
_INST_CREATE_V0_DPARAMS = frozenset(["beparams", "hvparams"])
+_QPARAM_DRY_RUN = "dry-run"
+_QPARAM_FORCE = "force"
+
+# Feature strings
+INST_CREATE_REQV1 = "instance-create-reqv1"
+INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"
+NODE_MIGRATE_REQV1 = "node-migrate-reqv1"
+NODE_EVAC_RES1 = "node-evac-res1"
+
+# Old feature constant names in case they're referenced by users of this module
+_INST_CREATE_REQV1 = INST_CREATE_REQV1
+_INST_REINSTALL_REQV1 = INST_REINSTALL_REQV1
+_NODE_MIGRATE_REQV1 = NODE_MIGRATE_REQV1
+_NODE_EVAC_RES1 = NODE_EVAC_RES1
# Older pycURL versions don't have all error constants
try:
self.code = code
+def _AppendIf(container, condition, value):
+  """Appends to a list if a condition evaluates to truth.
+
+  @type container: list
+  @param container: list to potentially append to
+  @param condition: condition to evaluate
+  @param value: value to append when the condition is true
+  @return: the condition's value
+
+  """
+  if condition:
+    container.append(value)
+
+  return condition
+
+
+def _AppendDryRunIf(container, condition):
+  """Appends a "dry-run" parameter if a condition evaluates to truth.
+
+  @type container: list
+  @param container: query parameter list to potentially append to
+  @return: the condition's value
+
+  """
+  return _AppendIf(container, condition, (_QPARAM_DRY_RUN, 1))
+
+
+def _AppendForceIf(container, condition):
+  """Appends a "force" parameter if a condition evaluates to truth.
+
+  @type container: list
+  @param container: query parameter list to potentially append to
+  @return: the condition's value
+
+  """
+  return _AppendIf(container, condition, (_QPARAM_FORCE, 1))
+
+
+def _SetItemIf(container, condition, item, value):
+  """Sets an item if a condition evaluates to truth.
+
+  @type container: dict
+  @param container: mapping to potentially update
+  @param condition: condition to evaluate
+  @param item: key to set when the condition is true
+  @param value: value to store under the key
+  @return: the condition's value
+
+  """
+  if condition:
+    container[item] = value
+
+  return condition
+
+
def UsesRapiClient(fn):
"""Decorator for code using RAPI client to initialize pycURL.
"""
query = [("tag", t) for t in tags]
- if dry_run:
- query.append(("dry-run", 1))
+ _AppendDryRunIf(query, dry_run)
return self._SendRequest(HTTP_PUT, "/%s/tags" % GANETI_RAPI_VERSION,
query, None)
"""
query = [("tag", t) for t in tags]
- if dry_run:
- query.append(("dry-run", 1))
+ _AppendDryRunIf(query, dry_run)
return self._SendRequest(HTTP_DELETE, "/%s/tags" % GANETI_RAPI_VERSION,
query, None)
"""
query = []
- if bulk:
- query.append(("bulk", 1))
+ _AppendIf(query, bulk, ("bulk", 1))
instances = self._SendRequest(HTTP_GET,
"/%s/instances" % GANETI_RAPI_VERSION,
"""
query = []
- if kwargs.get("dry_run"):
- query.append(("dry-run", 1))
+ _AppendDryRunIf(query, kwargs.get("dry_run"))
if _INST_CREATE_REQV1 in self.GetFeatures():
# All required fields for request data version 1
"""
query = []
- if dry_run:
- query.append(("dry-run", 1))
+ _AppendDryRunIf(query, dry_run)
return self._SendRequest(HTTP_DELETE,
("/%s/instances/%s" %
"""
query = []
- if ignore_size:
- query.append(("ignore_size", 1))
+ _AppendIf(query, ignore_size, ("ignore_size", 1))
return self._SendRequest(HTTP_PUT,
("/%s/instances/%s/activate-disks" %
("/%s/instances/%s/deactivate-disks" %
(GANETI_RAPI_VERSION, instance)), None, None)
+ def RecreateInstanceDisks(self, instance, disks=None, nodes=None):
+ """Recreate an instance's disks.
+
+ @type instance: string
+ @param instance: Instance name
+ @type disks: list of int
+ @param disks: List of disk indexes
+ @type nodes: list of string
+ @param nodes: New instance nodes, if relocation is desired
+ @rtype: string
+ @return: job id
+
+ """
+ body = {}
+ _SetItemIf(body, disks is not None, "disks", disks)
+ _SetItemIf(body, nodes is not None, "nodes", nodes)
+
+ return self._SendRequest(HTTP_POST,
+ ("/%s/instances/%s/recreate-disks" %
+ (GANETI_RAPI_VERSION, instance)), None, body)
+
def GrowInstanceDisk(self, instance, disk, amount, wait_for_sync=None):
"""Grows a disk of an instance.
"amount": amount,
}
- if wait_for_sync is not None:
- body["wait_for_sync"] = wait_for_sync
+ _SetItemIf(body, wait_for_sync is not None, "wait_for_sync", wait_for_sync)
return self._SendRequest(HTTP_POST,
("/%s/instances/%s/disk/%s/grow" %
"""
query = [("tag", t) for t in tags]
- if dry_run:
- query.append(("dry-run", 1))
+ _AppendDryRunIf(query, dry_run)
return self._SendRequest(HTTP_PUT,
("/%s/instances/%s/tags" %
"""
query = [("tag", t) for t in tags]
- if dry_run:
- query.append(("dry-run", 1))
+ _AppendDryRunIf(query, dry_run)
return self._SendRequest(HTTP_DELETE,
("/%s/instances/%s/tags" %
"""
query = []
- if reboot_type:
- query.append(("type", reboot_type))
- if ignore_secondaries is not None:
- query.append(("ignore_secondaries", ignore_secondaries))
- if dry_run:
- query.append(("dry-run", 1))
+ _AppendDryRunIf(query, dry_run)
+ _AppendIf(query, reboot_type, ("type", reboot_type))
+ _AppendIf(query, ignore_secondaries is not None,
+ ("ignore_secondaries", ignore_secondaries))
return self._SendRequest(HTTP_POST,
("/%s/instances/%s/reboot" %
"""
query = []
- if dry_run:
- query.append(("dry-run", 1))
- if no_remember:
- query.append(("no-remember", 1))
+ _AppendDryRunIf(query, dry_run)
+ _AppendIf(query, no_remember, ("no-remember", 1))
return self._SendRequest(HTTP_PUT,
("/%s/instances/%s/shutdown" %
"""
query = []
- if dry_run:
- query.append(("dry-run", 1))
- if no_remember:
- query.append(("no-remember", 1))
+ _AppendDryRunIf(query, dry_run)
+ _AppendIf(query, no_remember, ("no-remember", 1))
return self._SendRequest(HTTP_PUT,
("/%s/instances/%s/startup" %
body = {
"start": not no_startup,
}
- if os is not None:
- body["os"] = os
- if osparams is not None:
- body["osparams"] = osparams
+ _SetItemIf(body, os is not None, "os", os)
+ _SetItemIf(body, osparams is not None, "osparams", osparams)
return self._SendRequest(HTTP_POST,
("/%s/instances/%s/reinstall" %
(GANETI_RAPI_VERSION, instance)), None, body)
" for instance reinstallation")
query = []
- if os:
- query.append(("os", os))
- if no_startup:
- query.append(("nostartup", 1))
+ _AppendIf(query, os, ("os", os))
+ _AppendIf(query, no_startup, ("nostartup", 1))
+
return self._SendRequest(HTTP_POST,
("/%s/instances/%s/reinstall" %
(GANETI_RAPI_VERSION, instance)), query, None)
# TODO: Convert to body parameters
if disks is not None:
- query.append(("disks", ",".join(str(idx) for idx in disks)))
-
- if remote_node is not None:
- query.append(("remote_node", remote_node))
+ _AppendIf(query, True,
+ ("disks", ",".join(str(idx) for idx in disks)))
- if iallocator is not None:
- query.append(("iallocator", iallocator))
+ _AppendIf(query, remote_node is not None, ("remote_node", remote_node))
+ _AppendIf(query, iallocator is not None, ("iallocator", iallocator))
return self._SendRequest(HTTP_POST,
("/%s/instances/%s/replace-disks" %
"mode": mode,
}
- if shutdown is not None:
- body["shutdown"] = shutdown
-
- if remove_instance is not None:
- body["remove_instance"] = remove_instance
-
- if x509_key_name is not None:
- body["x509_key_name"] = x509_key_name
-
- if destination_x509_ca is not None:
- body["destination_x509_ca"] = destination_x509_ca
+ _SetItemIf(body, shutdown is not None, "shutdown", shutdown)
+ _SetItemIf(body, remove_instance is not None,
+ "remove_instance", remove_instance)
+ _SetItemIf(body, x509_key_name is not None, "x509_key_name", x509_key_name)
+ _SetItemIf(body, destination_x509_ca is not None,
+ "destination_x509_ca", destination_x509_ca)
return self._SendRequest(HTTP_PUT,
("/%s/instances/%s/export" %
"""
body = {}
-
- if mode is not None:
- body["mode"] = mode
-
- if cleanup is not None:
- body["cleanup"] = cleanup
+ _SetItemIf(body, mode is not None, "mode", mode)
+ _SetItemIf(body, cleanup is not None, "cleanup", cleanup)
return self._SendRequest(HTTP_PUT,
("/%s/instances/%s/migrate" %
"""
body = {}
-
- if iallocator is not None:
- body["iallocator"] = iallocator
-
- if ignore_consistency is not None:
- body["ignore_consistency"] = ignore_consistency
-
- if target_node is not None:
- body["target_node"] = target_node
+ _SetItemIf(body, iallocator is not None, "iallocator", iallocator)
+ _SetItemIf(body, ignore_consistency is not None,
+ "ignore_consistency", ignore_consistency)
+ _SetItemIf(body, target_node is not None, "target_node", target_node)
return self._SendRequest(HTTP_PUT,
("/%s/instances/%s/failover" %
"new_name": new_name,
}
- if ip_check is not None:
- body["ip_check"] = ip_check
-
- if name_check is not None:
- body["name_check"] = name_check
+ _SetItemIf(body, ip_check is not None, "ip_check", ip_check)
+ _SetItemIf(body, name_check is not None, "name_check", name_check)
return self._SendRequest(HTTP_PUT,
("/%s/instances/%s/rename" %
"""
query = []
- if dry_run:
- query.append(("dry-run", 1))
+ _AppendDryRunIf(query, dry_run)
return self._SendRequest(HTTP_DELETE,
"/%s/jobs/%s" % (GANETI_RAPI_VERSION, job_id),
"""
query = []
- if bulk:
- query.append(("bulk", 1))
+ _AppendIf(query, bulk, ("bulk", 1))
nodes = self._SendRequest(HTTP_GET, "/%s/nodes" % GANETI_RAPI_VERSION,
query, None)
raise GanetiApiError("Only one of iallocator or remote_node can be used")
query = []
- if dry_run:
- query.append(("dry-run", 1))
+ _AppendDryRunIf(query, dry_run)
if _NODE_EVAC_RES1 in self.GetFeatures():
# Server supports body parameters
body = {}
- if iallocator is not None:
- body["iallocator"] = iallocator
- if remote_node is not None:
- body["remote_node"] = remote_node
- if early_release is not None:
- body["early_release"] = early_release
- if mode is not None:
- body["mode"] = mode
+ _SetItemIf(body, iallocator is not None, "iallocator", iallocator)
+ _SetItemIf(body, remote_node is not None, "remote_node", remote_node)
+ _SetItemIf(body, early_release is not None,
+ "early_release", early_release)
+ _SetItemIf(body, mode is not None, "mode", mode)
else:
# Pre-2.5 request format
body = None
if mode is not None and mode != NODE_EVAC_SEC:
raise GanetiApiError("Server can only evacuate secondary instances")
- if iallocator:
- query.append(("iallocator", iallocator))
- if remote_node:
- query.append(("remote_node", remote_node))
- if early_release:
- query.append(("early_release", 1))
+ _AppendIf(query, iallocator, ("iallocator", iallocator))
+ _AppendIf(query, remote_node, ("remote_node", remote_node))
+ _AppendIf(query, early_release, ("early_release", 1))
return self._SendRequest(HTTP_POST,
("/%s/nodes/%s/evacuate" %
"""
query = []
- if dry_run:
- query.append(("dry-run", 1))
+ _AppendDryRunIf(query, dry_run)
if _NODE_MIGRATE_REQV1 in self.GetFeatures():
body = {}
- if mode is not None:
- body["mode"] = mode
- if iallocator is not None:
- body["iallocator"] = iallocator
- if target_node is not None:
- body["target_node"] = target_node
+ _SetItemIf(body, mode is not None, "mode", mode)
+ _SetItemIf(body, iallocator is not None, "iallocator", iallocator)
+ _SetItemIf(body, target_node is not None, "target_node", target_node)
assert len(query) <= 1
raise GanetiApiError("Server does not support specifying target node"
" for node migration")
- if mode is not None:
- query.append(("mode", mode))
+ _AppendIf(query, mode is not None, ("mode", mode))
return self._SendRequest(HTTP_POST,
("/%s/nodes/%s/migrate" %
@return: job id
"""
- query = [
- ("force", force),
- ]
-
- if auto_promote is not None:
- query.append(("auto-promote", auto_promote))
+ query = []
+ _AppendForceIf(query, force)
+ _AppendIf(query, auto_promote is not None, ("auto-promote", auto_promote))
return self._SendRequest(HTTP_PUT,
("/%s/nodes/%s/role" %
(GANETI_RAPI_VERSION, node)), query, role)
+ def PowercycleNode(self, node, force=False):
+ """Powercycles a node.
+
+ @type node: string
+ @param node: Node name
+ @type force: bool
+ @param force: Whether to force the operation
+ @rtype: string
+ @return: job id
+
+ """
+ query = []
+ _AppendForceIf(query, force)
+
+ return self._SendRequest(HTTP_POST,
+ ("/%s/nodes/%s/powercycle" %
+ (GANETI_RAPI_VERSION, node)), query, None)
+
def ModifyNode(self, node, **kwargs):
"""Modifies a node.
("name", name),
]
- if allocatable is not None:
- query.append(("allocatable", allocatable))
+ _AppendIf(query, allocatable is not None, ("allocatable", allocatable))
return self._SendRequest(HTTP_PUT,
("/%s/nodes/%s/storage/modify" %
"""
query = [("tag", t) for t in tags]
- if dry_run:
- query.append(("dry-run", 1))
+ _AppendDryRunIf(query, dry_run)
return self._SendRequest(HTTP_PUT,
("/%s/nodes/%s/tags" %
"""
query = [("tag", t) for t in tags]
- if dry_run:
- query.append(("dry-run", 1))
+ _AppendDryRunIf(query, dry_run)
return self._SendRequest(HTTP_DELETE,
("/%s/nodes/%s/tags" %
"""
query = []
- if bulk:
- query.append(("bulk", 1))
+ _AppendIf(query, bulk, ("bulk", 1))
groups = self._SendRequest(HTTP_GET, "/%s/groups" % GANETI_RAPI_VERSION,
query, None)
"""
query = []
- if dry_run:
- query.append(("dry-run", 1))
+ _AppendDryRunIf(query, dry_run)
body = {
"name": name,
"""
query = []
- if dry_run:
- query.append(("dry-run", 1))
+ _AppendDryRunIf(query, dry_run)
return self._SendRequest(HTTP_DELETE,
("/%s/groups/%s" %
"""
query = []
-
- if force:
- query.append(("force", 1))
-
- if dry_run:
- query.append(("dry-run", 1))
+ _AppendForceIf(query, force)
+ _AppendDryRunIf(query, dry_run)
body = {
"nodes": nodes,
"""
query = [("tag", t) for t in tags]
- if dry_run:
- query.append(("dry-run", 1))
+ _AppendDryRunIf(query, dry_run)
return self._SendRequest(HTTP_PUT,
("/%s/groups/%s/tags" %
"""
query = [("tag", t) for t in tags]
- if dry_run:
- query.append(("dry-run", 1))
+ _AppendDryRunIf(query, dry_run)
return self._SendRequest(HTTP_DELETE,
("/%s/groups/%s/tags" %
(GANETI_RAPI_VERSION, group)), query, None)
- def Query(self, what, fields, filter_=None):
+ def Query(self, what, fields, qfilter=None):
"""Retrieves information about resources.
@type what: string
@param what: Resource name, one of L{constants.QR_VIA_RAPI}
@type fields: list of string
@param fields: Requested fields
- @type filter_: None or list
- @param filter_: Query filter
+ @type qfilter: None or list
+ @param qfilter: Query filter
@rtype: string
@return: job id
"fields": fields,
}
- if filter_ is not None:
- body["filter"] = filter_
+ _SetItemIf(body, qfilter is not None, "qfilter", qfilter)
+ # TODO: remove "filter" after 2.7
+ _SetItemIf(body, qfilter is not None, "filter", qfilter)
return self._SendRequest(HTTP_PUT,
("/%s/query/%s" %
query = []
if fields is not None:
- query.append(("fields", ",".join(fields)))
+ _AppendIf(query, True, ("fields", ",".join(fields)))
return self._SendRequest(HTTP_GET,
("/%s/query/%s/fields" %
from ganeti import http
from ganeti import utils
-from ganeti.rapi import baserlib
from ganeti.rapi import rlib2
return (handler, groups, args)
-class R_root(baserlib.R_Generic):
- """/ resource.
-
- """
- _ROOT_PATTERN = re.compile("^R_([a-zA-Z0-9]+)$")
-
- @classmethod
- def GET(cls):
- """Show the list of mapped resources.
-
- @return: a dictionary with 'name' and 'uri' keys for each of them.
-
- """
- rootlist = []
- for handler in CONNECTOR.values():
- m = cls._ROOT_PATTERN.match(handler.__name__)
- if m:
- name = m.group(1)
- if name != "root":
- rootlist.append(name)
-
- return baserlib.BuildUriList(rootlist, "/%s")
-
-
-def _getResources(id_):
- """Return a list of resources underneath given id.
-
- This is to generalize querying of version resources lists.
-
- @return: a list of resources names.
-
- """
- r_pattern = re.compile("^R_%s_([a-zA-Z0-9]+)$" % id_)
-
- rlist = []
- for handler in CONNECTOR.values():
- m = r_pattern.match(handler.__name__)
- if m:
- name = m.group(1)
- rlist.append(name)
-
- return rlist
-
-
-class R_2(baserlib.R_Generic):
- """/2 resource.
-
- This is the root of the version 2 API.
-
- """
- @staticmethod
- def GET():
- """Show the list of mapped resources.
-
- @return: a dictionary with 'name' and 'uri' keys for each of them.
-
- """
- return baserlib.BuildUriList(_getResources("2"), "/2/%s")
-
-
def GetHandlers(node_name_pattern, instance_name_pattern,
group_name_pattern, job_id_pattern, disk_pattern,
query_res_pattern):
# is more flexible and future-compatible than versioning the whole remote
# API.
return {
- "/": R_root,
+ "/": rlib2.R_root,
+ "/2": rlib2.R_2,
"/version": rlib2.R_version,
- "/2": R_2,
-
"/2/nodes": rlib2.R_2_nodes,
re.compile(r"^/2/nodes/(%s)$" % node_name_pattern):
rlib2.R_2_nodes_name,
+ re.compile(r"^/2/nodes/(%s)/powercycle$" % node_name_pattern):
+ rlib2.R_2_nodes_name_powercycle,
re.compile(r"^/2/nodes/(%s)/tags$" % node_name_pattern):
rlib2.R_2_nodes_name_tags,
re.compile(r"^/2/nodes/(%s)/role$" % node_name_pattern):
rlib2.R_2_instances_name_activate_disks,
re.compile(r"^/2/instances/(%s)/deactivate-disks$" % instance_name_pattern):
rlib2.R_2_instances_name_deactivate_disks,
+ re.compile(r"^/2/instances/(%s)/recreate-disks$" % instance_name_pattern):
+ rlib2.R_2_instances_name_recreate_disks,
re.compile(r"^/2/instances/(%s)/prepare-export$" % instance_name_pattern):
rlib2.R_2_instances_name_prepare_export,
re.compile(r"^/2/instances/(%s)/export$" % instance_name_pattern):
from ganeti import rapi
from ganeti import ht
from ganeti import compat
+from ganeti import ssconf
from ganeti.rapi import baserlib
"name",
"node_cnt",
"node_list",
+ "ipolicy",
] + _COMMON_FIELDS
J_FIELDS_BULK = [
]
_NR_DRAINED = "drained"
-_NR_MASTER_CANDIATE = "master-candidate"
+_NR_MASTER_CANDIDATE = "master-candidate"
_NR_MASTER = "master"
_NR_OFFLINE = "offline"
_NR_REGULAR = "regular"
_NR_MAP = {
constants.NR_MASTER: _NR_MASTER,
- constants.NR_MCANDIDATE: _NR_MASTER_CANDIATE,
+ constants.NR_MCANDIDATE: _NR_MASTER_CANDIDATE,
constants.NR_DRAINED: _NR_DRAINED,
constants.NR_OFFLINE: _NR_OFFLINE,
constants.NR_REGULAR: _NR_REGULAR,
_WFJC_TIMEOUT = 10
-class R_version(baserlib.R_Generic):
+class R_root(baserlib.ResourceBase):
+ """/ resource.
+
+ """
+ @staticmethod
+ def GET():
+ """Supported for legacy reasons.
+
+ """
+ return None
+
+
+class R_2(R_root):
+ """/2 resource.
+
+ """
+
+
+class R_version(baserlib.ResourceBase):
"""/version resource.
This resource should be used to determine the remote API version and
return constants.RAPI_VERSION
-class R_2_info(baserlib.R_Generic):
+class R_2_info(baserlib.OpcodeResource):
"""/2/info resource.
"""
- @staticmethod
- def GET():
+ GET_OPCODE = opcodes.OpClusterQuery
+
+ def GET(self):
"""Returns cluster information.
"""
- client = baserlib.GetClient()
+ client = self.GetClient()
return client.QueryClusterInfo()
-class R_2_features(baserlib.R_Generic):
+class R_2_features(baserlib.ResourceBase):
"""/2/features resource.
"""
return list(ALL_FEATURES)
-class R_2_os(baserlib.R_Generic):
+class R_2_os(baserlib.OpcodeResource):
"""/2/os resource.
"""
- @staticmethod
- def GET():
+ GET_OPCODE = opcodes.OpOsDiagnose
+
+ def GET(self):
"""Return a list of all OSes.
Can return error 500 in case of a problem.
Example: ["debian-etch"]
"""
- cl = baserlib.GetClient()
+ cl = self.GetClient()
op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
- job_id = baserlib.SubmitJob([op], cl)
+ job_id = self.SubmitJob([op], cl=cl)
# we use custom feedback function, instead of print we log the status
result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
diagnose_data = result[0]
return os_names
-class R_2_redist_config(baserlib.R_Generic):
+class R_2_redist_config(baserlib.OpcodeResource):
"""/2/redistribute-config resource.
"""
- @staticmethod
- def PUT():
- """Redistribute configuration to all nodes.
-
- """
- return baserlib.SubmitJob([opcodes.OpClusterRedistConf()])
+ PUT_OPCODE = opcodes.OpClusterRedistConf
-class R_2_cluster_modify(baserlib.R_Generic):
+class R_2_cluster_modify(baserlib.OpcodeResource):
"""/2/modify resource.
"""
- def PUT(self):
- """Modifies cluster parameters.
-
- @return: a job id
-
- """
- op = baserlib.FillOpcode(opcodes.OpClusterSetParams, self.request_body,
- None)
-
- return baserlib.SubmitJob([op])
+ PUT_OPCODE = opcodes.OpClusterSetParams
-class R_2_jobs(baserlib.R_Generic):
+class R_2_jobs(baserlib.ResourceBase):
"""/2/jobs resource.
"""
@return: a dictionary with jobs id and uri.
"""
- client = baserlib.GetClient()
+ client = self.GetClient()
if self.useBulk():
bulkdata = client.QueryJobs(None, J_FIELDS_BULK)
uri_fields=("id", "uri"))
-class R_2_jobs_id(baserlib.R_Generic):
+class R_2_jobs_id(baserlib.ResourceBase):
"""/2/jobs/[job_id] resource.
"""
"""
job_id = self.items[0]
- result = baserlib.GetClient().QueryJobs([job_id, ], J_FIELDS)[0]
+ result = self.GetClient().QueryJobs([job_id, ], J_FIELDS)[0]
if result is None:
raise http.HttpNotFound()
return baserlib.MapFields(J_FIELDS, result)
"""
job_id = self.items[0]
- result = baserlib.GetClient().CancelJob(job_id)
+ result = self.GetClient().CancelJob(job_id)
return result
-class R_2_jobs_id_wait(baserlib.R_Generic):
+class R_2_jobs_id_wait(baserlib.ResourceBase):
"""/2/jobs/[job_id]/wait resource.
"""
raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
" be a number")
- client = baserlib.GetClient()
+ client = self.GetClient()
result = client.WaitForJobChangeOnce(job_id, fields,
prev_job_info, prev_log_serial,
timeout=_WFJC_TIMEOUT)
}
-class R_2_nodes(baserlib.R_Generic):
+class R_2_nodes(baserlib.OpcodeResource):
"""/2/nodes resource.
"""
+ GET_OPCODE = opcodes.OpNodeQuery
+
def GET(self):
"""Returns a list of all nodes.
"""
- client = baserlib.GetClient()
+ client = self.GetClient()
if self.useBulk():
bulkdata = client.QueryNodes([], N_FIELDS, False)
uri_fields=("id", "uri"))
-class R_2_nodes_name(baserlib.R_Generic):
+class R_2_nodes_name(baserlib.OpcodeResource):
"""/2/nodes/[node_name] resource.
"""
+ GET_OPCODE = opcodes.OpNodeQuery
+
def GET(self):
"""Send information about a node.
"""
node_name = self.items[0]
- client = baserlib.GetClient()
+ client = self.GetClient()
result = baserlib.HandleItemQueryErrors(client.QueryNodes,
names=[node_name], fields=N_FIELDS,
return baserlib.MapFields(N_FIELDS, result[0])
-class R_2_nodes_name_role(baserlib.R_Generic):
- """ /2/nodes/[node_name]/role resource.
+class R_2_nodes_name_powercycle(baserlib.OpcodeResource):
+ """/2/nodes/[node_name]/powercycle resource.
+
+ """
+ POST_OPCODE = opcodes.OpNodePowercycle
+
+ def GetPostOpInput(self):
+ """Tries to powercycle a node.
+
+ """
+ return (self.request_body, {
+ "node_name": self.items[0],
+ "force": self.useForce(),
+ })
+
+
+class R_2_nodes_name_role(baserlib.OpcodeResource):
+ """/2/nodes/[node_name]/role resource.
"""
+ PUT_OPCODE = opcodes.OpNodeSetParams
+
def GET(self):
"""Returns the current node role.
"""
node_name = self.items[0]
- client = baserlib.GetClient()
+ client = self.GetClient()
result = client.QueryNodes(names=[node_name], fields=["role"],
use_locking=self.useLocking())
return _NR_MAP[result[0][0]]
- def PUT(self):
+ def GetPutOpInput(self):
"""Sets the node role.
- @return: a job id
-
"""
- if not isinstance(self.request_body, basestring):
- raise http.HttpBadRequest("Invalid body contents, not a string")
+ baserlib.CheckType(self.request_body, basestring, "Body contents")
- node_name = self.items[0]
role = self.request_body
- auto_promote = bool(self._checkIntVariable("auto-promote"))
-
if role == _NR_REGULAR:
candidate = False
offline = False
drained = False
- elif role == _NR_MASTER_CANDIATE:
+ elif role == _NR_MASTER_CANDIDATE:
candidate = True
offline = drained = None
else:
raise http.HttpBadRequest("Can't set '%s' role" % role)
- op = opcodes.OpNodeSetParams(node_name=node_name,
- master_candidate=candidate,
- offline=offline,
- drained=drained,
- auto_promote=auto_promote,
- force=bool(self.useForce()))
+ assert len(self.items) == 1
- return baserlib.SubmitJob([op])
+ return ({}, {
+ "node_name": self.items[0],
+ "master_candidate": candidate,
+ "offline": offline,
+ "drained": drained,
+ "force": self.useForce(),
+ "auto_promote": bool(self._checkIntVariable("auto-promote", default=0)),
+ })
-class R_2_nodes_name_evacuate(baserlib.R_Generic):
+class R_2_nodes_name_evacuate(baserlib.OpcodeResource):
"""/2/nodes/[node_name]/evacuate resource.
"""
- def POST(self):
+ POST_OPCODE = opcodes.OpNodeEvacuate
+
+ def GetPostOpInput(self):
"""Evacuate all instances off a node.
"""
- op = baserlib.FillOpcode(opcodes.OpNodeEvacuate, self.request_body, {
+ return (self.request_body, {
"node_name": self.items[0],
"dry_run": self.dryRun(),
})
- return baserlib.SubmitJob([op])
-
-class R_2_nodes_name_migrate(baserlib.R_Generic):
+class R_2_nodes_name_migrate(baserlib.OpcodeResource):
"""/2/nodes/[node_name]/migrate resource.
"""
- def POST(self):
+ POST_OPCODE = opcodes.OpNodeMigrate
+
+ def GetPostOpInput(self):
"""Migrate all primary instances from a node.
"""
- node_name = self.items[0]
-
if self.queryargs:
# Support old-style requests
if "live" in self.queryargs and "mode" in self.queryargs:
else:
data = self.request_body
- op = baserlib.FillOpcode(opcodes.OpNodeMigrate, data, {
- "node_name": node_name,
+ return (data, {
+ "node_name": self.items[0],
})
- return baserlib.SubmitJob([op])
-
-class R_2_nodes_name_modify(baserlib.R_Generic):
+class R_2_nodes_name_modify(baserlib.OpcodeResource):
"""/2/nodes/[node_name]/modify resource.
"""
- def POST(self):
- """Changes parameters of a node.
+ POST_OPCODE = opcodes.OpNodeSetParams
- @return: a job id
+ def GetPostOpInput(self):
+ """Changes parameters of a node.
"""
- baserlib.CheckType(self.request_body, dict, "Body contents")
+ assert len(self.items) == 1
- op = baserlib.FillOpcode(opcodes.OpNodeSetParams, self.request_body, {
+ return (self.request_body, {
"node_name": self.items[0],
})
- return baserlib.SubmitJob([op])
-
-class R_2_nodes_name_storage(baserlib.R_Generic):
+class R_2_nodes_name_storage(baserlib.OpcodeResource):
"""/2/nodes/[node_name]/storage resource.
"""
# LUNodeQueryStorage acquires locks, hence restricting access to GET
GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
+ GET_OPCODE = opcodes.OpNodeQueryStorage
- def GET(self):
- node_name = self.items[0]
+ def GetGetOpInput(self):
+ """List storage available on a node.
+ """
storage_type = self._checkStringVariable("storage_type", None)
- if not storage_type:
- raise http.HttpBadRequest("Missing the required 'storage_type'"
- " parameter")
-
output_fields = self._checkStringVariable("output_fields", None)
+
if not output_fields:
raise http.HttpBadRequest("Missing the required 'output_fields'"
" parameter")
- op = opcodes.OpNodeQueryStorage(nodes=[node_name],
- storage_type=storage_type,
- output_fields=output_fields.split(","))
- return baserlib.SubmitJob([op])
+ return ({}, {
+ "nodes": [self.items[0]],
+ "storage_type": storage_type,
+ "output_fields": output_fields.split(","),
+ })
-class R_2_nodes_name_storage_modify(baserlib.R_Generic):
+class R_2_nodes_name_storage_modify(baserlib.OpcodeResource):
"""/2/nodes/[node_name]/storage/modify resource.
"""
- def PUT(self):
- node_name = self.items[0]
+ PUT_OPCODE = opcodes.OpNodeModifyStorage
- storage_type = self._checkStringVariable("storage_type", None)
- if not storage_type:
- raise http.HttpBadRequest("Missing the required 'storage_type'"
- " parameter")
+ def GetPutOpInput(self):
+ """Modifies a storage volume on a node.
+ """
+ storage_type = self._checkStringVariable("storage_type", None)
name = self._checkStringVariable("name", None)
+
if not name:
raise http.HttpBadRequest("Missing the required 'name'"
" parameter")
changes[constants.SF_ALLOCATABLE] = \
bool(self._checkIntVariable("allocatable", default=1))
- op = opcodes.OpNodeModifyStorage(node_name=node_name,
- storage_type=storage_type,
- name=name,
- changes=changes)
- return baserlib.SubmitJob([op])
+ return ({}, {
+ "node_name": self.items[0],
+ "storage_type": storage_type,
+ "name": name,
+ "changes": changes,
+ })
-class R_2_nodes_name_storage_repair(baserlib.R_Generic):
+class R_2_nodes_name_storage_repair(baserlib.OpcodeResource):
"""/2/nodes/[node_name]/storage/repair resource.
"""
- def PUT(self):
- node_name = self.items[0]
+ PUT_OPCODE = opcodes.OpRepairNodeStorage
- storage_type = self._checkStringVariable("storage_type", None)
- if not storage_type:
- raise http.HttpBadRequest("Missing the required 'storage_type'"
- " parameter")
+ def GetPutOpInput(self):
+ """Repairs a storage volume on a node.
+ """
+ storage_type = self._checkStringVariable("storage_type", None)
name = self._checkStringVariable("name", None)
if not name:
raise http.HttpBadRequest("Missing the required 'name'"
" parameter")
- op = opcodes.OpRepairNodeStorage(node_name=node_name,
- storage_type=storage_type,
- name=name)
- return baserlib.SubmitJob([op])
-
+ return ({}, {
+ "node_name": self.items[0],
+ "storage_type": storage_type,
+ "name": name,
+ })
-def _ParseCreateGroupRequest(data, dry_run):
- """Parses a request for creating a node group.
- @rtype: L{opcodes.OpGroupAdd}
- @return: Group creation opcode
+class R_2_groups(baserlib.OpcodeResource):
+ """/2/groups resource.
"""
- override = {
- "dry_run": dry_run,
- }
-
- rename = {
+ GET_OPCODE = opcodes.OpGroupQuery
+ POST_OPCODE = opcodes.OpGroupAdd
+ POST_RENAME = {
"name": "group_name",
}
- return baserlib.FillOpcode(opcodes.OpGroupAdd, data, override,
- rename=rename)
-
+ def GetPostOpInput(self):
+ """Create a node group.
-class R_2_groups(baserlib.R_Generic):
- """/2/groups resource.
+ """
+ assert not self.items
+ return (self.request_body, {
+ "dry_run": self.dryRun(),
+ })
- """
def GET(self):
"""Returns a list of all node groups.
"""
- client = baserlib.GetClient()
+ client = self.GetClient()
if self.useBulk():
bulkdata = client.QueryGroups([], G_FIELDS, False)
return baserlib.BuildUriList(groupnames, "/2/groups/%s",
uri_fields=("name", "uri"))
- def POST(self):
- """Create a node group.
- @return: a job id
-
- """
- baserlib.CheckType(self.request_body, dict, "Body contents")
- op = _ParseCreateGroupRequest(self.request_body, self.dryRun())
- return baserlib.SubmitJob([op])
-
-
-class R_2_groups_name(baserlib.R_Generic):
+class R_2_groups_name(baserlib.OpcodeResource):
"""/2/groups/[group_name] resource.
"""
+ DELETE_OPCODE = opcodes.OpGroupRemove
+
def GET(self):
"""Send information about a node group.
"""
group_name = self.items[0]
- client = baserlib.GetClient()
+ client = self.GetClient()
result = baserlib.HandleItemQueryErrors(client.QueryGroups,
names=[group_name], fields=G_FIELDS,
return baserlib.MapFields(G_FIELDS, result[0])
- def DELETE(self):
+ def GetDeleteOpInput(self):
"""Delete a node group.
"""
- op = opcodes.OpGroupRemove(group_name=self.items[0],
- dry_run=bool(self.dryRun()))
-
- return baserlib.SubmitJob([op])
-
-
-def _ParseModifyGroupRequest(name, data):
- """Parses a request for modifying a node group.
-
- @rtype: L{opcodes.OpGroupSetParams}
- @return: Group modify opcode
-
- """
- return baserlib.FillOpcode(opcodes.OpGroupSetParams, data, {
- "group_name": name,
- })
+ assert len(self.items) == 1
+ return ({}, {
+ "group_name": self.items[0],
+ "dry_run": self.dryRun(),
+ })
-class R_2_groups_name_modify(baserlib.R_Generic):
+class R_2_groups_name_modify(baserlib.OpcodeResource):
"""/2/groups/[group_name]/modify resource.
"""
- def PUT(self):
- """Changes some parameters of node group.
+ PUT_OPCODE = opcodes.OpGroupSetParams
- @return: a job id
+ def GetPutOpInput(self):
+ """Changes some parameters of node group.
"""
- baserlib.CheckType(self.request_body, dict, "Body contents")
-
- op = _ParseModifyGroupRequest(self.items[0], self.request_body)
-
- return baserlib.SubmitJob([op])
-
-
-def _ParseRenameGroupRequest(name, data, dry_run):
- """Parses a request for renaming a node group.
-
- @type name: string
- @param name: name of the node group to rename
- @type data: dict
- @param data: the body received by the rename request
- @type dry_run: bool
- @param dry_run: whether to perform a dry run
-
- @rtype: L{opcodes.OpGroupRename}
- @return: Node group rename opcode
-
- """
- return baserlib.FillOpcode(opcodes.OpGroupRename, data, {
- "group_name": name,
- "dry_run": dry_run,
- })
+ assert self.items
+ return (self.request_body, {
+ "group_name": self.items[0],
+ })
-class R_2_groups_name_rename(baserlib.R_Generic):
+class R_2_groups_name_rename(baserlib.OpcodeResource):
"""/2/groups/[group_name]/rename resource.
"""
- def PUT(self):
- """Changes the name of a node group.
+ PUT_OPCODE = opcodes.OpGroupRename
- @return: a job id
+ def GetPutOpInput(self):
+ """Changes the name of a node group.
"""
- baserlib.CheckType(self.request_body, dict, "Body contents")
- op = _ParseRenameGroupRequest(self.items[0], self.request_body,
- self.dryRun())
- return baserlib.SubmitJob([op])
+ assert len(self.items) == 1
+ return (self.request_body, {
+ "group_name": self.items[0],
+ "dry_run": self.dryRun(),
+ })
-class R_2_groups_name_assign_nodes(baserlib.R_Generic):
+class R_2_groups_name_assign_nodes(baserlib.OpcodeResource):
"""/2/groups/[group_name]/assign-nodes resource.
"""
- def PUT(self):
- """Assigns nodes to a group.
+ PUT_OPCODE = opcodes.OpGroupAssignNodes
- @return: a job id
+ def GetPutOpInput(self):
+ """Assigns nodes to a group.
"""
- op = baserlib.FillOpcode(opcodes.OpGroupAssignNodes, self.request_body, {
+ assert len(self.items) == 1
+ return (self.request_body, {
"group_name": self.items[0],
"dry_run": self.dryRun(),
"force": self.useForce(),
})
- return baserlib.SubmitJob([op])
-
-def _ParseInstanceCreateRequestVersion1(data, dry_run):
- """Parses an instance creation request version 1.
-
- @rtype: L{opcodes.OpInstanceCreate}
- @return: Instance creation opcode
+class R_2_instances(baserlib.OpcodeResource):
+ """/2/instances resource.
"""
- override = {
- "dry_run": dry_run,
- }
-
- rename = {
+ GET_OPCODE = opcodes.OpInstanceQuery
+ POST_OPCODE = opcodes.OpInstanceCreate
+ POST_RENAME = {
"os": "os_type",
"name": "instance_name",
}
- return baserlib.FillOpcode(opcodes.OpInstanceCreate, data, override,
- rename=rename)
-
-
-class R_2_instances(baserlib.R_Generic):
- """/2/instances resource.
-
- """
def GET(self):
"""Returns a list of all available instances.
"""
- client = baserlib.GetClient()
+ client = self.GetClient()
use_locking = self.useLocking()
if self.useBulk():
return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
uri_fields=("id", "uri"))
- def POST(self):
+ def GetPostOpInput(self):
"""Create an instance.
@return: a job id
"""
- if not isinstance(self.request_body, dict):
- raise http.HttpBadRequest("Invalid body contents, not a dictionary")
+ baserlib.CheckType(self.request_body, dict, "Body contents")
# Default to request data version 0
data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)
if data_version == 0:
raise http.HttpBadRequest("Instance creation request version 0 is no"
" longer supported")
- elif data_version == 1:
- data = self.request_body.copy()
- # Remove "__version__"
- data.pop(_REQ_DATA_VERSION, None)
- op = _ParseInstanceCreateRequestVersion1(data, self.dryRun())
- else:
+ elif data_version != 1:
raise http.HttpBadRequest("Unsupported request data version %s" %
data_version)
- return baserlib.SubmitJob([op])
+ data = self.request_body.copy()
+ # Remove "__version__"
+ data.pop(_REQ_DATA_VERSION, None)
+
+ return (data, {
+ "dry_run": self.dryRun(),
+ })
-class R_2_instances_name(baserlib.R_Generic):
+class R_2_instances_name(baserlib.OpcodeResource):
"""/2/instances/[instance_name] resource.
"""
+ GET_OPCODE = opcodes.OpInstanceQuery
+ DELETE_OPCODE = opcodes.OpInstanceRemove
+
def GET(self):
"""Send information about an instance.
"""
- client = baserlib.GetClient()
+ client = self.GetClient()
instance_name = self.items[0]
result = baserlib.HandleItemQueryErrors(client.QueryInstances,
return baserlib.MapFields(I_FIELDS, result[0])
- def DELETE(self):
+ def GetDeleteOpInput(self):
"""Delete an instance.
"""
- op = opcodes.OpInstanceRemove(instance_name=self.items[0],
- ignore_failures=False,
- dry_run=bool(self.dryRun()))
- return baserlib.SubmitJob([op])
+ assert len(self.items) == 1
+ return ({}, {
+ "instance_name": self.items[0],
+ "ignore_failures": False,
+ "dry_run": self.dryRun(),
+ })
-class R_2_instances_name_info(baserlib.R_Generic):
+class R_2_instances_name_info(baserlib.OpcodeResource):
"""/2/instances/[instance_name]/info resource.
"""
- def GET(self):
+ GET_OPCODE = opcodes.OpInstanceQueryData
+
+ def GetGetOpInput(self):
"""Request detailed instance information.
"""
- instance_name = self.items[0]
- static = bool(self._checkIntVariable("static", default=0))
-
- op = opcodes.OpInstanceQueryData(instances=[instance_name],
- static=static)
- return baserlib.SubmitJob([op])
+ assert len(self.items) == 1
+ return ({}, {
+ "instances": [self.items[0]],
+ "static": bool(self._checkIntVariable("static", default=0)),
+ })
-class R_2_instances_name_reboot(baserlib.R_Generic):
+class R_2_instances_name_reboot(baserlib.OpcodeResource):
"""/2/instances/[instance_name]/reboot resource.
Implements an instance reboot.
"""
- def POST(self):
+ POST_OPCODE = opcodes.OpInstanceReboot
+
+ def GetPostOpInput(self):
"""Reboot an instance.
The URI takes type=[hard|soft|full] and
ignore_secondaries=[False|True] parameters.
"""
- instance_name = self.items[0]
- reboot_type = self.queryargs.get("type",
- [constants.INSTANCE_REBOOT_HARD])[0]
- ignore_secondaries = bool(self._checkIntVariable("ignore_secondaries"))
- op = opcodes.OpInstanceReboot(instance_name=instance_name,
- reboot_type=reboot_type,
- ignore_secondaries=ignore_secondaries,
- dry_run=bool(self.dryRun()))
-
- return baserlib.SubmitJob([op])
+ return ({}, {
+ "instance_name": self.items[0],
+ "reboot_type":
+ self.queryargs.get("type", [constants.INSTANCE_REBOOT_HARD])[0],
+ "ignore_secondaries": bool(self._checkIntVariable("ignore_secondaries")),
+ "dry_run": self.dryRun(),
+ })
-class R_2_instances_name_startup(baserlib.R_Generic):
+class R_2_instances_name_startup(baserlib.OpcodeResource):
"""/2/instances/[instance_name]/startup resource.
Implements an instance startup.
"""
- def PUT(self):
+ PUT_OPCODE = opcodes.OpInstanceStartup
+
+ def GetPutOpInput(self):
"""Startup an instance.
    The URI takes force=[False|True] parameter to start the instance
    even if secondary disks are failing.
"""
- instance_name = self.items[0]
- force_startup = bool(self._checkIntVariable("force"))
- no_remember = bool(self._checkIntVariable("no_remember"))
- op = opcodes.OpInstanceStartup(instance_name=instance_name,
- force=force_startup,
- dry_run=bool(self.dryRun()),
- no_remember=no_remember)
-
- return baserlib.SubmitJob([op])
-
-
-def _ParseShutdownInstanceRequest(name, data, dry_run, no_remember):
- """Parses a request for an instance shutdown.
-
- @rtype: L{opcodes.OpInstanceShutdown}
- @return: Instance shutdown opcode
-
- """
- return baserlib.FillOpcode(opcodes.OpInstanceShutdown, data, {
- "instance_name": name,
- "dry_run": dry_run,
- "no_remember": no_remember,
- })
+ return ({}, {
+ "instance_name": self.items[0],
+ "force": self.useForce(),
+ "dry_run": self.dryRun(),
+ "no_remember": bool(self._checkIntVariable("no_remember")),
+ })
-class R_2_instances_name_shutdown(baserlib.R_Generic):
+class R_2_instances_name_shutdown(baserlib.OpcodeResource):
"""/2/instances/[instance_name]/shutdown resource.
Implements an instance shutdown.
"""
- def PUT(self):
- """Shutdown an instance.
+ PUT_OPCODE = opcodes.OpInstanceShutdown
- @return: a job id
+ def GetPutOpInput(self):
+ """Shutdown an instance.
"""
- no_remember = bool(self._checkIntVariable("no_remember"))
- op = _ParseShutdownInstanceRequest(self.items[0], self.request_body,
- bool(self.dryRun()), no_remember)
-
- return baserlib.SubmitJob([op])
+ return (self.request_body, {
+ "instance_name": self.items[0],
+ "no_remember": bool(self._checkIntVariable("no_remember")),
+ "dry_run": self.dryRun(),
+ })
def _ParseInstanceReinstallRequest(name, data):
return ops
-class R_2_instances_name_reinstall(baserlib.R_Generic):
+class R_2_instances_name_reinstall(baserlib.OpcodeResource):
"""/2/instances/[instance_name]/reinstall resource.
Implements an instance reinstall.
"""
+ POST_OPCODE = opcodes.OpInstanceReinstall
+
def POST(self):
"""Reinstall an instance.
ops = _ParseInstanceReinstallRequest(self.items[0], body)
- return baserlib.SubmitJob(ops)
-
-
-def _ParseInstanceReplaceDisksRequest(name, data):
- """Parses a request for an instance export.
-
- @rtype: L{opcodes.OpInstanceReplaceDisks}
- @return: Instance export opcode
-
- """
- override = {
- "instance_name": name,
- }
-
- # Parse disks
- try:
- raw_disks = data.pop("disks")
- except KeyError:
- pass
- else:
- if raw_disks:
- if ht.TListOf(ht.TInt)(raw_disks): # pylint: disable=E1102
- data["disks"] = raw_disks
- else:
- # Backwards compatibility for strings of the format "1, 2, 3"
- try:
- data["disks"] = [int(part) for part in raw_disks.split(",")]
- except (TypeError, ValueError), err:
- raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
+ return self.SubmitJob(ops)
- return baserlib.FillOpcode(opcodes.OpInstanceReplaceDisks, data, override)
-
-class R_2_instances_name_replace_disks(baserlib.R_Generic):
+class R_2_instances_name_replace_disks(baserlib.OpcodeResource):
"""/2/instances/[instance_name]/replace-disks resource.
"""
- def POST(self):
+ POST_OPCODE = opcodes.OpInstanceReplaceDisks
+
+ def GetPostOpInput(self):
"""Replaces disks on an instance.
"""
+ static = {
+ "instance_name": self.items[0],
+ }
+
if self.request_body:
- body = self.request_body
+ data = self.request_body
elif self.queryargs:
# Legacy interface, do not modify/extend
- body = {
+ data = {
"remote_node": self._checkStringVariable("remote_node", default=None),
"mode": self._checkStringVariable("mode", default=None),
"disks": self._checkStringVariable("disks", default=None),
"iallocator": self._checkStringVariable("iallocator", default=None),
}
else:
- body = {}
+ data = {}
- op = _ParseInstanceReplaceDisksRequest(self.items[0], body)
+ # Parse disks
+ try:
+ raw_disks = data.pop("disks")
+ except KeyError:
+ pass
+ else:
+ if raw_disks:
+ if ht.TListOf(ht.TInt)(raw_disks): # pylint: disable=E1102
+ data["disks"] = raw_disks
+ else:
+ # Backwards compatibility for strings of the format "1, 2, 3"
+ try:
+ data["disks"] = [int(part) for part in raw_disks.split(",")]
+ except (TypeError, ValueError), err:
+ raise http.HttpBadRequest("Invalid disk index passed: %s" % err)
- return baserlib.SubmitJob([op])
+ return (data, static)
-class R_2_instances_name_activate_disks(baserlib.R_Generic):
+class R_2_instances_name_activate_disks(baserlib.OpcodeResource):
"""/2/instances/[instance_name]/activate-disks resource.
"""
- def PUT(self):
+ PUT_OPCODE = opcodes.OpInstanceActivateDisks
+
+ def GetPutOpInput(self):
"""Activate disks for an instance.
The URI might contain ignore_size to ignore current recorded size.
"""
- instance_name = self.items[0]
- ignore_size = bool(self._checkIntVariable("ignore_size"))
-
- op = opcodes.OpInstanceActivateDisks(instance_name=instance_name,
- ignore_size=ignore_size)
-
- return baserlib.SubmitJob([op])
+ return ({}, {
+ "instance_name": self.items[0],
+ "ignore_size": bool(self._checkIntVariable("ignore_size")),
+ })
-class R_2_instances_name_deactivate_disks(baserlib.R_Generic):
+class R_2_instances_name_deactivate_disks(baserlib.OpcodeResource):
"""/2/instances/[instance_name]/deactivate-disks resource.
"""
- def PUT(self):
+ PUT_OPCODE = opcodes.OpInstanceDeactivateDisks
+
+ def GetPutOpInput(self):
"""Deactivate disks for an instance.
"""
- instance_name = self.items[0]
-
- op = opcodes.OpInstanceDeactivateDisks(instance_name=instance_name)
-
- return baserlib.SubmitJob([op])
+ return ({}, {
+ "instance_name": self.items[0],
+ })
-class R_2_instances_name_prepare_export(baserlib.R_Generic):
- """/2/instances/[instance_name]/prepare-export resource.
+class R_2_instances_name_recreate_disks(baserlib.OpcodeResource):
+ """/2/instances/[instance_name]/recreate-disks resource.
"""
- def PUT(self):
- """Prepares an export for an instance.
+ POST_OPCODE = opcodes.OpInstanceRecreateDisks
- @return: a job id
+ def GetPostOpInput(self):
+ """Recreate disks for an instance.
"""
- instance_name = self.items[0]
- mode = self._checkStringVariable("mode")
-
- op = opcodes.OpBackupPrepare(instance_name=instance_name,
- mode=mode)
-
- return baserlib.SubmitJob([op])
-
+ return ({}, {
+ "instance_name": self.items[0],
+ })
-def _ParseExportInstanceRequest(name, data):
- """Parses a request for an instance export.
- @rtype: L{opcodes.OpBackupExport}
- @return: Instance export opcode
+class R_2_instances_name_prepare_export(baserlib.OpcodeResource):
+ """/2/instances/[instance_name]/prepare-export resource.
"""
- # Rename "destination" to "target_node"
- try:
- data["target_node"] = data.pop("destination")
- except KeyError:
- pass
+ PUT_OPCODE = opcodes.OpBackupPrepare
- return baserlib.FillOpcode(opcodes.OpBackupExport, data, {
- "instance_name": name,
- })
+ def GetPutOpInput(self):
+ """Prepares an export for an instance.
+ """
+ return ({}, {
+ "instance_name": self.items[0],
+ "mode": self._checkStringVariable("mode"),
+ })
-class R_2_instances_name_export(baserlib.R_Generic):
+
+class R_2_instances_name_export(baserlib.OpcodeResource):
"""/2/instances/[instance_name]/export resource.
"""
- def PUT(self):
- """Exports an instance.
+ PUT_OPCODE = opcodes.OpBackupExport
+ PUT_RENAME = {
+ "destination": "target_node",
+ }
- @return: a job id
+ def GetPutOpInput(self):
+ """Exports an instance.
"""
- if not isinstance(self.request_body, dict):
- raise http.HttpBadRequest("Invalid body contents, not a dictionary")
-
- op = _ParseExportInstanceRequest(self.items[0], self.request_body)
-
- return baserlib.SubmitJob([op])
-
-
-def _ParseMigrateInstanceRequest(name, data):
- """Parses a request for an instance migration.
-
- @rtype: L{opcodes.OpInstanceMigrate}
- @return: Instance migration opcode
-
- """
- return baserlib.FillOpcode(opcodes.OpInstanceMigrate, data, {
- "instance_name": name,
- })
+ return (self.request_body, {
+ "instance_name": self.items[0],
+ })
-class R_2_instances_name_migrate(baserlib.R_Generic):
+class R_2_instances_name_migrate(baserlib.OpcodeResource):
"""/2/instances/[instance_name]/migrate resource.
"""
- def PUT(self):
- """Migrates an instance.
+ PUT_OPCODE = opcodes.OpInstanceMigrate
- @return: a job id
+ def GetPutOpInput(self):
+ """Migrates an instance.
"""
- baserlib.CheckType(self.request_body, dict, "Body contents")
-
- op = _ParseMigrateInstanceRequest(self.items[0], self.request_body)
-
- return baserlib.SubmitJob([op])
+ return (self.request_body, {
+ "instance_name": self.items[0],
+ })
-class R_2_instances_name_failover(baserlib.R_Generic):
+class R_2_instances_name_failover(baserlib.OpcodeResource):
"""/2/instances/[instance_name]/failover resource.
"""
- def PUT(self):
- """Does a failover of an instance.
+ PUT_OPCODE = opcodes.OpInstanceFailover
- @return: a job id
+ def GetPutOpInput(self):
+ """Does a failover of an instance.
"""
- baserlib.CheckType(self.request_body, dict, "Body contents")
-
- op = baserlib.FillOpcode(opcodes.OpInstanceFailover, self.request_body, {
+ return (self.request_body, {
"instance_name": self.items[0],
})
- return baserlib.SubmitJob([op])
-
-def _ParseRenameInstanceRequest(name, data):
- """Parses a request for renaming an instance.
-
- @rtype: L{opcodes.OpInstanceRename}
- @return: Instance rename opcode
-
- """
- return baserlib.FillOpcode(opcodes.OpInstanceRename, data, {
- "instance_name": name,
- })
-
-
-class R_2_instances_name_rename(baserlib.R_Generic):
+class R_2_instances_name_rename(baserlib.OpcodeResource):
"""/2/instances/[instance_name]/rename resource.
"""
- def PUT(self):
- """Changes the name of an instance.
+ PUT_OPCODE = opcodes.OpInstanceRename
- @return: a job id
+ def GetPutOpInput(self):
+ """Changes the name of an instance.
"""
- baserlib.CheckType(self.request_body, dict, "Body contents")
-
- op = _ParseRenameInstanceRequest(self.items[0], self.request_body)
-
- return baserlib.SubmitJob([op])
-
-
-def _ParseModifyInstanceRequest(name, data):
- """Parses a request for modifying an instance.
-
- @rtype: L{opcodes.OpInstanceSetParams}
- @return: Instance modify opcode
-
- """
- return baserlib.FillOpcode(opcodes.OpInstanceSetParams, data, {
- "instance_name": name,
- })
+ return (self.request_body, {
+ "instance_name": self.items[0],
+ })
-class R_2_instances_name_modify(baserlib.R_Generic):
+class R_2_instances_name_modify(baserlib.OpcodeResource):
"""/2/instances/[instance_name]/modify resource.
"""
- def PUT(self):
- """Changes some parameters of an instance.
+ PUT_OPCODE = opcodes.OpInstanceSetParams
- @return: a job id
+ def GetPutOpInput(self):
+ """Changes parameters of an instance.
"""
- baserlib.CheckType(self.request_body, dict, "Body contents")
-
- op = _ParseModifyInstanceRequest(self.items[0], self.request_body)
-
- return baserlib.SubmitJob([op])
+ return (self.request_body, {
+ "instance_name": self.items[0],
+ })
-class R_2_instances_name_disk_grow(baserlib.R_Generic):
+class R_2_instances_name_disk_grow(baserlib.OpcodeResource):
"""/2/instances/[instance_name]/disk/[disk_index]/grow resource.
"""
- def POST(self):
- """Increases the size of an instance disk.
+ POST_OPCODE = opcodes.OpInstanceGrowDisk
- @return: a job id
+ def GetPostOpInput(self):
+ """Increases the size of an instance disk.
"""
- op = baserlib.FillOpcode(opcodes.OpInstanceGrowDisk, self.request_body, {
+ return (self.request_body, {
"instance_name": self.items[0],
"disk": int(self.items[1]),
})
- return baserlib.SubmitJob([op])
-
-class R_2_instances_name_console(baserlib.R_Generic):
+class R_2_instances_name_console(baserlib.ResourceBase):
"""/2/instances/[instance_name]/console resource.
"""
GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
+ GET_OPCODE = opcodes.OpInstanceConsole
def GET(self):
"""Request information for connecting to instance's console.
L{objects.InstanceConsole}
"""
- client = baserlib.GetClient()
+ client = self.GetClient()
((console, ), ) = client.QueryInstances([self.items[0]], ["console"], False)
return [i.strip() for i in fields.split(",")]
-class R_2_query(baserlib.R_Generic):
+class R_2_query(baserlib.ResourceBase):
"""/2/query/[resource] resource.
"""
# Results might contain sensitive information
GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
+ GET_OPCODE = opcodes.OpQuery
+ PUT_OPCODE = opcodes.OpQuery
- def _Query(self, fields, filter_):
- return baserlib.GetClient().Query(self.items[0], fields, filter_).ToDict()
+ def _Query(self, fields, qfilter):
+ return self.GetClient().Query(self.items[0], fields, qfilter).ToDict()
def GET(self):
"""Returns resource information.
except KeyError:
fields = _GetQueryFields(self.queryargs)
- return self._Query(fields, self.request_body.get("filter", None))
+ qfilter = body.get("qfilter", None)
+ # TODO: remove this after 2.7
+ if qfilter is None:
+ qfilter = body.get("filter", None)
+
+ return self._Query(fields, qfilter)
-class R_2_query_fields(baserlib.R_Generic):
+class R_2_query_fields(baserlib.ResourceBase):
"""/2/query/[resource]/fields resource.
"""
+ GET_OPCODE = opcodes.OpQueryFields
+
def GET(self):
"""Retrieves list of available fields for a resource.
else:
fields = _SplitQueryFields(raw_fields[0])
- return baserlib.GetClient().QueryFields(self.items[0], fields).ToDict()
+ return self.GetClient().QueryFields(self.items[0], fields).ToDict()
-class _R_Tags(baserlib.R_Generic):
+class _R_Tags(baserlib.OpcodeResource):
""" Quasiclass for tagging resources
Manages tags. When inheriting this class you must define the
"""
TAG_LEVEL = None
+ GET_OPCODE = opcodes.OpTagsGet
+ PUT_OPCODE = opcodes.OpTagsSet
+ DELETE_OPCODE = opcodes.OpTagsDel
- def __init__(self, items, queryargs, req):
+ def __init__(self, items, queryargs, req, **kwargs):
"""A tag resource constructor.
We have to override the default to sort out cluster naming case.
"""
- baserlib.R_Generic.__init__(self, items, queryargs, req)
+ baserlib.OpcodeResource.__init__(self, items, queryargs, req, **kwargs)
if self.TAG_LEVEL == constants.TAG_CLUSTER:
self.name = None
Example: ["tag1", "tag2", "tag3"]
"""
- # pylint: disable=W0212
- return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)
+ kind = self.TAG_LEVEL
+
+ if kind in (constants.TAG_INSTANCE,
+ constants.TAG_NODEGROUP,
+ constants.TAG_NODE):
+ if not self.name:
+ raise http.HttpBadRequest("Missing name on tag request")
+
+ cl = self.GetClient()
+ if kind == constants.TAG_INSTANCE:
+ fn = cl.QueryInstances
+ elif kind == constants.TAG_NODEGROUP:
+ fn = cl.QueryGroups
+ else:
+ fn = cl.QueryNodes
+ result = fn(names=[self.name], fields=["tags"], use_locking=False)
+ if not result or not result[0]:
+ raise http.HttpBadGateway("Invalid response from tag query")
+ tags = result[0][0]
- def PUT(self):
+ elif kind == constants.TAG_CLUSTER:
+ assert not self.name
+ # TODO: Use query API?
+ ssc = ssconf.SimpleStore()
+ tags = ssc.GetClusterTags()
+
+ return list(tags)
+
+ def GetPutOpInput(self):
"""Add a set of tags.
The request as a list of strings should be PUT to this URI. And
you'll have back a job id.
"""
- # pylint: disable=W0212
- if "tag" not in self.queryargs:
- raise http.HttpBadRequest("Please specify tag(s) to add using the"
- " the 'tag' parameter")
- return baserlib._Tags_PUT(self.TAG_LEVEL,
- self.queryargs["tag"], name=self.name,
- dry_run=bool(self.dryRun()))
+ return ({}, {
+ "kind": self.TAG_LEVEL,
+ "name": self.name,
+ "tags": self.queryargs.get("tag", []),
+ "dry_run": self.dryRun(),
+ })
- def DELETE(self):
+ def GetDeleteOpInput(self):
"""Delete a tag.
In order to delete a set of tags, the DELETE
/tags?tag=[tag]&tag=[tag]
"""
- # pylint: disable=W0212
- if "tag" not in self.queryargs:
- # no we not gonna delete all tags
- raise http.HttpBadRequest("Cannot delete all tags - please specify"
- " tag(s) using the 'tag' parameter")
- return baserlib._Tags_DELETE(self.TAG_LEVEL,
- self.queryargs["tag"],
- name=self.name,
- dry_run=bool(self.dryRun()))
+ # Re-use code
+ return self.GetPutOpInput()
class R_2_instances_name_tags(_R_Tags):
# if they need to start using instance attributes
# R0904: Too many public methods
-import os
import logging
import zlib
import base64
from ganeti import netutils
from ganeti import ssconf
from ganeti import runtime
+from ganeti import compat
+from ganeti import rpc_defs
+
+# Special module generated at build time
+from ganeti import _generated_rpc
# pylint has a bug here, doesn't see this import
import ganeti.http.client # pylint: disable=W0611
_TMO_4HRS = 4 * 3600
_TMO_1DAY = 86400
-# Timeout table that will be built later by decorators
-# Guidelines for choosing timeouts:
-# - call used during watcher: timeout -> 1min, _TMO_URGENT
-# - trivial (but be sure it is trivial) (e.g. reading a file): 5min, _TMO_FAST
-# - other calls: 15 min, _TMO_NORMAL
-# - special calls (instance add, etc.): either _TMO_SLOW (1h) or huge timeouts
-
-_TIMEOUTS = {
-}
+#: Special value to describe an offline host
+_OFFLINE = object()
def Init():
curl.setopt(pycurl.CONNECTTIMEOUT, _RPC_CONNECT_TIMEOUT)
-# Aliasing this module avoids the following warning by epydoc: "Warning: No
-# information available for ganeti.rpc._RpcThreadLocal's base threading.local"
-_threading = threading
-
-
-class _RpcThreadLocal(_threading.local):
- def GetHttpClientPool(self):
- """Returns a per-thread HTTP client pool.
-
- @rtype: L{http.client.HttpClientPool}
-
- """
- try:
- pool = self.hcp
- except AttributeError:
- pool = http.client.HttpClientPool(_ConfigRpcCurl)
- self.hcp = pool
-
- return pool
-
-
-# Remove module alias (see above)
-del _threading
-
-
-_thread_local = _RpcThreadLocal()
-
-
-def _RpcTimeout(secs):
- """Timeout decorator.
-
- When applied to a rpc call_* function, it updates the global timeout
- table with the given function/timeout.
-
- """
- def decorator(f):
- name = f.__name__
- assert name.startswith("call_")
- _TIMEOUTS[name[len("call_"):]] = secs
- return f
- return decorator
-
-
def RunWithRPC(fn):
"""RPC-wrapper decorator.
return wrapper
+def _Compress(data):
+ """Compresses a string for transport over RPC.
+
+ Small amounts of data are not compressed.
+
+ @type data: str
+ @param data: Data
+ @rtype: tuple
+ @return: Encoded data to send
+
+ """
+ # Small amounts of data are not compressed
+ if len(data) < 512:
+ return (constants.RPC_ENCODING_NONE, data)
+
+ # Compress with zlib and encode in base64
+ return (constants.RPC_ENCODING_ZLIB_BASE64,
+ base64.b64encode(zlib.compress(data, 3)))
+
+
class RpcResult(object):
"""RPC Result class.
raise ec(*args) # pylint: disable=W0142
-def _AddressLookup(node_list,
- ssc=ssconf.SimpleStore,
- nslookup_fn=netutils.Hostname.GetIP):
+def _SsconfResolver(node_list, _,
+ ssc=ssconf.SimpleStore,
+ nslookup_fn=netutils.Hostname.GetIP):
"""Return addresses for given node names.
@type node_list: list
@param ssc: SimpleStore class that is used to obtain node->ip mappings
@type nslookup_fn: callable
@param nslookup_fn: function use to do NS lookup
- @rtype: list of addresses and/or None's
- @returns: List of corresponding addresses, if found
+ @rtype: list of tuple; (string, string)
+ @return: List of tuples containing node name and IP address
"""
ss = ssc()
iplist = ss.GetNodePrimaryIPList()
family = ss.GetPrimaryIPFamily()
- addresses = []
ipmap = dict(entry.split() for entry in iplist)
- for node in node_list:
- address = ipmap.get(node)
- if address is None:
- address = nslookup_fn(node, family=family)
- addresses.append(address)
- return addresses
-
-
-class Client:
- """RPC Client class.
-
- This class, given a (remote) method name, a list of parameters and a
- list of nodes, will contact (in parallel) all nodes, and return a
- dict of results (key: node name, value: result).
-
- One current bug is that generic failure is still signaled by
- 'False' result, which is not good. This overloading of values can
- cause bugs.
-
- """
- def __init__(self, procedure, body, port, address_lookup_fn=_AddressLookup):
- assert procedure in _TIMEOUTS, ("New RPC call not declared in the"
- " timeouts table")
- self.procedure = procedure
- self.body = body
- self.port = port
- self._request = {}
- self._address_lookup_fn = address_lookup_fn
-
- def ConnectList(self, node_list, address_list=None, read_timeout=None):
- """Add a list of nodes to the target nodes.
-
- @type node_list: list
- @param node_list: the list of node names to connect
- @type address_list: list or None
- @keyword address_list: either None or a list with node addresses,
- which must have the same length as the node list
- @type read_timeout: int
- @param read_timeout: overwrites default timeout for operation
-
- """
- if address_list is None:
- # Always use IP address instead of node name
- address_list = self._address_lookup_fn(node_list)
-
- assert len(node_list) == len(address_list), \
- "Name and address lists must have the same length"
+ result = []
+ for node in node_list:
+ ip = ipmap.get(node)
+ if ip is None:
+ ip = nslookup_fn(node, family=family)
+ result.append((node, ip))
- for node, address in zip(node_list, address_list):
- self.ConnectNode(node, address, read_timeout=read_timeout)
+ return result
- def ConnectNode(self, name, address=None, read_timeout=None):
- """Add a node to the target list.
- @type name: str
- @param name: the node name
- @type address: str
- @param address: the node address, if known
- @type read_timeout: int
- @param read_timeout: overwrites default timeout for operation
+class _StaticResolver:
+ def __init__(self, addresses):
+ """Initializes this class.
"""
- if address is None:
- # Always use IP address instead of node name
- address = self._address_lookup_fn([name])[0]
-
- assert(address is not None)
-
- if read_timeout is None:
- read_timeout = _TIMEOUTS[self.procedure]
-
- self._request[name] = \
- http.client.HttpClientRequest(str(address), self.port,
- http.HTTP_PUT, str("/%s" % self.procedure),
- headers=_RPC_CLIENT_HEADERS,
- post_data=str(self.body),
- read_timeout=read_timeout)
+ self._addresses = addresses
- def GetResults(self, http_pool=None):
- """Call nodes and return results.
-
- @rtype: list
- @return: List of RPC results
+ def __call__(self, hosts, _):
+ """Returns static addresses for hosts.
"""
- if not http_pool:
- http_pool = http.client.HttpClientPool(_ConfigRpcCurl)
-
- http_pool.ProcessRequests(self._request.values())
-
- results = {}
-
- for name, req in self._request.iteritems():
- if req.success and req.resp_status_code == http.HTTP_OK:
- results[name] = RpcResult(data=serializer.LoadJson(req.resp_body),
- node=name, call=self.procedure)
- continue
-
- # TODO: Better error reporting
- if req.error:
- msg = req.error
- else:
- msg = req.resp_body
-
- logging.error("RPC error in %s from node %s: %s",
- self.procedure, name, msg)
- results[name] = RpcResult(data=msg, failed=True, node=name,
- call=self.procedure)
+ assert len(hosts) == len(self._addresses)
+ return zip(hosts, self._addresses)
- return results
+def _CheckConfigNode(name, node, accept_offline_node):
+ """Checks if a node is online.
-def _EncodeImportExportIO(ieio, ieioargs):
- """Encodes import/export I/O information.
+ @type name: string
+ @param name: Node name
+ @type node: L{objects.Node} or None
+ @param node: Node object
"""
- if ieio == constants.IEIO_RAW_DISK:
- assert len(ieioargs) == 1
- return (ieioargs[0].ToDict(), )
+ if node is None:
+ # Depend on DNS for name resolution
+ ip = name
+ elif node.offline and not accept_offline_node:
+ ip = _OFFLINE
+ else:
+ ip = node.primary_ip
+ return (name, ip)
- if ieio == constants.IEIO_SCRIPT:
- assert len(ieioargs) == 2
- return (ieioargs[0].ToDict(), ieioargs[1])
-
- return ieioargs
+def _NodeConfigResolver(single_node_fn, all_nodes_fn, hosts, opts):
+ """Calculate node addresses using configuration.
-class RpcRunner(object):
- """RPC runner class"""
+ """
+ accept_offline_node = (opts is rpc_defs.ACCEPT_OFFLINE_NODE)
- def __init__(self, cfg):
- """Initialized the rpc runner.
+ assert accept_offline_node or opts is None, "Unknown option"
- @type cfg: C{config.ConfigWriter}
- @param cfg: the configuration object that will be used to get data
- about the cluster
+ # Special case for single-host lookups
+ if len(hosts) == 1:
+ (name, ) = hosts
+ return [_CheckConfigNode(name, single_node_fn(name), accept_offline_node)]
+ else:
+ all_nodes = all_nodes_fn()
+ return [_CheckConfigNode(name, all_nodes.get(name, None),
+ accept_offline_node)
+ for name in hosts]
- """
- self._cfg = cfg
- self.port = netutils.GetDaemonPort(constants.NODED)
-
- def _InstDict(self, instance, hvp=None, bep=None, osp=None):
- """Convert the given instance to a dict.
- This is done via the instance's ToDict() method and additionally
- we fill the hvparams with the cluster defaults.
+class _RpcProcessor:
+ def __init__(self, resolver, port, lock_monitor_cb=None):
+ """Initializes this class.
- @type instance: L{objects.Instance}
- @param instance: an Instance object
- @type hvp: dict or None
- @param hvp: a dictionary with overridden hypervisor parameters
- @type bep: dict or None
- @param bep: a dictionary with overridden backend parameters
- @type osp: dict or None
- @param osp: a dictionary with overridden os parameters
- @rtype: dict
- @return: the instance dict, with the hvparams filled with the
- cluster defaults
+ @param resolver: callable accepting a list of hostnames, returning a list
+ of tuples containing name and IP address (IP address can be the name or
+ the special value L{_OFFLINE} to mark offline machines)
+ @type port: int
+ @param port: TCP port
+ @param lock_monitor_cb: Callable for registering with lock monitor
"""
- idict = instance.ToDict()
- cluster = self._cfg.GetClusterInfo()
- idict["hvparams"] = cluster.FillHV(instance)
- if hvp is not None:
- idict["hvparams"].update(hvp)
- idict["beparams"] = cluster.FillBE(instance)
- if bep is not None:
- idict["beparams"].update(bep)
- idict["osparams"] = cluster.SimpleFillOS(instance.os, instance.osparams)
- if osp is not None:
- idict["osparams"].update(osp)
- for nic in idict["nics"]:
- nic['nicparams'] = objects.FillDict(
- cluster.nicparams[constants.PP_DEFAULT],
- nic['nicparams'])
- return idict
+ self._resolver = resolver
+ self._port = port
+ self._lock_monitor_cb = lock_monitor_cb
- def _ConnectList(self, client, node_list, call, read_timeout=None):
- """Helper for computing node addresses.
+ @staticmethod
+ def _PrepareRequests(hosts, port, procedure, body, read_timeout):
+ """Prepares requests by sorting offline hosts into separate list.
- @type client: L{ganeti.rpc.Client}
- @param client: a C{Client} instance
- @type node_list: list
- @param node_list: the node list we should connect
- @type call: string
- @param call: the name of the remote procedure call, for filling in
- correctly any eventual offline nodes' results
- @type read_timeout: int
- @param read_timeout: overwrites the default read timeout for the
- given operation
+ @type body: dict
+ @param body: a dictionary with per-host body data
"""
- all_nodes = self._cfg.GetAllNodesInfo()
- name_list = []
- addr_list = []
- skip_dict = {}
- for node in node_list:
- if node in all_nodes:
- if all_nodes[node].offline:
- skip_dict[node] = RpcResult(node=node, offline=True, call=call)
- continue
- val = all_nodes[node].primary_ip
+ results = {}
+ requests = {}
+
+ assert isinstance(body, dict)
+ assert len(body) == len(hosts)
+ assert compat.all(isinstance(v, str) for v in body.values())
+ assert frozenset(map(compat.fst, hosts)) == frozenset(body.keys()), \
+ "%s != %s" % (hosts, body.keys())
+
+ for (name, ip) in hosts:
+ if ip is _OFFLINE:
+ # Node is marked as offline
+ results[name] = RpcResult(node=name, offline=True, call=procedure)
else:
- val = None
- addr_list.append(val)
- name_list.append(node)
- if name_list:
- client.ConnectList(name_list, address_list=addr_list,
- read_timeout=read_timeout)
- return skip_dict
-
- def _ConnectNode(self, client, node, call, read_timeout=None):
- """Helper for computing one node's address.
-
- @type client: L{ganeti.rpc.Client}
- @param client: a C{Client} instance
- @type node: str
- @param node: the node we should connect
- @type call: string
- @param call: the name of the remote procedure call, for filling in
- correctly any eventual offline nodes' results
- @type read_timeout: int
- @param read_timeout: overwrites the default read timeout for the
- given operation
-
- """
- node_info = self._cfg.GetNodeInfo(node)
- if node_info is not None:
- if node_info.offline:
- return RpcResult(node=node, offline=True, call=call)
- addr = node_info.primary_ip
- else:
- addr = None
- client.ConnectNode(node, address=addr, read_timeout=read_timeout)
+ requests[name] = \
+ http.client.HttpClientRequest(str(ip), port,
+ http.HTTP_PUT, str("/%s" % procedure),
+ headers=_RPC_CLIENT_HEADERS,
+ post_data=body[name],
+ read_timeout=read_timeout,
+ nicename="%s/%s" % (name, procedure),
+ curl_config_fn=_ConfigRpcCurl)
- def _MultiNodeCall(self, node_list, procedure, args, read_timeout=None):
- """Helper for making a multi-node call
-
- """
- body = serializer.DumpJson(args, indent=False)
- c = Client(procedure, body, self.port)
- skip_dict = self._ConnectList(c, node_list, procedure,
- read_timeout=read_timeout)
- skip_dict.update(c.GetResults())
- return skip_dict
-
- @classmethod
- def _StaticMultiNodeCall(cls, node_list, procedure, args,
- address_list=None, read_timeout=None):
- """Helper for making a multi-node static call
-
- """
- body = serializer.DumpJson(args, indent=False)
- c = Client(procedure, body, netutils.GetDaemonPort(constants.NODED))
- c.ConnectList(node_list, address_list=address_list,
- read_timeout=read_timeout)
- return c.GetResults()
-
- def _SingleNodeCall(self, node, procedure, args, read_timeout=None):
- """Helper for making a single-node call
-
- """
- body = serializer.DumpJson(args, indent=False)
- c = Client(procedure, body, self.port)
- result = self._ConnectNode(c, node, procedure, read_timeout=read_timeout)
- if result is None:
- # we did connect, node is not offline
- result = c.GetResults()[node]
- return result
-
- @classmethod
- def _StaticSingleNodeCall(cls, node, procedure, args, read_timeout=None):
- """Helper for making a single-node static call
-
- """
- body = serializer.DumpJson(args, indent=False)
- c = Client(procedure, body, netutils.GetDaemonPort(constants.NODED))
- c.ConnectNode(node, read_timeout=read_timeout)
- return c.GetResults()[node]
+ return (results, requests)
@staticmethod
- def _Compress(data):
- """Compresses a string for transport over RPC.
-
- Small amounts of data are not compressed.
-
- @type data: str
- @param data: Data
- @rtype: tuple
- @return: Encoded data to send
+ def _CombineResults(results, requests, procedure):
+ """Combines pre-computed results for offline hosts with actual call results.
"""
- # Small amounts of data are not compressed
- if len(data) < 512:
- return (constants.RPC_ENCODING_NONE, data)
-
- # Compress with zlib and encode in base64
- return (constants.RPC_ENCODING_ZLIB_BASE64,
- base64.b64encode(zlib.compress(data, 3)))
-
- #
- # Begin RPC calls
- #
-
- @_RpcTimeout(_TMO_URGENT)
- def call_bdev_sizes(self, node_list, devices):
- """Gets the sizes of requested block devices present on a node
-
- This is a multi-node call.
-
- """
- return self._MultiNodeCall(node_list, "bdev_sizes", [devices])
-
- @_RpcTimeout(_TMO_URGENT)
- def call_lv_list(self, node_list, vg_name):
- """Gets the logical volumes present in a given volume group.
-
- This is a multi-node call.
-
- """
- return self._MultiNodeCall(node_list, "lv_list", [vg_name])
-
- @_RpcTimeout(_TMO_URGENT)
- def call_vg_list(self, node_list):
- """Gets the volume group list.
-
- This is a multi-node call.
-
- """
- return self._MultiNodeCall(node_list, "vg_list", [])
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_storage_list(self, node_list, su_name, su_args, name, fields):
- """Get list of storage units.
-
- This is a multi-node call.
-
- """
- return self._MultiNodeCall(node_list, "storage_list",
- [su_name, su_args, name, fields])
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_storage_modify(self, node, su_name, su_args, name, changes):
- """Modify a storage unit.
-
- This is a single-node call.
-
- """
- return self._SingleNodeCall(node, "storage_modify",
- [su_name, su_args, name, changes])
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_storage_execute(self, node, su_name, su_args, name, op):
- """Executes an operation on a storage unit.
-
- This is a single-node call.
-
- """
- return self._SingleNodeCall(node, "storage_execute",
- [su_name, su_args, name, op])
-
- @_RpcTimeout(_TMO_URGENT)
- def call_bridges_exist(self, node, bridges_list):
- """Checks if a node has all the bridges given.
-
- This method checks if all bridges given in the bridges_list are
- present on the remote node, so that an instance that uses interfaces
- on those bridges can be started.
-
- This is a single-node call.
-
- """
- return self._SingleNodeCall(node, "bridges_exist", [bridges_list])
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_instance_start(self, node, instance, hvp, bep, startup_paused):
- """Starts an instance.
-
- This is a single-node call.
-
- """
- idict = self._InstDict(instance, hvp=hvp, bep=bep)
- return self._SingleNodeCall(node, "instance_start", [idict, startup_paused])
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_instance_shutdown(self, node, instance, timeout):
- """Stops an instance.
-
- This is a single-node call.
-
- """
- return self._SingleNodeCall(node, "instance_shutdown",
- [self._InstDict(instance), timeout])
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_migration_info(self, node, instance):
- """Gather the information necessary to prepare an instance migration.
-
- This is a single-node call.
-
- @type node: string
- @param node: the node on which the instance is currently running
- @type instance: C{objects.Instance}
- @param instance: the instance definition
-
- """
- return self._SingleNodeCall(node, "migration_info",
- [self._InstDict(instance)])
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_accept_instance(self, node, instance, info, target):
- """Prepare a node to accept an instance.
-
- This is a single-node call.
-
- @type node: string
- @param node: the target node for the migration
- @type instance: C{objects.Instance}
- @param instance: the instance definition
- @type info: opaque/hypervisor specific (string/data)
- @param info: result for the call_migration_info call
- @type target: string
- @param target: target hostname (usually ip address) (on the node itself)
-
- """
- return self._SingleNodeCall(node, "accept_instance",
- [self._InstDict(instance), info, target])
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_finalize_migration(self, node, instance, info, success):
- """Finalize any target-node migration specific operation.
-
- This is called both in case of a successful migration and in case of error
- (in which case it should abort the migration).
-
- This is a single-node call.
-
- @type node: string
- @param node: the target node for the migration
- @type instance: C{objects.Instance}
- @param instance: the instance definition
- @type info: opaque/hypervisor specific (string/data)
- @param info: result for the call_migration_info call
- @type success: boolean
- @param success: whether the migration was a success or a failure
-
- """
- return self._SingleNodeCall(node, "finalize_migration",
- [self._InstDict(instance), info, success])
-
- @_RpcTimeout(_TMO_SLOW)
- def call_instance_migrate(self, node, instance, target, live):
- """Migrate an instance.
-
- This is a single-node call.
-
- @type node: string
- @param node: the node on which the instance is currently running
- @type instance: C{objects.Instance}
- @param instance: the instance definition
- @type target: string
- @param target: the target node name
- @type live: boolean
- @param live: whether the migration should be done live or not (the
- interpretation of this parameter is left to the hypervisor)
-
- """
- return self._SingleNodeCall(node, "instance_migrate",
- [self._InstDict(instance), target, live])
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_instance_reboot(self, node, inst, reboot_type, shutdown_timeout):
- """Reboots an instance.
-
- This is a single-node call.
-
- """
- return self._SingleNodeCall(node, "instance_reboot",
- [self._InstDict(inst), reboot_type,
- shutdown_timeout])
-
- @_RpcTimeout(_TMO_1DAY)
- def call_instance_os_add(self, node, inst, reinstall, debug, osparams=None):
- """Installs an OS on the given instance.
-
- This is a single-node call.
-
- """
- return self._SingleNodeCall(node, "instance_os_add",
- [self._InstDict(inst, osp=osparams),
- reinstall, debug])
-
- @_RpcTimeout(_TMO_SLOW)
- def call_instance_run_rename(self, node, inst, old_name, debug):
- """Run the OS rename script for an instance.
-
- This is a single-node call.
-
- """
- return self._SingleNodeCall(node, "instance_run_rename",
- [self._InstDict(inst), old_name, debug])
-
- @_RpcTimeout(_TMO_URGENT)
- def call_instance_info(self, node, instance, hname):
- """Returns information about a single instance.
-
- This is a single-node call.
-
- @type node: list
- @param node: the list of nodes to query
- @type instance: string
- @param instance: the instance name
- @type hname: string
- @param hname: the hypervisor type of the instance
-
- """
- return self._SingleNodeCall(node, "instance_info", [instance, hname])
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_instance_migratable(self, node, instance):
- """Checks whether the given instance can be migrated.
-
- This is a single-node call.
-
- @param node: the node to query
- @type instance: L{objects.Instance}
- @param instance: the instance to check
-
-
- """
- return self._SingleNodeCall(node, "instance_migratable",
- [self._InstDict(instance)])
-
- @_RpcTimeout(_TMO_URGENT)
- def call_all_instances_info(self, node_list, hypervisor_list):
- """Returns information about all instances on the given nodes.
-
- This is a multi-node call.
-
- @type node_list: list
- @param node_list: the list of nodes to query
- @type hypervisor_list: list
- @param hypervisor_list: the hypervisors to query for instances
-
- """
- return self._MultiNodeCall(node_list, "all_instances_info",
- [hypervisor_list])
-
- @_RpcTimeout(_TMO_URGENT)
- def call_instance_list(self, node_list, hypervisor_list):
- """Returns the list of running instances on a given node.
-
- This is a multi-node call.
-
- @type node_list: list
- @param node_list: the list of nodes to query
- @type hypervisor_list: list
- @param hypervisor_list: the hypervisors to query for instances
-
- """
- return self._MultiNodeCall(node_list, "instance_list", [hypervisor_list])
-
- @_RpcTimeout(_TMO_FAST)
- def call_node_tcp_ping(self, node, source, target, port, timeout,
- live_port_needed):
- """Do a TcpPing on the remote node
-
- This is a single-node call.
-
- """
- return self._SingleNodeCall(node, "node_tcp_ping",
- [source, target, port, timeout,
- live_port_needed])
-
- @_RpcTimeout(_TMO_FAST)
- def call_node_has_ip_address(self, node, address):
- """Checks if a node has the given IP address.
-
- This is a single-node call.
-
- """
- return self._SingleNodeCall(node, "node_has_ip_address", [address])
-
- @_RpcTimeout(_TMO_URGENT)
- def call_node_info(self, node_list, vg_name, hypervisor_type):
- """Return node information.
-
- This will return memory information and volume group size and free
- space.
-
- This is a multi-node call.
-
- @type node_list: list
- @param node_list: the list of nodes to query
- @type vg_name: C{string}
- @param vg_name: the name of the volume group to ask for disk space
- information
- @type hypervisor_type: C{str}
- @param hypervisor_type: the name of the hypervisor to ask for
- memory information
-
- """
- return self._MultiNodeCall(node_list, "node_info",
- [vg_name, hypervisor_type])
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_etc_hosts_modify(self, node, mode, name, ip):
- """Modify hosts file with name
-
- @type node: string
- @param node: The node to call
- @type mode: string
- @param mode: The mode to operate. Currently "add" or "remove"
- @type name: string
- @param name: The host name to be modified
- @type ip: string
- @param ip: The ip of the entry (just valid if mode is "add")
-
- """
- return self._SingleNodeCall(node, "etc_hosts_modify", [mode, name, ip])
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_node_verify(self, node_list, checkdict, cluster_name):
- """Request verification of given parameters.
-
- This is a multi-node call.
-
- """
- return self._MultiNodeCall(node_list, "node_verify",
- [checkdict, cluster_name])
-
- @classmethod
- @_RpcTimeout(_TMO_FAST)
- def call_node_start_master_daemons(cls, node, no_voting):
- """Starts master daemons on a node.
-
- This is a single-node call.
-
- """
- return cls._StaticSingleNodeCall(node, "node_start_master_daemons",
- [no_voting])
-
- @classmethod
- @_RpcTimeout(_TMO_FAST)
- def call_node_activate_master_ip(cls, node):
- """Activates master IP on a node.
-
- This is a single-node call.
-
- """
- return cls._StaticSingleNodeCall(node, "node_activate_master_ip", [])
-
- @classmethod
- @_RpcTimeout(_TMO_FAST)
- def call_node_stop_master(cls, node):
- """Deactivates master IP and stops master daemons on a node.
-
- This is a single-node call.
-
- """
- return cls._StaticSingleNodeCall(node, "node_stop_master", [])
-
- @classmethod
- @_RpcTimeout(_TMO_FAST)
- def call_node_deactivate_master_ip(cls, node):
- """Deactivates master IP on a node.
-
- This is a single-node call.
-
- """
- return cls._StaticSingleNodeCall(node, "node_deactivate_master_ip", [])
-
- @classmethod
- @_RpcTimeout(_TMO_URGENT)
- def call_master_info(cls, node_list):
- """Query master info.
-
- This is a multi-node call.
-
- """
- # TODO: should this method query down nodes?
- return cls._StaticMultiNodeCall(node_list, "master_info", [])
-
- @classmethod
- @_RpcTimeout(_TMO_URGENT)
- def call_version(cls, node_list):
- """Query node version.
-
- This is a multi-node call.
-
- """
- return cls._StaticMultiNodeCall(node_list, "version", [])
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_blockdev_create(self, node, bdev, size, owner, on_primary, info):
- """Request creation of a given block device.
-
- This is a single-node call.
-
- """
- return self._SingleNodeCall(node, "blockdev_create",
- [bdev.ToDict(), size, owner, on_primary, info])
-
- @_RpcTimeout(_TMO_SLOW)
- def call_blockdev_wipe(self, node, bdev, offset, size):
- """Request wipe at given offset with given size of a block device.
-
- This is a single-node call.
-
- """
- return self._SingleNodeCall(node, "blockdev_wipe",
- [bdev.ToDict(), offset, size])
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_blockdev_remove(self, node, bdev):
- """Request removal of a given block device.
-
- This is a single-node call.
-
- """
- return self._SingleNodeCall(node, "blockdev_remove", [bdev.ToDict()])
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_blockdev_rename(self, node, devlist):
- """Request rename of the given block devices.
-
- This is a single-node call.
-
- """
- return self._SingleNodeCall(node, "blockdev_rename",
- [(d.ToDict(), uid) for d, uid in devlist])
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_blockdev_pause_resume_sync(self, node, disks, pause):
- """Request a pause/resume of given block device.
-
- This is a single-node call.
-
- """
- return self._SingleNodeCall(node, "blockdev_pause_resume_sync",
- [[bdev.ToDict() for bdev in disks], pause])
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_blockdev_assemble(self, node, disk, owner, on_primary, idx):
- """Request assembling of a given block device.
-
- This is a single-node call.
-
- """
- return self._SingleNodeCall(node, "blockdev_assemble",
- [disk.ToDict(), owner, on_primary, idx])
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_blockdev_shutdown(self, node, disk):
- """Request shutdown of a given block device.
-
- This is a single-node call.
-
- """
- return self._SingleNodeCall(node, "blockdev_shutdown", [disk.ToDict()])
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_blockdev_addchildren(self, node, bdev, ndevs):
- """Request adding a list of children to a (mirroring) device.
-
- This is a single-node call.
-
- """
- return self._SingleNodeCall(node, "blockdev_addchildren",
- [bdev.ToDict(),
- [disk.ToDict() for disk in ndevs]])
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_blockdev_removechildren(self, node, bdev, ndevs):
- """Request removing a list of children from a (mirroring) device.
-
- This is a single-node call.
-
- """
- return self._SingleNodeCall(node, "blockdev_removechildren",
- [bdev.ToDict(),
- [disk.ToDict() for disk in ndevs]])
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_blockdev_getmirrorstatus(self, node, disks):
- """Request status of a (mirroring) device.
-
- This is a single-node call.
-
- """
- result = self._SingleNodeCall(node, "blockdev_getmirrorstatus",
- [dsk.ToDict() for dsk in disks])
- if not result.fail_msg:
- result.payload = [objects.BlockDevStatus.FromDict(i)
- for i in result.payload]
- return result
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_blockdev_getmirrorstatus_multi(self, node_list, node_disks):
- """Request status of (mirroring) devices from multiple nodes.
-
- This is a multi-node call.
-
- """
- result = self._MultiNodeCall(node_list, "blockdev_getmirrorstatus_multi",
- [dict((name, [dsk.ToDict() for dsk in disks])
- for name, disks in node_disks.items())])
- for nres in result.values():
- if nres.fail_msg:
- continue
-
- for idx, (success, status) in enumerate(nres.payload):
- if success:
- nres.payload[idx] = (success, objects.BlockDevStatus.FromDict(status))
-
- return result
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_blockdev_find(self, node, disk):
- """Request identification of a given block device.
-
- This is a single-node call.
-
- """
- result = self._SingleNodeCall(node, "blockdev_find", [disk.ToDict()])
- if not result.fail_msg and result.payload is not None:
- result.payload = objects.BlockDevStatus.FromDict(result.payload)
- return result
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_blockdev_close(self, node, instance_name, disks):
- """Closes the given block devices.
-
- This is a single-node call.
-
- """
- params = [instance_name, [cf.ToDict() for cf in disks]]
- return self._SingleNodeCall(node, "blockdev_close", params)
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_blockdev_getsize(self, node, disks):
- """Returns the size of the given disks.
-
- This is a single-node call.
-
- """
- params = [[cf.ToDict() for cf in disks]]
- return self._SingleNodeCall(node, "blockdev_getsize", params)
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_drbd_disconnect_net(self, node_list, nodes_ip, disks):
- """Disconnects the network of the given drbd devices.
-
- This is a multi-node call.
-
- """
- return self._MultiNodeCall(node_list, "drbd_disconnect_net",
- [nodes_ip, [cf.ToDict() for cf in disks]])
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_drbd_attach_net(self, node_list, nodes_ip,
- disks, instance_name, multimaster):
- """Disconnects the given drbd devices.
-
- This is a multi-node call.
-
- """
- return self._MultiNodeCall(node_list, "drbd_attach_net",
- [nodes_ip, [cf.ToDict() for cf in disks],
- instance_name, multimaster])
-
- @_RpcTimeout(_TMO_SLOW)
- def call_drbd_wait_sync(self, node_list, nodes_ip, disks):
- """Waits for the synchronization of drbd devices is complete.
-
- This is a multi-node call.
-
- """
- return self._MultiNodeCall(node_list, "drbd_wait_sync",
- [nodes_ip, [cf.ToDict() for cf in disks]])
-
- @_RpcTimeout(_TMO_URGENT)
- def call_drbd_helper(self, node_list):
- """Gets drbd helper.
-
- This is a multi-node call.
-
- """
- return self._MultiNodeCall(node_list, "drbd_helper", [])
-
- @classmethod
- @_RpcTimeout(_TMO_NORMAL)
- def call_upload_file(cls, node_list, file_name, address_list=None):
- """Upload a file.
-
- The node will refuse the operation in case the file is not on the
- approved file list.
-
- This is a multi-node call.
-
- @type node_list: list
- @param node_list: the list of node names to upload to
- @type file_name: str
- @param file_name: the filename to upload
- @type address_list: list or None
- @keyword address_list: an optional list of node addresses, in order
- to optimize the RPC speed
-
- """
- file_contents = utils.ReadFile(file_name)
- data = cls._Compress(file_contents)
- st = os.stat(file_name)
- getents = runtime.GetEnts()
- params = [file_name, data, st.st_mode, getents.LookupUid(st.st_uid),
- getents.LookupGid(st.st_gid), st.st_atime, st.st_mtime]
- return cls._StaticMultiNodeCall(node_list, "upload_file", params,
- address_list=address_list)
-
- @classmethod
- @_RpcTimeout(_TMO_NORMAL)
- def call_write_ssconf_files(cls, node_list, values):
- """Write ssconf files.
-
- This is a multi-node call.
-
- """
- return cls._StaticMultiNodeCall(node_list, "write_ssconf_files", [values])
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_run_oob(self, node, oob_program, command, remote_node, timeout):
- """Runs OOB.
-
- This is a single-node call.
-
- """
- return self._SingleNodeCall(node, "run_oob", [oob_program, command,
- remote_node, timeout])
-
- @_RpcTimeout(_TMO_FAST)
- def call_os_diagnose(self, node_list):
- """Request a diagnose of OS definitions.
-
- This is a multi-node call.
-
- """
- return self._MultiNodeCall(node_list, "os_diagnose", [])
-
- @_RpcTimeout(_TMO_FAST)
- def call_os_get(self, node, name):
- """Returns an OS definition.
-
- This is a single-node call.
-
- """
- result = self._SingleNodeCall(node, "os_get", [name])
- if not result.fail_msg and isinstance(result.payload, dict):
- result.payload = objects.OS.FromDict(result.payload)
- return result
-
- @_RpcTimeout(_TMO_FAST)
- def call_os_validate(self, required, nodes, name, checks, params):
- """Run a validation routine for a given OS.
+ for name, req in requests.items():
+ if req.success and req.resp_status_code == http.HTTP_OK:
+ host_result = RpcResult(data=serializer.LoadJson(req.resp_body),
+ node=name, call=procedure)
+ else:
+ # TODO: Better error reporting
+ if req.error:
+ msg = req.error
+ else:
+ msg = req.resp_body
- This is a multi-node call.
+ logging.error("RPC error in %s on node %s: %s", procedure, name, msg)
+ host_result = RpcResult(data=msg, failed=True, node=name,
+ call=procedure)
- """
- return self._MultiNodeCall(nodes, "os_validate",
- [required, name, checks, params])
+ results[name] = host_result
- @_RpcTimeout(_TMO_NORMAL)
- def call_hooks_runner(self, node_list, hpath, phase, env):
- """Call the hooks runner.
+ return results
- Args:
- - op: the OpCode instance
- - env: a dictionary with the environment
+ def __call__(self, hosts, procedure, body, read_timeout, resolver_opts,
+ _req_process_fn=None):
+ """Makes an RPC request to a number of nodes.
- This is a multi-node call.
+ @type hosts: sequence
+ @param hosts: Hostnames
+ @type procedure: string
+ @param procedure: Request path
+ @type body: dictionary
+ @param body: dictionary with request bodies per host
+ @type read_timeout: int or None
+ @param read_timeout: Read timeout for request
"""
- params = [hpath, phase, env]
- return self._MultiNodeCall(node_list, "hooks_runner", params)
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_iallocator_runner(self, node, name, idata):
- """Call an iallocator on a remote node
+ assert read_timeout is not None, \
+ "Missing RPC read timeout for procedure '%s'" % procedure
- Args:
- - name: the iallocator name
- - input: the json-encoded input string
+ if _req_process_fn is None:
+ _req_process_fn = http.client.ProcessRequests
- This is a single-node call.
+ (results, requests) = \
+ self._PrepareRequests(self._resolver(hosts, resolver_opts), self._port,
+ procedure, body, read_timeout)
- """
- return self._SingleNodeCall(node, "iallocator_runner", [name, idata])
+ _req_process_fn(requests.values(), lock_monitor_cb=self._lock_monitor_cb)
- @_RpcTimeout(_TMO_NORMAL)
- def call_blockdev_grow(self, node, cf_bdev, amount, dryrun):
- """Request a snapshot of the given block device.
+ assert not frozenset(results).intersection(requests)
- This is a single-node call.
+ return self._CombineResults(results, requests, procedure)
- """
- return self._SingleNodeCall(node, "blockdev_grow",
- [cf_bdev.ToDict(), amount, dryrun])
- @_RpcTimeout(_TMO_1DAY)
- def call_blockdev_export(self, node, cf_bdev,
- dest_node, dest_path, cluster_name):
- """Export a given disk to another node.
-
- This is a single-node call.
+class _RpcClientBase:
+ def __init__(self, resolver, encoder_fn, lock_monitor_cb=None,
+ _req_process_fn=None):
+ """Initializes this class.
"""
- return self._SingleNodeCall(node, "blockdev_export",
- [cf_bdev.ToDict(), dest_node, dest_path,
- cluster_name])
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_blockdev_snapshot(self, node, cf_bdev):
- """Request a snapshot of the given block device.
+ proc = _RpcProcessor(resolver,
+ netutils.GetDaemonPort(constants.NODED),
+ lock_monitor_cb=lock_monitor_cb)
+ self._proc = compat.partial(proc, _req_process_fn=_req_process_fn)
+ self._encoder = compat.partial(self._EncodeArg, encoder_fn)
- This is a single-node call.
+ @staticmethod
+ def _EncodeArg(encoder_fn, (argkind, value)):
+ """Encode argument.
"""
- return self._SingleNodeCall(node, "blockdev_snapshot", [cf_bdev.ToDict()])
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_finalize_export(self, node, instance, snap_disks):
- """Request the completion of an export operation.
-
- This writes the export config file, etc.
+ if argkind is None:
+ return value
+ else:
+ return encoder_fn(argkind)(value)
- This is a single-node call.
+ def _Call(self, cdef, node_list, args):
+ """Entry point for automatically generated RPC wrappers.
"""
- flat_disks = []
- for disk in snap_disks:
- if isinstance(disk, bool):
- flat_disks.append(disk)
- else:
- flat_disks.append(disk.ToDict())
+ (procedure, _, resolver_opts, timeout, argdefs,
+ prep_fn, postproc_fn, _) = cdef
- return self._SingleNodeCall(node, "finalize_export",
- [self._InstDict(instance), flat_disks])
+ if callable(timeout):
+ read_timeout = timeout(args)
+ else:
+ read_timeout = timeout
- @_RpcTimeout(_TMO_FAST)
- def call_export_info(self, node, path):
- """Queries the export information in a given path.
+ if callable(resolver_opts):
+ req_resolver_opts = resolver_opts(args)
+ else:
+ req_resolver_opts = resolver_opts
- This is a single-node call.
+ if len(args) != len(argdefs):
+ raise errors.ProgrammerError("Number of passed arguments doesn't match")
- """
- return self._SingleNodeCall(node, "export_info", [path])
-
- @_RpcTimeout(_TMO_FAST)
- def call_export_list(self, node_list):
- """Gets the stored exports list.
+ enc_args = map(self._encoder, zip(map(compat.snd, argdefs), args))
+ if prep_fn is None:
+ # for a no-op prep_fn, we serialise the body once, and then we
+ # reuse it in the dictionary values
+ body = serializer.DumpJson(enc_args)
+ pnbody = dict((n, body) for n in node_list)
+ else:
+ # for a custom prep_fn, we pass the encoded arguments and the
+ # node name to the prep_fn, and we serialise its return value
+ assert callable(prep_fn)
+ pnbody = dict((n, serializer.DumpJson(prep_fn(n, enc_args)))
+ for n in node_list)
+
+ result = self._proc(node_list, procedure, pnbody, read_timeout,
+ req_resolver_opts)
+
+ if postproc_fn:
+ return dict(map(lambda (key, value): (key, postproc_fn(value)),
+ result.items()))
+ else:
+ return result
- This is a multi-node call.
- """
- return self._MultiNodeCall(node_list, "export_list", [])
+def _ObjectToDict(value):
+ """Converts an object to a dictionary.
- @_RpcTimeout(_TMO_FAST)
- def call_export_remove(self, node, export):
- """Requests removal of a given export.
+ @note: See L{objects}.
- This is a single-node call.
+ """
+ return value.ToDict()
- """
- return self._SingleNodeCall(node, "export_remove", [export])
- @classmethod
- @_RpcTimeout(_TMO_NORMAL)
- def call_node_leave_cluster(cls, node, modify_ssh_setup):
- """Requests a node to clean the cluster information it has.
+def _ObjectListToDict(value):
+ """Converts a list of L{objects} to dictionaries.
- This will remove the configuration information from the ganeti data
- dir.
+ """
+ return map(_ObjectToDict, value)
- This is a single-node call.
- """
- return cls._StaticSingleNodeCall(node, "node_leave_cluster",
- [modify_ssh_setup])
+def _EncodeNodeToDiskDict(value):
+ """Encodes a dictionary with node name as key and disk objects as values.
- @_RpcTimeout(_TMO_FAST)
- def call_node_volumes(self, node_list):
- """Gets all volumes on node(s).
+ """
+ return dict((name, _ObjectListToDict(disks))
+ for name, disks in value.items())
- This is a multi-node call.
- """
- return self._MultiNodeCall(node_list, "node_volumes", [])
+def _PrepareFileUpload(getents_fn, filename):
+ """Loads a file and prepares it for an upload to nodes.
- @_RpcTimeout(_TMO_FAST)
- def call_node_demote_from_mc(self, node):
- """Demote a node from the master candidate role.
+ """
+ statcb = utils.FileStatHelper()
+ data = _Compress(utils.ReadFile(filename, preread=statcb))
+ st = statcb.st
- This is a single-node call.
+ if getents_fn is None:
+ getents_fn = runtime.GetEnts
- """
- return self._SingleNodeCall(node, "node_demote_from_mc", [])
+ getents = getents_fn()
- @_RpcTimeout(_TMO_NORMAL)
- def call_node_powercycle(self, node, hypervisor):
- """Tries to powercycle a node.
+ return [filename, data, st.st_mode, getents.LookupUid(st.st_uid),
+ getents.LookupGid(st.st_gid), st.st_atime, st.st_mtime]
- This is a single-node call.
- """
- return self._SingleNodeCall(node, "node_powercycle", [hypervisor])
+def _PrepareFinalizeExportDisks(snap_disks):
+ """Encodes disks for finalizing export.
- @_RpcTimeout(None)
- def call_test_delay(self, node_list, duration):
- """Sleep for a fixed time on given node(s).
+ """
+ flat_disks = []
- This is a multi-node call.
+ for disk in snap_disks:
+ if isinstance(disk, bool):
+ flat_disks.append(disk)
+ else:
+ flat_disks.append(disk.ToDict())
- """
- return self._MultiNodeCall(node_list, "test_delay", [duration],
- read_timeout=int(duration + 5))
+ return flat_disks
- @_RpcTimeout(_TMO_FAST)
- def call_file_storage_dir_create(self, node, file_storage_dir):
- """Create the given file storage directory.
- This is a single-node call.
+def _EncodeImportExportIO((ieio, ieioargs)):
+ """Encodes import/export I/O information.
- """
- return self._SingleNodeCall(node, "file_storage_dir_create",
- [file_storage_dir])
+ """
+ if ieio == constants.IEIO_RAW_DISK:
+ assert len(ieioargs) == 1
+ return (ieio, (ieioargs[0].ToDict(), ))
- @_RpcTimeout(_TMO_FAST)
- def call_file_storage_dir_remove(self, node, file_storage_dir):
- """Remove the given file storage directory.
+ if ieio == constants.IEIO_SCRIPT:
+ assert len(ieioargs) == 2
+ return (ieio, (ieioargs[0].ToDict(), ieioargs[1]))
- This is a single-node call.
+ return (ieio, ieioargs)
- """
- return self._SingleNodeCall(node, "file_storage_dir_remove",
- [file_storage_dir])
- @_RpcTimeout(_TMO_FAST)
- def call_file_storage_dir_rename(self, node, old_file_storage_dir,
- new_file_storage_dir):
- """Rename file storage directory.
+def _EncodeBlockdevRename(value):
+ """Encodes information for renaming block devices.
- This is a single-node call.
+ """
+ return [(d.ToDict(), uid) for d, uid in value]
- """
- return self._SingleNodeCall(node, "file_storage_dir_rename",
- [old_file_storage_dir, new_file_storage_dir])
- @classmethod
- @_RpcTimeout(_TMO_URGENT)
- def call_jobqueue_update(cls, node_list, address_list, file_name, content):
- """Update job queue.
+#: Generic encoders
+_ENCODERS = {
+ rpc_defs.ED_OBJECT_DICT: _ObjectToDict,
+ rpc_defs.ED_OBJECT_DICT_LIST: _ObjectListToDict,
+ rpc_defs.ED_NODE_TO_DISK_DICT: _EncodeNodeToDiskDict,
+ rpc_defs.ED_COMPRESS: _Compress,
+ rpc_defs.ED_FINALIZE_EXPORT_DISKS: _PrepareFinalizeExportDisks,
+ rpc_defs.ED_IMPEXP_IO: _EncodeImportExportIO,
+ rpc_defs.ED_BLOCKDEV_RENAME: _EncodeBlockdevRename,
+ }
- This is a multi-node call.
- """
- return cls._StaticMultiNodeCall(node_list, "jobqueue_update",
- [file_name, cls._Compress(content)],
- address_list=address_list)
+class RpcRunner(_RpcClientBase,
+ _generated_rpc.RpcClientDefault,
+ _generated_rpc.RpcClientBootstrap,
+ _generated_rpc.RpcClientConfig):
+ """RPC runner class.
- @classmethod
- @_RpcTimeout(_TMO_NORMAL)
- def call_jobqueue_purge(cls, node):
- """Purge job queue.
+ """
+ def __init__(self, cfg, lock_monitor_cb, _req_process_fn=None, _getents=None):
+ """Initialized the RPC runner.
- This is a single-node call.
+ @type cfg: L{config.ConfigWriter}
+ @param cfg: Configuration
+ @type lock_monitor_cb: callable
+ @param lock_monitor_cb: Lock monitor callback
"""
- return cls._StaticSingleNodeCall(node, "jobqueue_purge", [])
-
- @classmethod
- @_RpcTimeout(_TMO_URGENT)
- def call_jobqueue_rename(cls, node_list, address_list, rename):
- """Rename a job queue file.
+ self._cfg = cfg
- This is a multi-node call.
+ encoders = _ENCODERS.copy()
+
+ encoders.update({
+ # Encoders requiring configuration object
+ rpc_defs.ED_INST_DICT: self._InstDict,
+ rpc_defs.ED_INST_DICT_HVP_BEP: self._InstDictHvpBep,
+ rpc_defs.ED_INST_DICT_OSP: self._InstDictOsp,
+
+ # Encoders with special requirements
+ rpc_defs.ED_FILE_DETAILS: compat.partial(_PrepareFileUpload, _getents),
+ })
+
+ # Resolver using configuration
+ resolver = compat.partial(_NodeConfigResolver, cfg.GetNodeInfo,
+ cfg.GetAllNodesInfo)
+
+ # Pylint doesn't recognize multiple inheritance properly, see
+ # <http://www.logilab.org/ticket/36586> and
+ # <http://www.logilab.org/ticket/35642>
+ # pylint: disable=W0233
+ _RpcClientBase.__init__(self, resolver, encoders.get,
+ lock_monitor_cb=lock_monitor_cb,
+ _req_process_fn=_req_process_fn)
+ _generated_rpc.RpcClientConfig.__init__(self)
+ _generated_rpc.RpcClientBootstrap.__init__(self)
+ _generated_rpc.RpcClientDefault.__init__(self)
- """
- return cls._StaticMultiNodeCall(node_list, "jobqueue_rename", rename,
- address_list=address_list)
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_hypervisor_validate_params(self, node_list, hvname, hvparams):
- """Validate the hypervisor params.
+ def _InstDict(self, instance, hvp=None, bep=None, osp=None):
+ """Convert the given instance to a dict.
- This is a multi-node call.
+ This is done via the instance's ToDict() method and additionally
+ we fill the hvparams with the cluster defaults.
- @type node_list: list
- @param node_list: the list of nodes to query
- @type hvname: string
- @param hvname: the hypervisor name
- @type hvparams: dict
- @param hvparams: the hypervisor parameters to be validated
+ @type instance: L{objects.Instance}
+ @param instance: an Instance object
+ @type hvp: dict or None
+ @param hvp: a dictionary with overridden hypervisor parameters
+ @type bep: dict or None
+ @param bep: a dictionary with overridden backend parameters
+ @type osp: dict or None
+ @param osp: a dictionary with overridden os parameters
+ @rtype: dict
+ @return: the instance dict, with the hvparams filled with the
+ cluster defaults
"""
+ idict = instance.ToDict()
cluster = self._cfg.GetClusterInfo()
- hv_full = objects.FillDict(cluster.hvparams.get(hvname, {}), hvparams)
- return self._MultiNodeCall(node_list, "hypervisor_validate_params",
- [hvname, hv_full])
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_x509_cert_create(self, node, validity):
- """Creates a new X509 certificate for SSL/TLS.
-
- This is a single-node call.
+ idict["hvparams"] = cluster.FillHV(instance)
+ if hvp is not None:
+ idict["hvparams"].update(hvp)
+ idict["beparams"] = cluster.FillBE(instance)
+ if bep is not None:
+ idict["beparams"].update(bep)
+ idict["osparams"] = cluster.SimpleFillOS(instance.os, instance.osparams)
+ if osp is not None:
+ idict["osparams"].update(osp)
+ for nic in idict["nics"]:
+ nic['nicparams'] = objects.FillDict(
+ cluster.nicparams[constants.PP_DEFAULT],
+ nic['nicparams'])
+ return idict
- @type validity: int
- @param validity: Validity in seconds
+ def _InstDictHvpBep(self, (instance, hvp, bep)):
+ """Wrapper for L{_InstDict}.
"""
- return self._SingleNodeCall(node, "x509_cert_create", [validity])
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_x509_cert_remove(self, node, name):
- """Removes a X509 certificate.
-
- This is a single-node call.
+ return self._InstDict(instance, hvp=hvp, bep=bep)
- @type name: string
- @param name: Certificate name
+ def _InstDictOsp(self, (instance, osparams)):
+ """Wrapper for L{_InstDict}.
"""
- return self._SingleNodeCall(node, "x509_cert_remove", [name])
+ return self._InstDict(instance, osp=osparams)
- @_RpcTimeout(_TMO_NORMAL)
- def call_import_start(self, node, opts, instance, component,
- dest, dest_args):
- """Starts a listener for an import.
- This is a single-node call.
+class JobQueueRunner(_RpcClientBase, _generated_rpc.RpcClientJobQueue):
+ """RPC wrappers for job queue.
- @type node: string
- @param node: Node name
- @type instance: C{objects.Instance}
- @param instance: Instance object
- @type component: string
- @param component: which part of the instance is being imported
+ """
+ def __init__(self, context, address_list):
+ """Initializes this class.
"""
- return self._SingleNodeCall(node, "import_start",
- [opts.ToDict(),
- self._InstDict(instance), component, dest,
- _EncodeImportExportIO(dest, dest_args)])
+ if address_list is None:
+ resolver = _SsconfResolver
+ else:
+ # Caller provided an address list
+ resolver = _StaticResolver(address_list)
- @_RpcTimeout(_TMO_NORMAL)
- def call_export_start(self, node, opts, host, port,
- instance, component, source, source_args):
- """Starts an export daemon.
+ _RpcClientBase.__init__(self, resolver, _ENCODERS.get,
+ lock_monitor_cb=context.glm.AddToLockMonitor)
+ _generated_rpc.RpcClientJobQueue.__init__(self)
- This is a single-node call.
- @type node: string
- @param node: Node name
- @type instance: C{objects.Instance}
- @param instance: Instance object
- @type component: string
- @param component: which part of the instance is being imported
+class BootstrapRunner(_RpcClientBase, _generated_rpc.RpcClientBootstrap):
+ """RPC wrappers for bootstrapping.
- """
- return self._SingleNodeCall(node, "export_start",
- [opts.ToDict(), host, port,
- self._InstDict(instance),
- component, source,
- _EncodeImportExportIO(source, source_args)])
-
- @_RpcTimeout(_TMO_FAST)
- def call_impexp_status(self, node, names):
- """Gets the status of an import or export.
-
- This is a single-node call.
-
- @type node: string
- @param node: Node name
- @type names: List of strings
- @param names: Import/export names
- @rtype: List of L{objects.ImportExportStatus} instances
- @return: Returns a list of the state of each named import/export or None if
- a status couldn't be retrieved
+ """
+ def __init__(self):
+ """Initializes this class.
"""
- result = self._SingleNodeCall(node, "impexp_status", [names])
-
- if not result.fail_msg:
- decoded = []
-
- for i in result.payload:
- if i is None:
- decoded.append(None)
- continue
- decoded.append(objects.ImportExportStatus.FromDict(i))
+ _RpcClientBase.__init__(self, _SsconfResolver, _ENCODERS.get)
+ _generated_rpc.RpcClientBootstrap.__init__(self)
- result.payload = decoded
- return result
+class ConfigRunner(_RpcClientBase, _generated_rpc.RpcClientConfig):
+ """RPC wrappers for L{config}.
- @_RpcTimeout(_TMO_NORMAL)
- def call_impexp_abort(self, node, name):
- """Aborts an import or export.
-
- This is a single-node call.
-
- @type node: string
- @param node: Node name
- @type name: string
- @param name: Import/export name
+ """
+ def __init__(self, context, address_list):
+ """Initializes this class.
"""
- return self._SingleNodeCall(node, "impexp_abort", [name])
-
- @_RpcTimeout(_TMO_NORMAL)
- def call_impexp_cleanup(self, node, name):
- """Cleans up after an import or export.
-
- This is a single-node call.
+ if context:
+ lock_monitor_cb = context.glm.AddToLockMonitor
+ else:
+ lock_monitor_cb = None
- @type node: string
- @param node: Node name
- @type name: string
- @param name: Import/export name
+ if address_list is None:
+ resolver = _SsconfResolver
+ else:
+ # Caller provided an address list
+ resolver = _StaticResolver(address_list)
- """
- return self._SingleNodeCall(node, "impexp_cleanup", [name])
+ _RpcClientBase.__init__(self, resolver, _ENCODERS.get,
+ lock_monitor_cb=lock_monitor_cb)
+ _generated_rpc.RpcClientConfig.__init__(self)
--- /dev/null
+#
+#
+
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA.
+
+"""RPC definitions for communication between master and node daemons.
+
+RPC definition fields:
+
+ - Name as string
+ - L{SINGLE} for single-node calls, L{MULTI} for multi-node
+ - Name resolver option(s), can be callable receiving all arguments in a tuple
+ - Timeout (e.g. L{TMO_NORMAL}), or callback receiving all arguments in a
+ tuple to calculate timeout
+ - List of arguments as tuples
+
+ - Name as string
+ - Argument kind used for encoding/decoding
+ - Description for docstring (can be C{None})
+
+ - Custom body encoder (e.g. for preparing per-node bodies)
+ - Return value wrapper (e.g. for deserializing into L{objects}-based objects)
+ - Short call description for docstring
+
+"""
+
+from ganeti import utils
+from ganeti import objects
+
+
+# Guidelines for choosing timeouts:
+# - call used during watcher: timeout of 1min, TMO_URGENT
+# - trivial (but be sure it is trivial) (e.g. reading a file): 5min, TMO_FAST
+# - other calls: 15 min, TMO_NORMAL
+# - special calls (instance add, etc.): either TMO_SLOW (1h) or huge timeouts
+TMO_URGENT = 60 # one minute
+TMO_FAST = 5 * 60 # five minutes
+TMO_NORMAL = 15 * 60 # 15 minutes
+TMO_SLOW = 3600 # one hour
+TMO_4HRS = 4 * 3600
+TMO_1DAY = 86400
+
+SINGLE = "single-node"
+MULTI = "multi-node"
+
+ACCEPT_OFFLINE_NODE = object()
+
+# Constants for encoding/decoding
+(ED_OBJECT_DICT,
+ ED_OBJECT_DICT_LIST,
+ ED_INST_DICT,
+ ED_INST_DICT_HVP_BEP,
+ ED_NODE_TO_DISK_DICT,
+ ED_INST_DICT_OSP,
+ ED_IMPEXP_IO,
+ ED_FILE_DETAILS,
+ ED_FINALIZE_EXPORT_DISKS,
+ ED_COMPRESS,
+ ED_BLOCKDEV_RENAME) = range(1, 12)
+
+
+def _Prepare(calls):
+ """Converts list of calls to dictionary.
+
+ """
+ return utils.SequenceToDict(calls)
+
+
+def _MigrationStatusPostProc(result):
+ """Post-processor for L{rpc.RpcRunner.call_instance_get_migration_status}.
+
+ """
+ if not result.fail_msg and result.payload is not None:
+ result.payload = objects.MigrationStatus.FromDict(result.payload)
+ return result
+
+
+def _BlockdevFindPostProc(result):
+ """Post-processor for L{rpc.RpcRunner.call_blockdev_find}.
+
+ """
+ if not result.fail_msg and result.payload is not None:
+ result.payload = objects.BlockDevStatus.FromDict(result.payload)
+ return result
+
+
+def _BlockdevGetMirrorStatusPostProc(result):
+ """Post-processor for L{rpc.RpcRunner.call_blockdev_getmirrorstatus}.
+
+ """
+ if not result.fail_msg:
+ result.payload = map(objects.BlockDevStatus.FromDict, result.payload)
+ return result
+
+
+def _BlockdevGetMirrorStatusMultiPreProc(node, args):
+ """Prepares the appropriate node values for blockdev_getmirrorstatus_multi.
+
+ """
+ # there should be only one argument to this RPC, already holding a
+ # node->disks dictionary, we just need to extract the value for the
+ # current node
+ assert len(args) == 1
+ return [args[0][node]]
+
+
+def _BlockdevGetMirrorStatusMultiPostProc(result):
+ """Post-processor for L{rpc.RpcRunner.call_blockdev_getmirrorstatus_multi}.
+
+ """
+ if not result.fail_msg:
+ for idx, (success, status) in enumerate(result.payload):
+ if success:
+ result.payload[idx] = (success, objects.BlockDevStatus.FromDict(status))
+
+ return result
+
+
+def _OsGetPostProc(result):
+ """Post-processor for L{rpc.RpcRunner.call_os_get}.
+
+ """
+ if not result.fail_msg and isinstance(result.payload, dict):
+ result.payload = objects.OS.FromDict(result.payload)
+ return result
+
+
+def _ImpExpStatusPostProc(result):
+ """Post-processor for import/export status.
+
+ @rtype: Payload containing list of L{objects.ImportExportStatus} instances
+ @return: Returns a list of the state of each named import/export or None if
+ a status couldn't be retrieved
+
+ """
+ if not result.fail_msg:
+ decoded = []
+
+ for i in result.payload:
+ if i is None:
+ decoded.append(None)
+ continue
+ decoded.append(objects.ImportExportStatus.FromDict(i))
+
+ result.payload = decoded
+
+ return result
+
+
+def _TestDelayTimeout((duration, )):
+ """Calculate timeout for "test_delay" RPC.
+
+ """
+ return int(duration + 5)
+
+
+_FILE_STORAGE_CALLS = [
+ ("file_storage_dir_create", SINGLE, None, TMO_FAST, [
+ ("file_storage_dir", None, "File storage directory"),
+ ], None, None, "Create the given file storage directory"),
+ ("file_storage_dir_remove", SINGLE, None, TMO_FAST, [
+ ("file_storage_dir", None, "File storage directory"),
+ ], None, None, "Remove the given file storage directory"),
+ ("file_storage_dir_rename", SINGLE, None, TMO_FAST, [
+ ("old_file_storage_dir", None, "Old name"),
+ ("new_file_storage_dir", None, "New name"),
+ ], None, None, "Rename file storage directory"),
+ ]
+
+_STORAGE_CALLS = [
+ ("storage_list", MULTI, None, TMO_NORMAL, [
+ ("su_name", None, None),
+ ("su_args", None, None),
+ ("name", None, None),
+ ("fields", None, None),
+ ], None, None, "Get list of storage units"),
+ ("storage_modify", SINGLE, None, TMO_NORMAL, [
+ ("su_name", None, None),
+ ("su_args", None, None),
+ ("name", None, None),
+ ("changes", None, None),
+ ], None, None, "Modify a storage unit"),
+ ("storage_execute", SINGLE, None, TMO_NORMAL, [
+ ("su_name", None, None),
+ ("su_args", None, None),
+ ("name", None, None),
+ ("op", None, None),
+ ], None, None, "Executes an operation on a storage unit"),
+ ]
+
+_INSTANCE_CALLS = [
+ ("instance_info", SINGLE, None, TMO_URGENT, [
+ ("instance", None, "Instance name"),
+ ("hname", None, "Hypervisor type"),
+ ], None, None, "Returns information about a single instance"),
+ ("all_instances_info", MULTI, None, TMO_URGENT, [
+ ("hypervisor_list", None, "Hypervisors to query for instances"),
+ ], None, None,
+ "Returns information about all instances on the given nodes"),
+ ("instance_list", MULTI, None, TMO_URGENT, [
+ ("hypervisor_list", None, "Hypervisors to query for instances"),
+ ], None, None, "Returns the list of running instances on the given nodes"),
+ ("instance_reboot", SINGLE, None, TMO_NORMAL, [
+ ("inst", ED_INST_DICT, "Instance object"),
+ ("reboot_type", None, None),
+ ("shutdown_timeout", None, None),
+ ], None, None, "Reboots an instance"),
+ ("instance_shutdown", SINGLE, None, TMO_NORMAL, [
+ ("instance", ED_INST_DICT, "Instance object"),
+ ("timeout", None, None),
+ ], None, None, "Stops an instance"),
+ ("instance_run_rename", SINGLE, None, TMO_SLOW, [
+ ("instance", ED_INST_DICT, "Instance object"),
+ ("old_name", None, None),
+ ("debug", None, None),
+ ], None, None, "Run the OS rename script for an instance"),
+ ("instance_migratable", SINGLE, None, TMO_NORMAL, [
+ ("instance", ED_INST_DICT, "Instance object"),
+ ], None, None, "Checks whether the given instance can be migrated"),
+ ("migration_info", SINGLE, None, TMO_NORMAL, [
+ ("instance", ED_INST_DICT, "Instance object"),
+ ], None, None,
+ "Gather the information necessary to prepare an instance migration"),
+ ("accept_instance", SINGLE, None, TMO_NORMAL, [
+ ("instance", ED_INST_DICT, "Instance object"),
+ ("info", None, "Result for the call_migration_info call"),
+ ("target", None, "Target hostname (usually an IP address)"),
+ ], None, None, "Prepare a node to accept an instance"),
+ ("instance_finalize_migration_dst", SINGLE, None, TMO_NORMAL, [
+ ("instance", ED_INST_DICT, "Instance object"),
+ ("info", None, "Result for the call_migration_info call"),
+ ("success", None, "Whether the migration was a success or failure"),
+ ], None, None, "Finalize any target-node migration specific operation"),
+ ("instance_migrate", SINGLE, None, TMO_SLOW, [
+ ("instance", ED_INST_DICT, "Instance object"),
+ ("target", None, "Target node name"),
+ ("live", None, "Whether the migration should be done live or not"),
+ ], None, None, "Migrate an instance"),
+ ("instance_finalize_migration_src", SINGLE, None, TMO_SLOW, [
+ ("instance", ED_INST_DICT, "Instance object"),
+ ("success", None, "Whether the migration succeeded or not"),
+ ("live", None, "Whether the user requested a live migration or not"),
+ ], None, None, "Finalize the instance migration on the source node"),
+ ("instance_get_migration_status", SINGLE, None, TMO_SLOW, [
+ ("instance", ED_INST_DICT, "Instance object"),
+ ], None, _MigrationStatusPostProc, "Report migration status"),
+ ("instance_start", SINGLE, None, TMO_NORMAL, [
+ ("instance_hvp_bep", ED_INST_DICT_HVP_BEP, None),
+ ("startup_paused", None, None),
+ ], None, None, "Starts an instance"),
+ ("instance_os_add", SINGLE, None, TMO_1DAY, [
+ ("instance_osp", ED_INST_DICT_OSP, None),
+ ("reinstall", None, None),
+ ("debug", None, None),
+ ], None, None, "Installs an operating system onto the given instance"),
+ ]
+
+_IMPEXP_CALLS = [
+ ("import_start", SINGLE, None, TMO_NORMAL, [
+ ("opts", ED_OBJECT_DICT, None),
+ ("instance", ED_INST_DICT, None),
+ ("component", None, None),
+ ("dest", ED_IMPEXP_IO, "Import destination"),
+ ], None, None, "Starts an import daemon"),
+ ("export_start", SINGLE, None, TMO_NORMAL, [
+ ("opts", ED_OBJECT_DICT, None),
+ ("host", None, None),
+ ("port", None, None),
+ ("instance", ED_INST_DICT, None),
+ ("component", None, None),
+ ("source", ED_IMPEXP_IO, "Export source"),
+ ], None, None, "Starts an export daemon"),
+ ("impexp_status", SINGLE, None, TMO_FAST, [
+ ("names", None, "Import/export names"),
+ ], None, _ImpExpStatusPostProc, "Gets the status of an import or export"),
+ ("impexp_abort", SINGLE, None, TMO_NORMAL, [
+ ("name", None, "Import/export name"),
+ ], None, None, "Aborts an import or export"),
+ ("impexp_cleanup", SINGLE, None, TMO_NORMAL, [
+ ("name", None, "Import/export name"),
+ ], None, None, "Cleans up after an import or export"),
+ ("export_info", SINGLE, None, TMO_FAST, [
+ ("path", None, None),
+ ], None, None, "Queries the export information in a given path"),
+ ("finalize_export", SINGLE, None, TMO_NORMAL, [
+ ("instance", ED_INST_DICT, None),
+ ("snap_disks", ED_FINALIZE_EXPORT_DISKS, None),
+ ], None, None, "Request the completion of an export operation"),
+ ("export_list", MULTI, None, TMO_FAST, [], None, None,
+ "Gets the stored exports list"),
+ ("export_remove", SINGLE, None, TMO_FAST, [
+ ("export", None, None),
+ ], None, None, "Requests removal of a given export"),
+ ]
+
+_X509_CALLS = [
+ ("x509_cert_create", SINGLE, None, TMO_NORMAL, [
+ ("validity", None, "Validity in seconds"),
+ ], None, None, "Creates a new X509 certificate for SSL/TLS"),
+ ("x509_cert_remove", SINGLE, None, TMO_NORMAL, [
+ ("name", None, "Certificate name"),
+ ], None, None, "Removes an X509 certificate"),
+ ]
+
+_BLOCKDEV_CALLS = [
+ ("bdev_sizes", MULTI, None, TMO_URGENT, [
+ ("devices", None, None),
+ ], None, None,
+ "Gets the sizes of requested block devices present on a node"),
+ ("blockdev_create", SINGLE, None, TMO_NORMAL, [
+ ("bdev", ED_OBJECT_DICT, None),
+ ("size", None, None),
+ ("owner", None, None),
+ ("on_primary", None, None),
+ ("info", None, None),
+ ], None, None, "Request creation of a given block device"),
+ ("blockdev_wipe", SINGLE, None, TMO_SLOW, [
+ ("bdev", ED_OBJECT_DICT, None),
+ ("offset", None, None),
+ ("size", None, None),
+ ], None, None,
+ "Request wipe at given offset with given size of a block device"),
+ ("blockdev_remove", SINGLE, None, TMO_NORMAL, [
+ ("bdev", ED_OBJECT_DICT, None),
+ ], None, None, "Request removal of a given block device"),
+ ("blockdev_pause_resume_sync", SINGLE, None, TMO_NORMAL, [
+ ("disks", ED_OBJECT_DICT_LIST, None),
+ ("pause", None, None),
+ ], None, None, "Request a pause/resume of given block device"),
+ ("blockdev_assemble", SINGLE, None, TMO_NORMAL, [
+ ("disk", ED_OBJECT_DICT, None),
+ ("owner", None, None),
+ ("on_primary", None, None),
+ ("idx", None, None),
+ ], None, None, "Request assembling of a given block device"),
+ ("blockdev_shutdown", SINGLE, None, TMO_NORMAL, [
+ ("disk", ED_OBJECT_DICT, None),
+ ], None, None, "Request shutdown of a given block device"),
+ ("blockdev_addchildren", SINGLE, None, TMO_NORMAL, [
+ ("bdev", ED_OBJECT_DICT, None),
+ ("ndevs", ED_OBJECT_DICT_LIST, None),
+ ], None, None,
+ "Request adding a list of children to a (mirroring) device"),
+ ("blockdev_removechildren", SINGLE, None, TMO_NORMAL, [
+ ("bdev", ED_OBJECT_DICT, None),
+ ("ndevs", ED_OBJECT_DICT_LIST, None),
+ ], None, None,
+ "Request removing a list of children from a (mirroring) device"),
+ ("blockdev_close", SINGLE, None, TMO_NORMAL, [
+ ("instance_name", None, None),
+ ("disks", ED_OBJECT_DICT_LIST, None),
+ ], None, None, "Closes the given block devices"),
+ ("blockdev_getsize", SINGLE, None, TMO_NORMAL, [
+ ("disks", ED_OBJECT_DICT_LIST, None),
+ ], None, None, "Returns the size of the given disks"),
+ ("drbd_disconnect_net", MULTI, None, TMO_NORMAL, [
+ ("nodes_ip", None, None),
+ ("disks", ED_OBJECT_DICT_LIST, None),
+ ], None, None, "Disconnects the network of the given drbd devices"),
+ ("drbd_attach_net", MULTI, None, TMO_NORMAL, [
+ ("nodes_ip", None, None),
+ ("disks", ED_OBJECT_DICT_LIST, None),
+ ("instance_name", None, None),
+ ("multimaster", None, None),
+ ], None, None, "Connects the given DRBD devices"),
+ ("drbd_wait_sync", MULTI, None, TMO_SLOW, [
+ ("nodes_ip", None, None),
+ ("disks", ED_OBJECT_DICT_LIST, None),
+ ], None, None,
+ "Waits until the synchronization of drbd devices is complete"),
+ ("blockdev_grow", SINGLE, None, TMO_NORMAL, [
+ ("cf_bdev", ED_OBJECT_DICT, None),
+ ("amount", None, None),
+ ("dryrun", None, None),
+ ], None, None, "Request growing of the given block device by a given amount"),
+ ("blockdev_export", SINGLE, None, TMO_1DAY, [
+ ("cf_bdev", ED_OBJECT_DICT, None),
+ ("dest_node", None, None),
+ ("dest_path", None, None),
+ ("cluster_name", None, None),
+ ], None, None, "Export a given disk to another node"),
+ ("blockdev_snapshot", SINGLE, None, TMO_NORMAL, [
+ ("cf_bdev", ED_OBJECT_DICT, None),
+ ], None, None, "Request a snapshot of the given block device"),
+ ("blockdev_rename", SINGLE, None, TMO_NORMAL, [
+ ("devlist", ED_BLOCKDEV_RENAME, None),
+ ], None, None, "Request rename of the given block devices"),
+ ("blockdev_find", SINGLE, None, TMO_NORMAL, [
+ ("disk", ED_OBJECT_DICT, None),
+ ], None, _BlockdevFindPostProc,
+ "Request identification of a given block device"),
+ ("blockdev_getmirrorstatus", SINGLE, None, TMO_NORMAL, [
+ ("disks", ED_OBJECT_DICT_LIST, None),
+ ], None, _BlockdevGetMirrorStatusPostProc,
+ "Request status of a (mirroring) device"),
+ ("blockdev_getmirrorstatus_multi", MULTI, None, TMO_NORMAL, [
+ ("node_disks", ED_NODE_TO_DISK_DICT, None),
+ ], _BlockdevGetMirrorStatusMultiPreProc,
+ _BlockdevGetMirrorStatusMultiPostProc,
+ "Request status of (mirroring) devices from multiple nodes"),
+ ]
+
+_OS_CALLS = [
+ ("os_diagnose", MULTI, None, TMO_FAST, [], None, None,
+ "Request a diagnose of OS definitions"),
+ ("os_validate", MULTI, None, TMO_FAST, [
+ ("required", None, None),
+ ("name", None, None),
+ ("checks", None, None),
+ ("params", None, None),
+ ], None, None, "Run a validation routine for a given OS"),
+ ("os_get", SINGLE, None, TMO_FAST, [
+ ("name", None, None),
+ ], None, _OsGetPostProc, "Returns an OS definition"),
+ ]
+
+_NODE_CALLS = [
+ ("node_has_ip_address", SINGLE, None, TMO_FAST, [
+ ("address", None, "IP address"),
+ ], None, None, "Checks if a node has the given IP address"),
+ ("node_info", MULTI, None, TMO_URGENT, [
+ ("vg_names", None,
+ "Names of the volume groups to ask for disk space information"),
+ ("hv_names", None,
+ "Names of the hypervisors to ask for node information"),
+ ], None, None, "Return node information"),
+ ("node_verify", MULTI, None, TMO_NORMAL, [
+ ("checkdict", None, None),
+ ("cluster_name", None, None),
+ ], None, None, "Request verification of given parameters"),
+ ("node_volumes", MULTI, None, TMO_FAST, [], None, None,
+ "Gets all volumes on node(s)"),
+ ("node_demote_from_mc", SINGLE, None, TMO_FAST, [], None, None,
+ "Demote a node from the master candidate role"),
+ ("node_powercycle", SINGLE, ACCEPT_OFFLINE_NODE, TMO_NORMAL, [
+ ("hypervisor", None, "Hypervisor type"),
+ ], None, None, "Tries to powercycle a node"),
+ ]
+
+_MISC_CALLS = [
+ ("lv_list", MULTI, None, TMO_URGENT, [
+ ("vg_name", None, None),
+ ], None, None, "Gets the logical volumes present in a given volume group"),
+ ("vg_list", MULTI, None, TMO_URGENT, [], None, None,
+ "Gets the volume group list"),
+ ("bridges_exist", SINGLE, None, TMO_URGENT, [
+ ("bridges_list", None, "Bridges which must be present on remote node"),
+ ], None, None, "Checks if a node has all the bridges given"),
+ ("etc_hosts_modify", SINGLE, None, TMO_NORMAL, [
+ ("mode", None,
+ "Mode to operate; currently L{constants.ETC_HOSTS_ADD} or"
+ " L{constants.ETC_HOSTS_REMOVE}"),
+ ("name", None, "Hostname to be modified"),
+ ("ip", None, "IP address (L{constants.ETC_HOSTS_ADD} only)"),
+ ], None, None, "Modify hosts file with name"),
+ ("drbd_helper", MULTI, None, TMO_URGENT, [], None, None, "Gets DRBD helper"),
+ ("run_oob", SINGLE, None, TMO_NORMAL, [
+ ("oob_program", None, None),
+ ("command", None, None),
+ ("remote_node", None, None),
+ ("timeout", None, None),
+ ], None, None, "Runs out-of-band command"),
+ ("hooks_runner", MULTI, None, TMO_NORMAL, [
+ ("hpath", None, None),
+ ("phase", None, None),
+ ("env", None, None),
+ ], None, None, "Call the hooks runner"),
+ ("iallocator_runner", SINGLE, None, TMO_NORMAL, [
+ ("name", None, "Iallocator name"),
+ ("idata", None, "JSON-encoded input string"),
+ ], None, None, "Call an iallocator on a remote node"),
+ ("test_delay", MULTI, None, _TestDelayTimeout, [
+ ("duration", None, None),
+ ], None, None, "Sleep for a fixed time on given node(s)"),
+ ("hypervisor_validate_params", MULTI, None, TMO_NORMAL, [
+ ("hvname", None, "Hypervisor name"),
+ ("hvfull", None, "Parameters to be validated"),
+ ], None, None, "Validate hypervisor params"),
+ ]
+
+CALLS = {
+ "RpcClientDefault": \
+ _Prepare(_IMPEXP_CALLS + _X509_CALLS + _OS_CALLS + _NODE_CALLS +
+ _FILE_STORAGE_CALLS + _MISC_CALLS + _INSTANCE_CALLS +
+ _BLOCKDEV_CALLS + _STORAGE_CALLS),
+ "RpcClientJobQueue": _Prepare([
+ ("jobqueue_update", MULTI, None, TMO_URGENT, [
+ ("file_name", None, None),
+ ("content", ED_COMPRESS, None),
+ ], None, None, "Update job queue file"),
+ ("jobqueue_purge", SINGLE, None, TMO_NORMAL, [], None, None,
+ "Purge job queue"),
+ ("jobqueue_rename", MULTI, None, TMO_URGENT, [
+ ("rename", None, None),
+ ], None, None, "Rename job queue file"),
+ ]),
+ "RpcClientBootstrap": _Prepare([
+ ("node_start_master_daemons", SINGLE, None, TMO_FAST, [
+ ("no_voting", None, None),
+ ], None, None, "Starts master daemons on a node"),
+ ("node_activate_master_ip", SINGLE, None, TMO_FAST, [
+ ("master_params", ED_OBJECT_DICT, "Network parameters of the master"),
+ ("use_external_mip_script", None,
+ "Whether to use the user-provided master IP address setup script"),
+ ], None, None,
+ "Activates master IP on a node"),
+ ("node_stop_master", SINGLE, None, TMO_FAST, [], None, None,
+ "Deactivates master IP and stops master daemons on a node"),
+ ("node_deactivate_master_ip", SINGLE, None, TMO_FAST, [
+ ("master_params", ED_OBJECT_DICT, "Network parameters of the master"),
+ ("use_external_mip_script", None,
+ "Whether to use the user-provided master IP address setup script"),
+ ], None, None,
+ "Deactivates master IP on a node"),
+ ("node_change_master_netmask", SINGLE, None, TMO_FAST, [
+ ("old_netmask", None, "The old value of the netmask"),
+ ("netmask", None, "The new value of the netmask"),
+ ("master_ip", None, "The master IP"),
+ ("master_netdev", None, "The master network device"),
+ ], None, None, "Change master IP netmask"),
+ ("node_leave_cluster", SINGLE, None, TMO_NORMAL, [
+ ("modify_ssh_setup", None, None),
+ ], None, None,
+ "Requests a node to clean the cluster information it has"),
+ ("master_info", MULTI, None, TMO_URGENT, [], None, None,
+ "Query master info"),
+ ("version", MULTI, None, TMO_URGENT, [], None, None, "Query node version"),
+ ]),
+ "RpcClientConfig": _Prepare([
+ ("upload_file", MULTI, None, TMO_NORMAL, [
+ ("file_name", ED_FILE_DETAILS, None),
+ ], None, None, "Upload a file"),
+ ("write_ssconf_files", MULTI, None, TMO_NORMAL, [
+ ("values", None, None),
+ ], None, None, "Write ssconf files"),
+ ]),
+ }
@ivar rapi_uid: The resolved uid of the rapi user
@ivar rapi_gid: The resolved gid of the rapi group
@ivar noded_uid: The resolved uid of the noded user
-
@ivar daemons_gid: The resolved gid of the daemons group
@ivar admin_gid: The resolved gid of the admin group
+
"""
def __init__(self, _getpwnam=pwd.getpwnam, _getgrnam=grp.getgrnam):
"""Initialize the resolver.
# C0103: Invalid name, since pylint doesn't see that Dump points to a
# function and not a constant
-import simplejson
import re
+# Python 2.6 and above contain a JSON module based on simplejson. Unfortunately
+# the standard library version is significantly slower than the external
+# module. While it should be better from at least Python 3.2 on (see Python
+# issue 7451), for now Ganeti needs to work well with older Python versions
+# too.
+import simplejson
+
from ganeti import errors
from ganeti import utils
-_JSON_INDENT = 2
-
_RE_EOLSP = re.compile("[ \t]+$", re.MULTILINE)
-def _GetJsonDumpers(_encoder_class=simplejson.JSONEncoder):
- """Returns two JSON functions to serialize data.
-
- @rtype: (callable, callable)
- @return: The function to generate a compact form of JSON and another one to
- generate a more readable, indented form of JSON (if supported)
-
- """
- plain_encoder = _encoder_class(sort_keys=True)
-
- # Check whether the simplejson module supports indentation
- try:
- indent_encoder = _encoder_class(indent=_JSON_INDENT, sort_keys=True)
- except TypeError:
- # Indentation not supported
- indent_encoder = plain_encoder
-
- return (plain_encoder.encode, indent_encoder.encode)
-
-
-(_DumpJson, _DumpJsonIndent) = _GetJsonDumpers()
-
-
-def DumpJson(data, indent=True):
+def DumpJson(data):
"""Serialize a given object.
@param data: the data to serialize
- @param indent: whether to indent output (depends on simplejson version)
-
@return: the string representation of data
"""
- if indent:
- fn = _DumpJsonIndent
- else:
- fn = _DumpJson
+ encoded = simplejson.dumps(data)
- txt = _RE_EOLSP.sub("", fn(data))
+ txt = _RE_EOLSP.sub("", encoded)
if not txt.endswith("\n"):
txt += "\n"
@return: the string representation of data signed by the hmac key
"""
- txt = DumpJson(data, indent=False)
+ txt = DumpJson(data)
if salt is None:
salt = ""
signed_dict = {
signed_dict["hmac"] = utils.Sha1Hmac(key, txt, salt=salt + key_selector)
- return DumpJson(signed_dict, indent=False)
+ return DumpJson(signed_dict)
def LoadSignedJson(txt, key):
self.server.request_workers.AddTask((self.server, message, self))
+class _MasterShutdownCheck:
+ """Logic for master daemon shutdown.
+
+ """
+ #: How long to wait between checks
+ _CHECK_INTERVAL = 5.0
+
+ #: How long to wait after all jobs are done (e.g. to give clients time to
+ #: retrieve the job status)
+ _SHUTDOWN_LINGER = 5.0
+
+ def __init__(self):
+ """Initializes this class.
+
+ """
+ self._had_active_jobs = None
+ self._linger_timeout = None
+
+ def __call__(self, jq_prepare_result):
+ """Determines if master daemon is ready for shutdown.
+
+ @param jq_prepare_result: Result of L{jqueue.JobQueue.PrepareShutdown}
+ @rtype: None or number
+ @return: None if master daemon is ready, timeout if the check must be
+ repeated
+
+ """
+ if jq_prepare_result:
+ # Check again shortly
+ logging.info("Job queue has been notified for shutdown but is still"
+ " busy; next check in %s seconds", self._CHECK_INTERVAL)
+ self._had_active_jobs = True
+ return self._CHECK_INTERVAL
+
+ if not self._had_active_jobs:
+ # Can shut down as there were no active jobs on the first check
+ return None
+
+ # No jobs are running anymore, but maybe some clients want to collect some
+ # information. Give them a short amount of time.
+ if self._linger_timeout is None:
+ self._linger_timeout = utils.RunningTimeout(self._SHUTDOWN_LINGER, True)
+
+ remaining = self._linger_timeout.Remaining()
+
+ logging.info("Job queue no longer busy; shutting down master daemon"
+ " in %s seconds", remaining)
+
+ # TODO: Should the master daemon socket be closed at this point? Doing so
+ # wouldn't affect existing connections.
+
+ if remaining < 0:
+ return None
+ else:
+ return remaining
+
+
class MasterServer(daemon.AsyncStreamServer):
"""Master Server.
"""
family = socket.AF_UNIX
- def __init__(self, mainloop, address, uid, gid):
+ def __init__(self, address, uid, gid):
"""MasterServer constructor
- @type mainloop: ganeti.daemon.Mainloop
- @param mainloop: Mainloop used to poll for I/O events
@param address: the unix socket address to bind the MasterServer to
@param uid: The uid of the owner of the socket
@param gid: The gid of the owner of the socket
os.chown(temp_name, uid, gid)
os.rename(temp_name, address)
- self.mainloop = mainloop
self.awaker = daemon.AsyncAwaker()
# We'll only start threads once we've forked.
self.context = None
self.request_workers = None
+ self._shutdown_check = None
+
def handle_connection(self, connected_socket, client_address):
# TODO: add connection count and limit the number of open connections to a
# maximum number to avoid breaking for lack of file descriptors or memory.
CLIENT_REQUEST_WORKERS,
ClientRequestWorker)
+ def WaitForShutdown(self):
+ """Prepares server for shutdown.
+
+ """
+ if self._shutdown_check is None:
+ self._shutdown_check = _MasterShutdownCheck()
+
+ return self._shutdown_check(self.context.jobqueue.PrepareShutdown())
+
def server_cleanup(self):
"""Cleanup the server.
queue = self.server.context.jobqueue
# TODO: Parameter validation
+ if not isinstance(args, (tuple, list)):
+ logging.info("Received invalid arguments of type '%s'", type(args))
+ raise ValueError("Invalid arguments type '%s'" % type(args))
# TODO: Rewrite to not exit in each 'if/elif' branch
return queue.SubmitManyJobs(jobs)
elif method == luxi.REQ_CANCEL_JOB:
- job_id = args
+ (job_id, ) = args
logging.info("Received job cancel request for %s", job_id)
return queue.CancelJob(job_id)
elif method == luxi.REQ_ARCHIVE_JOB:
- job_id = args
+ (job_id, ) = args
logging.info("Received job archive request for %s", job_id)
return queue.ArchiveJob(job_id)
prev_log_serial, timeout)
elif method == luxi.REQ_QUERY:
- req = objects.QueryRequest.FromDict(args)
+ (what, fields, qfilter) = args
+ req = objects.QueryRequest(what=what, fields=fields, qfilter=qfilter)
if req.what in constants.QR_VIA_OP:
result = self._Query(opcodes.OpQuery(what=req.what, fields=req.fields,
- filter=req.filter))
+ qfilter=req.qfilter))
elif req.what == constants.QR_LOCK:
- if req.filter is not None:
+ if req.qfilter is not None:
raise errors.OpPrereqError("Lock queries can't be filtered")
return self.server.context.glm.QueryLocks(req.fields)
elif req.what in constants.QR_VIA_LUXI:
return result
elif method == luxi.REQ_QUERY_FIELDS:
- req = objects.QueryFieldsRequest.FromDict(args)
+ (what, fields) = args
+ req = objects.QueryFieldsRequest(what=what, fields=fields)
try:
fielddefs = query.ALL_FIELDS[req.what]
return self._Query(op)
elif method == luxi.REQ_QUERY_EXPORTS:
- nodes, use_locking = args
+ (nodes, use_locking) = args
if use_locking:
raise errors.OpPrereqError("Sync queries are not allowed",
errors.ECODE_INVAL)
return self._Query(op)
elif method == luxi.REQ_QUERY_CONFIG_VALUES:
- fields = args
+ (fields, ) = args
logging.info("Received config values query request for %s", fields)
op = opcodes.OpClusterConfigQuery(output_fields=fields)
return self._Query(op)
return self._Query(op)
elif method == luxi.REQ_QUERY_TAGS:
- kind, name = args
+ (kind, name) = args
logging.info("Received tags query request")
op = opcodes.OpTagsGet(kind=kind, name=name)
return self._Query(op)
return self.server.context.glm.OldStyleQueryLocks(fields)
elif method == luxi.REQ_QUEUE_SET_DRAIN_FLAG:
- drain_flag = args
+ (drain_flag, ) = args
logging.info("Received queue drain flag change request to %s",
drain_flag)
return queue.SetDrainFlag(drain_flag)
self.cfg.GetNodeGroupList(),
self.cfg.GetInstanceList())
+ self.cfg.SetContext(self)
+
+ # RPC runner
+ self.rpc = rpc.RpcRunner(self.cfg, self.glm.AddToLockMonitor)
+
# Job queue
self.jobqueue = jqueue.JobQueue(self)
# Add the new node to the Ganeti Lock Manager
self.glm.add(locking.LEVEL_NODE, node.name)
+ self.glm.add(locking.LEVEL_NODE_RES, node.name)
def ReaddNode(self, node):
"""Updates a node that's already in the configuration
# Remove the node from the Ganeti Lock Manager
self.glm.remove(locking.LEVEL_NODE, name)
+ self.glm.remove(locking.LEVEL_NODE_RES, name)
def _SetWatcherPause(until):
@rpc.RunWithRPC
def ActivateMasterIP():
# activate ip
- master_node = ssconf.SimpleStore().GetMasterNode()
- result = rpc.RpcRunner.call_node_activate_master_ip(master_node)
+ cfg = config.ConfigWriter()
+ master_params = cfg.GetMasterNetworkParameters()
+ ems = cfg.GetUseExternalMipScript()
+ runner = rpc.BootstrapRunner()
+ result = runner.call_node_activate_master_ip(master_params.name,
+ master_params, ems)
+
msg = result.fail_msg
if msg:
logging.error("Can't activate master IP address: %s", msg)
utils.RemoveFile(constants.MASTER_SOCKET)
mainloop = daemon.Mainloop()
- master = MasterServer(mainloop, constants.MASTER_SOCKET,
- options.uid, options.gid)
+ master = MasterServer(constants.MASTER_SOCKET, options.uid, options.gid)
return (mainloop, master)
try:
master.setup_queue()
try:
- mainloop.Run()
+ mainloop.Run(shutdown_wait_fn=master.WaitForShutdown)
finally:
master.server_cleanup()
finally:
finally:
utils.RemoveFile(constants.MASTER_SOCKET)
+ logging.info("Clean master daemon shutdown")
+
def Main():
"""Main function"""
logging.exception("Error in RPC call")
result = (False, "Error while executing backend function: %s" % str(err))
- return serializer.DumpJson(result, indent=False)
+ return serializer.DumpJson(result)
# the new block devices --------------------------
"""Remove a block device.
"""
- devlist = [(objects.Disk.FromDict(ds), uid) for ds, uid in params]
+ devlist = [(objects.Disk.FromDict(ds), uid) for ds, uid in params[0]]
return backend.BlockdevRename(devlist)
@staticmethod
"""
disks = [objects.Disk.FromDict(dsk_s)
- for dsk_s in params]
+ for dsk_s in params[0]]
return [status.ToDict()
for status in backend.BlockdevGetmirrorstatus(disks)]
"""
(node_disks, ) = params
- node_name = netutils.Hostname.GetSysName()
-
- disks = [objects.Disk.FromDict(dsk_s)
- for dsk_s in node_disks.get(node_name, [])]
+ disks = [objects.Disk.FromDict(dsk_s) for dsk_s in node_disks]
result = []
return backend.AcceptInstance(instance, info, target)
@staticmethod
- def perspective_finalize_migration(params):
- """Finalize the instance migration.
+ def perspective_instance_finalize_migration_dst(params):
+ """Finalize the instance migration on the destination node.
"""
instance, info, success = params
instance = objects.Instance.FromDict(instance)
- return backend.FinalizeMigration(instance, info, success)
+ return backend.FinalizeMigrationDst(instance, info, success)
@staticmethod
def perspective_instance_migrate(params):
return backend.MigrateInstance(instance, target, live)
@staticmethod
+ def perspective_instance_finalize_migration_src(params):
+ """Finalize the instance migration on the source node.
+
+ """
+ instance, success, live = params
+ instance = objects.Instance.FromDict(instance)
+ return backend.FinalizeMigrationSource(instance, success, live)
+
+ @staticmethod
+ def perspective_instance_get_migration_status(params):
+ """Reports migration status.
+
+ """
+ instance = objects.Instance.FromDict(params[0])
+ return backend.GetMigrationStatus(instance).ToDict()
+
+ @staticmethod
def perspective_instance_reboot(params):
"""Reboot an instance.
# node --------------------------
@staticmethod
- def perspective_node_tcp_ping(params):
- """Do a TcpPing on the remote node.
-
- """
- return netutils.TcpPing(params[1], params[2], timeout=params[3],
- live_port_needed=params[4], source=params[0])
-
- @staticmethod
def perspective_node_has_ip_address(params):
"""Checks if a node has the given ip address.
"""Query node information.
"""
- vgname, hypervisor_type = params
- return backend.GetNodeInfo(vgname, hypervisor_type)
+ (vg_names, hv_names) = params
+ return backend.GetNodeInfo(vg_names, hv_names)
@staticmethod
def perspective_etc_hosts_modify(params):
"""Activate the master IP on this node.
"""
- return backend.ActivateMasterIp()
+ master_params = objects.MasterNetworkParameters.FromDict(params[0])
+ return backend.ActivateMasterIp(master_params, params[1])
@staticmethod
def perspective_node_deactivate_master_ip(params):
"""Deactivate the master IP on this node.
"""
- return backend.DeactivateMasterIp()
+ master_params = objects.MasterNetworkParameters.FromDict(params[0])
+ return backend.DeactivateMasterIp(master_params, params[1])
@staticmethod
def perspective_node_stop_master(params):
- """Deactivate the master IP and stops master daemons on this node.
-
- Sometimes both operations need to be executed at the same time (doing one of
- the two would make impossible to do the other one).
+ """Stops master daemons on this node.
"""
- backend.DeactivateMasterIp()
return backend.StopMasterDaemons()
@staticmethod
+ def perspective_node_change_master_netmask(params):
+ """Change the master IP netmask.
+
+ """
+ return backend.ChangeMasterNetmask(params[0], params[1], params[2],
+ params[3])
+
+ @staticmethod
def perspective_node_leave_cluster(params):
"""Cleanup after leaving a cluster.
files are accepted.
"""
- return backend.UploadFile(*params)
+ return backend.UploadFile(*(params[0]))
@staticmethod
def perspective_master_info(params):
"""
# TODO: What if a file fails to rename?
- return [backend.JobQueueRename(old, new) for old, new in params]
+ return [backend.JobQueueRename(old, new) for old, new in params[0]]
# hypervisor ---------------
"""Starts an import daemon.
"""
- (opts_s, instance, component, dest, dest_args) = params
+ (opts_s, instance, component, (dest, dest_args)) = params
opts = objects.ImportExportOptions.FromDict(opts_s)
"""Starts an export daemon.
"""
- (opts_s, host, port, instance, component, source, source_args) = params
+ (opts_s, host, port, instance, component, (source, source_args)) = params
opts = objects.ImportExportOptions.FromDict(opts_s)
@return: the body of the message
"""
- return serializer.DumpJson(values, indent=True)
+ return serializer.DumpJson(values)
class RemoteApiHttpServer(http.auth.HttpServerRequestAuthentication,
def GetMasterNetdev(self):
return self._config_data["cluster"]["master_netdev"]
+ def GetMasterNetmask(self):
+ return self._config_data["cluster"]["master_netmask"]
+
def GetFileStorageDir(self):
return self._config_data["cluster"]["file_storage_dir"]
constants.SS_MASTER_CANDIDATES_IPS,
constants.SS_MASTER_IP,
constants.SS_MASTER_NETDEV,
+ constants.SS_MASTER_NETMASK,
constants.SS_MASTER_NODE,
constants.SS_NODE_LIST,
constants.SS_NODE_PRIMARY_IPS,
"""
return self._ReadFile(constants.SS_MASTER_NETDEV)
+ def GetMasterNetmask(self):
+ """Get the master netmask.
+
+ """
+ try:
+ return self._ReadFile(constants.SS_MASTER_NETMASK)
+ except errors.ConfigurationError:
+ family = self.GetPrimaryIPFamily()
+ ipcls = netutils.IPAddress.GetClassFromIpFamily(family)
+ return ipcls.iplen
+
def GetMasterNode(self):
"""Get the hostname of the master node for this cluster.
getent.masterd_gid, False),
(constants.RAPI_CERT_FILE, FILE, 0440, getent.rapi_uid,
getent.masterd_gid, False),
+ (constants.SPICE_CERT_FILE, FILE, 0440, getent.noded_uid,
+ getent.masterd_gid, False),
+ (constants.SPICE_CACERT_FILE, FILE, 0440, getent.noded_uid,
+ getent.masterd_gid, False),
(constants.NODED_CERT_FILE, FILE, 0440, getent.masterd_uid,
getent.masterd_gid, False),
]
import re
import errno
import pwd
+import time
import itertools
import select
import logging
return cpu_list
+def ParseMultiCpuMask(cpu_mask):
+ """Parse a multiple CPU mask definition and return the list of CPU IDs.
+
+ CPU mask format: colon-separated list of comma-separated list of CPU IDs
+ or dash-separated ID ranges, with optional "all" as CPU value
+ Example: "0-2,5:all:1,5,6:2" -> [ [ 0,1,2,5 ], [ -1 ], [ 1, 5, 6 ], [ 2 ] ]
+
+ @type cpu_mask: str
+ @param cpu_mask: multiple CPU mask definition
+ @rtype: list of lists of int
+ @return: list of lists of CPU IDs
+
+ """
+ if not cpu_mask:
+ return []
+ cpu_list = []
+ for range_def in cpu_mask.split(constants.CPU_PINNING_SEP):
+ if range_def == constants.CPU_PINNING_ALL:
+ cpu_list.append([constants.CPU_PINNING_ALL_VAL, ])
+ else:
+ # Uniquify and sort the list before adding
+ cpu_list.append(sorted(set(ParseCpuMask(range_def))))
+
+ return cpu_list
+
+
def GetHomeDir(user, default=None):
"""Try to get the homedir of the given user.
return wrap
+def TimeoutExpired(epoch, timeout, _time_fn=time.time):
+  """Checks whether a timeout has expired.
+
+  @type epoch: number
+  @param epoch: Start time, usually as returned by C{time.time()}
+  @type timeout: number
+  @param timeout: Timeout in seconds, counted from C{epoch}
+  @param _time_fn: Time function, only meant to be overridden in unittests
+  @rtype: bool
+  @return: Whether the current time is strictly past C{epoch + timeout}
+
+  """
+  return _time_fn() > (epoch + timeout)
+
+
class SignalWakeupFd(object):
try:
# This is only supported in Python 2.5 and above (some distributions
import re
import time
+import itertools
+
+from ganeti import compat
+from ganeti.utils import text
_SORTER_GROUPS = 8
return dict(zip(dict_in.values(), dict_in.keys()))
+def InsertAtPos(src, pos, other):
+  """Inserts C{other} at given C{pos} into C{src}.
+
+  @note: This function does not modify C{src} in place but returns a new copy
+
+  @type src: list
+  @param src: The source list in which we want insert elements
+  @type pos: int
+  @param pos: The position where we want to start insert C{other}
+  @type other: list
+  @param other: The other list to insert into C{src}
+  @rtype: list
+  @return: A copy of C{src} with C{other} inserted at C{pos}
+
+  """
+  new = src[:pos]
+  new.extend(other)
+  new.extend(src[pos:])
+
+  return new
+
+
+def SequenceToDict(seq, key=compat.fst):
+  """Converts a sequence to a dictionary with duplicate detection.
+
+  @type seq: sequence
+  @param seq: Input sequence
+  @type key: callable
+  @param key: Function for retrieving dictionary key from sequence element
+  @rtype: dict
+  @raise ValueError: If two elements of C{seq} map to the same key
+
+  """
+  keys = map(key, seq)
+
+  duplicates = FindDuplicates(keys)
+  if duplicates:
+    raise ValueError("Duplicate keys found: %s" % text.CommaJoin(duplicates))
+
+  assert len(keys) == len(seq)
+
+  return dict(zip(keys, seq))
+
+
+def _MakeFlatToDict(data):
+  """Helper function for C{FlatToDict}.
+
+  This function is called recursively.
+
+  @param data: The input data as described in C{FlatToDict}, with each key
+    already split into its path components
+  @returns: The so far converted dict
+
+  """
+  if not compat.fst(compat.fst(data)):
+    assert len(data) == 1, \
+      "not bottom most element, found %d elements, expected 1" % len(data)
+    return compat.snd(compat.fst(data))
+
+  # Note: keyfn pops (consumes) the leading path component of each entry
+  keyfn = lambda e: compat.fst(e).pop(0)
+  return dict([(k, _MakeFlatToDict(list(g)))
+               for (k, g) in itertools.groupby(sorted(data), keyfn)])
+
+
+def FlatToDict(data, field_sep="/"):
+ """Converts a flat structure to a fully fledged dict.
+
+ It accept a list of tuples in the form::
+
+ [
+ ("foo/bar", {"key1": "data1", "key2": "data2"}),
+ ("foo/baz", {"key3" :"data3" }),
+ ]
+
+ where the first element is the key separated by C{field_sep}.
+
+ This would then return::
+
+ {
+ "foo": {
+ "bar": {"key1": "data1", "key2": "data2"},
+ "baz": {"key3" :"data3" },
+ },
+ }
+
+ @type data: list of tuple
+ @param data: Input list to convert
+ @type field_sep: str
+ @param field_sep: The separator for the first field of the tuple
+ @returns: A dict based on the input list
+
+ """
+ return _MakeFlatToDict([(keys.split(field_sep), value)
+ for (keys, value) in data])
+
+
class RunningTimeout(object):
"""Class to calculate remaining timeout when doing several operations.
#: system's root directory
_LOST_AND_FOUND = "lost+found"
+# Possible values for keep_perms in WriteFile()
+KP_NEVER = 0
+KP_ALWAYS = 1
+KP_IF_EXISTS = 2
+
+KEEP_PERMS_VALUES = [
+ KP_NEVER,
+ KP_ALWAYS,
+ KP_IF_EXISTS,
+ ]
+
+
+def ErrnoOrStr(err):
+  """Format an EnvironmentError exception.
+
+  If the L{err} argument has an errno attribute, it will be looked up
+  and converted into a textual C{E...} description. Otherwise the
+  string representation of the error will be returned.
+
+  @type err: L{EnvironmentError}
+  @param err: the exception to format
+  @rtype: str
+
+  """
+  if hasattr(err, "errno"):
+    # NOTE(review): an errno value missing from errno.errorcode (or set to
+    # None) would raise KeyError here -- confirm callers only pass standard
+    # error numbers
+    detail = errno.errorcode[err.errno]
+  else:
+    detail = str(err)
+  return detail
+
+
+class FileStatHelper:
+ """Helper to store file handle's C{fstat}.
+
+ Useful in combination with L{ReadFile}'s C{preread} parameter.
+
+ """
+ def __init__(self):
+ """Initializes this class.
+
+ """
+ self.st = None
+
+ def __call__(self, fh):
+ """Calls C{fstat} on file handle.
+
+ """
+ self.st = os.fstat(fh.fileno())
+
def ReadFile(file_name, size=-1, preread=None):
"""Reads a file.
mode=None, uid=-1, gid=-1,
atime=None, mtime=None, close=True,
dry_run=False, backup=False,
- prewrite=None, postwrite=None):
+ prewrite=None, postwrite=None, keep_perms=KP_NEVER):
"""(Over)write a file atomically.
The file_name and either fn (a function taking one argument, the
@param prewrite: function to be called before writing content
@type postwrite: callable
@param postwrite: function to be called after writing content
+ @type keep_perms: members of L{KEEP_PERMS_VALUES}
+ @param keep_perms: if L{KP_NEVER} (default), owner, group, and mode are
+ taken from the other parameters; if L{KP_ALWAYS}, owner, group, and
+ mode are copied from the existing file; if L{KP_IF_EXISTS}, owner,
+ group, and mode are taken from the file, and if the file doesn't
+ exist, they are taken from the other parameters. It is an error to
+ pass L{KP_ALWAYS} when the file doesn't exist or when C{uid}, C{gid},
+ or C{mode} are set to non-default values.
@rtype: None or int
@return: None if the 'close' parameter evaluates to True,
raise errors.ProgrammerError("Both atime and mtime must be either"
" set or None")
+ if not keep_perms in KEEP_PERMS_VALUES:
+ raise errors.ProgrammerError("Invalid value for keep_perms: %s" %
+ keep_perms)
+ if keep_perms == KP_ALWAYS and (uid != -1 or gid != -1 or mode is not None):
+ raise errors.ProgrammerError("When keep_perms==KP_ALWAYS, 'uid', 'gid',"
+ " and 'mode' cannot be set")
+
if backup and not dry_run and os.path.isfile(file_name):
CreateBackup(file_name)
+ if keep_perms == KP_ALWAYS or keep_perms == KP_IF_EXISTS:
+ # os.stat() raises an exception if the file doesn't exist
+ try:
+ file_stat = os.stat(file_name)
+ mode = stat.S_IMODE(file_stat.st_mode)
+ uid = file_stat.st_uid
+ gid = file_stat.st_gid
+ except OSError:
+ if keep_perms == KP_ALWAYS:
+ raise
+      # else: if keep_perms == KP_IF_EXISTS it's ok if the file doesn't exist
+
# Whether temporary file needs to be removed (e.g. if any error occurs)
do_remove = True
"""
return ReadFile(_RANDOM_UUID_FILE, size=128).rstrip("\n")
+
+
+class TemporaryFileManager(object):
+  """Stores the list of files to be deleted and removes them on demand.
+
+  Any files still tracked are also removed when the manager object is
+  garbage-collected (see L{__del__}).
+
+  """
+
+  def __init__(self):
+    # Paths of files still to be deleted
+    self._files = []
+
+  def __del__(self):
+    self.Cleanup()
+
+  def Add(self, filename):
+    """Add file to list of files to be deleted.
+
+    @type filename: string
+    @param filename: path to filename to be added
+
+    """
+    self._files.append(filename)
+
+  def Remove(self, filename):
+    """Remove file from list of files to be deleted.
+
+    This only untracks the file; it does not delete it from disk.
+
+    @type filename: string
+    @param filename: path to filename to be deleted
+    @raise ValueError: If C{filename} is not currently tracked
+
+    """
+    self._files.remove(filename)
+
+  def Cleanup(self):
+    """Delete all files marked for deletion.
+
+    """
+    while self._files:
+      RemoveFile(self._files.pop())
ctypes = None
-# Flags for mlockall() (from bits/mman.h)
+# Flags for mlockall(2) (from bits/mman.h)
_MCL_CURRENT = 1
_MCL_FUTURE = 2
def Mlockall(_ctypes=ctypes):
"""Lock current process' virtual address space into RAM.
- This is equivalent to the C call mlockall(MCL_CURRENT|MCL_FUTURE),
- see mlock(2) for more details. This function requires ctypes module.
+ This is equivalent to the C call C{mlockall(MCL_CURRENT | MCL_FUTURE)}. See
+ mlockall(2) for more details. This function requires the C{ctypes} module.
- @raises errors.NoCtypesError: if ctypes module is not found
+ @raises errors.NoCtypesError: If the C{ctypes} module is not found
"""
if _ctypes is None:
logging.error("Cannot set memory lock, ctypes cannot load libc")
return
- # Some older version of the ctypes module don't have built-in functionality
- # to access the errno global variable, where function error codes are stored.
- # By declaring this variable as a pointer to an integer we can then access
- # its value correctly, should the mlockall call fail, in order to see what
- # the actual error code was.
+ # The ctypes module before Python 2.6 does not have built-in functionality to
+ # access the global errno global (which, depending on the libc and build
+ # options, is per thread), where function error codes are stored. Use GNU
+ # libc's way to retrieve errno(3) instead, which is to use the pointer named
+ # "__errno_location" (see errno.h and bits/errno.h).
# pylint: disable=W0212
libc.__errno_location.restype = _ctypes.POINTER(_ctypes.c_int)
out.write(line)
_write_entry(written)
- io.WriteFile(file_name, data=out.getvalue(), mode=0644)
+ io.WriteFile(file_name, data=out.getvalue(), uid=0, gid=0, mode=0644,
+ keep_perms=io.KP_IF_EXISTS)
def AddHostToEtcHosts(hostname, ip):
out.write(line)
- io.WriteFile(file_name, data=out.getvalue(), mode=0644)
+ io.WriteFile(file_name, data=out.getvalue(), uid=0, gid=0, mode=0644,
+ keep_perms=io.KP_IF_EXISTS)
def RemoveHostFromEtcHosts(hostname):
"""
assert self._indent >= 0
- self._fh.write(self._indent * self.INDENT_STR)
-
if args:
- self._fh.write(txt % args)
+ line = txt % args
else:
- self._fh.write(txt)
+ line = txt
+
+ if line:
+ # Indent only if there's something on the line
+ self._fh.write(self._indent * self.INDENT_STR)
+
+ self._fh.write(line)
self._fh.write("\n")
@param common_name: commonName value
@type validity: int
@param validity: Validity for certificate in seconds
+ @return: a tuple of strings containing the PEM-encoded private key and
+ certificate
"""
# Create private and public key
@param common_name: commonName value
@type validity: int
@param validity: validity of certificate in number of days
+ @return: a tuple of strings containing the PEM-encoded private key and
+ certificate
"""
# TODO: Investigate using the cluster name instead of X505_CERT_CN for
validity * 24 * 60 * 60)
utils_io.WriteFile(filename, mode=0400, data=key_pem + cert_pem)
+ return (key_pem, cert_pem)
# on master or not, try to start the node daemon
utils.EnsureDaemon(constants.NODED)
# start confd as well. On non candidates it will be in disabled mode.
- utils.EnsureDaemon(constants.CONFD)
+ if constants.ENABLE_CONFD:
+ utils.EnsureDaemon(constants.CONFD)
def RunWatcherHooks():
for inst in instances])
-class _StatCb:
- """Helper to store file handle's C{fstat}.
-
- """
- def __init__(self):
- """Initializes this class.
-
- """
- self.st = None
-
- def __call__(self, fh):
- """Calls C{fstat} on file handle.
-
- """
- self.st = os.fstat(fh.fileno())
-
-
def _ReadInstanceStatus(filename):
"""Reads an instance status file.
"""
logging.debug("Reading per-group instance status from '%s'", filename)
- statcb = _StatCb()
+ statcb = utils.FileStatHelper()
try:
content = utils.ReadFile(filename, preread=statcb)
except EnvironmentError, err:
opcodes.OpQuery(what=constants.QR_INSTANCE,
fields=["name", "status", "admin_state", "snodes",
"pnode.group.uuid", "snodes.group.uuid"],
- filter=[qlang.OP_EQUAL, "pnode.group.uuid", uuid],
+ qfilter=[qlang.OP_EQUAL, "pnode.group.uuid", uuid],
use_locking=True),
# Get all nodes in group
opcodes.OpQuery(what=constants.QR_NODE,
fields=["name", "bootid", "offline"],
- filter=[qlang.OP_EQUAL, "group.uuid", uuid],
+ qfilter=[qlang.OP_EQUAL, "group.uuid", uuid],
use_locking=True),
]
"""Check node status versus cluster desired state.
"""
+ if not constants.ENABLE_CONFD:
+ logging.warning("Confd use not enabled, cannot do maintenance")
+ return
+
my_name = netutils.Hostname.GetSysName()
req = \
confd.client.ConfdClientRequest(type=constants.CONFD_REQ_NODE_ROLE_BYNAME,
self._last_worker_id = 0
self._workers = []
self._quiescing = False
+ self._active = True
# Terminating workers
self._termworkers = []
finally:
self._lock.release()
+ def SetActive(self, active):
+ """Enable/disable processing of tasks.
+
+ This is different from L{Quiesce} in the sense that this function just
+ changes an internal flag and doesn't wait for the queue to be empty. Tasks
+ already being processed continue normally, but no new tasks will be
+ started. New tasks can still be added.
+
+ @type active: bool
+ @param active: Whether tasks should be processed
+
+ """
+ self._lock.acquire()
+ try:
+ self._active = active
+
+ if active:
+ # Tell all workers to continue processing
+ self._pool_to_worker.notifyAll()
+ finally:
+ self._lock.release()
+
def _WaitForTaskUnlocked(self, worker):
"""Waits for a task for a worker.
return _TERMINATE
# We only wait if there's no task for us.
- if not self._tasks:
+ if not (self._active and self._tasks):
logging.debug("Waiting for tasks")
- # wait() releases the lock and sleeps until notified
- self._pool_to_worker.wait()
+ while True:
+ # wait() releases the lock and sleeps until notified
+ self._pool_to_worker.wait()
- logging.debug("Notified while waiting")
+ logging.debug("Notified while waiting")
- # Were we woken up in order to terminate?
- if self._ShouldWorkerTerminateUnlocked(worker):
- return _TERMINATE
+ # Were we woken up in order to terminate?
+ if self._ShouldWorkerTerminateUnlocked(worker):
+ return _TERMINATE
- if not self._tasks:
- # Spurious notification, ignore
- return None
+ # Just loop if pool is not processing tasks at this time
+ if self._active and self._tasks:
+ break
# Get task from queue and tell pool about it
try:
return True
return False
+ def HasRunningTasks(self):
+ """Checks whether there's at least one task running.
+
+ """
+ self._lock.acquire()
+ try:
+ return self._HasRunningTasksUnlocked()
+ finally:
+ self._lock.release()
+
def Quiesce(self):
"""Waits until the task queue is empty.
document.
+Hypervisor State Parameters
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Using ``--hypervisor-state`` you can set hypervisor specific states as
+pointed out in ``Ganeti Resource Model <design-resource-model.rst>``.
+
+The format is: ``hypervisor:option=value``.
+
+Currently we support the following hypervisor state values:
+
+mem_total
+ Total node memory, as discovered by this hypervisor
+mem_node
+ Memory used by, or reserved for, the node itself; note that some
+ hypervisors can report this in an authoritative way, other not
+mem_hv
+ Memory used either by the hypervisor itself or lost due to instance
+ allocation rounding; usually this cannot be precisely computed, but
+ only roughly estimated
+cpu_total
+ Total node cpu (core) count; usually this can be discovered
+ automatically
+cpu_node
+ Number of cores reserved for the node itself; this can either be
+ discovered or set manually. Only used for estimating how many VCPUs
+ are left for instances
+
+
+Disk State Parameters
+~~~~~~~~~~~~~~~~~~~~~
+
+Using ``--disk-state`` you can set disk specific states as pointed out
+in ``Ganeti Resource Model <design-resource-model.rst>``.
+
+The format is: ``storage_type/identifier:option=value``. Where we
+currently just support ``lvm`` as storage type. The identifier in this
+case is the LVM volume group. By default this is ``xenvg``.
+
+Currently we support the following disk state values:
+
+disk_total
+ Total disk size (usually discovered automatically)
+disk_reserved
+ Reserved disk size; this is a lower limit on the free space, if such a
+ limit is desired
+disk_overhead
+ Disk that is expected to be used by other volumes (set via
+ ``reserved_lvs``); usually should be zero
+
+
Cluster configuration
~~~~~~~~~~~~~~~~~~~~~
The option ``--priority`` sets the priority for opcodes submitted
by the command.
+Defaults
+~~~~~~~~
+
+For certain commands you can use environment variables to provide
+default command line arguments. Just assign the arguments as a string to
+the corresponding environment variable. The format of that variable
+name is **binary**_*command*. **binary** is the name of the ``gnt-*``
+script all upper case and dashes replaced by underscores, and *command*
+is the command invoked on that script.
+
+Currently supported commands are ``gnt-node list``, ``gnt-group list``
+and ``gnt-instance list``. So you can configure default command line
+flags by setting ``GNT_NODE_LIST``, ``GNT_GROUP_LIST`` and
+``GNT_INSTANCE_LIST``.
+
Field formatting
----------------
instance. If no such parameters are specified, the values are
inherited from the export. Possible parameters are:
-memory
- the memory size of the instance; as usual, suffixes can be used to
- denote the unit, otherwise the value is taken in mebibites
+maxmem
+ the maximum memory size of the instance; as usual, suffixes can be
+ used to denote the unit, otherwise the value is taken in mebibytes
+
+minmem
+ the minimum memory size of the instance; as usual, suffixes can be
+ used to denote the unit, otherwise the value is taken in mebibytes
vcpus
the number of VCPUs to assign to the instance (if this value makes
whether the instance is considered in the N+1 cluster checks
(enough redundancy in the cluster to survive a node failure)
+always\_failover
+ ``True`` or ``False``, whether the instance must be failed over
+ (shut down and rebooted) always or it may be migrated (briefly
+ suspended)
+
The ``-t`` options specifies the disk layout type for the instance.
If not passed, the configuration of the original instance is used.
| [{-s|--secondary-ip} *secondary\_ip*]
| [--vg-name *vg-name*]
| [--master-netdev *interface-name*]
+| [--master-netmask *netmask*]
+| [--use-external-mip-script {yes \| no}]
| [{-m|--mac-prefix} *mac-prefix*]
| [--no-lvm-storage]
| [--no-etc-hosts]
| [--file-storage-dir *dir*]
| [--enabled-hypervisors *hypervisors*]
| [{-H|--hypervisor-parameters} *hypervisor*:*hv-param*=*value*[,*hv-param*=*value*...]]
-| [{-B|--backend-parameters} *be-param*=*value* [,*be-param*=*value*...]]
-| [{-N|--nic-parameters} *nic-param*=*value* [,*nic-param*=*value*...]]
+| [{-B|--backend-parameters} *be-param*=*value*[,*be-param*=*value*...]]
+| [{-N|--nic-parameters} *nic-param*=*value*[,*nic-param*=*value*...]]
+| [{-D|--disk-parameters} *disk-template*:*disk-param*=*value*[,*disk-param*=*value*...]]
| [--maintain-node-health {yes \| no}]
| [--uid-pool *user-id pool definition*]
| [{-I|--default-iallocator} *default instance allocator*]
| [--prealloc-wipe-disks {yes \| no}]
| [--node-parameters *ndparams*]
| [{-C|--candidate-pool-size} *candidate\_pool\_size*]
+| [--specs-cpu-count *spec-param*=*value* [,*spec-param*=*value*...]]
+| [--specs-disk-count *spec-param*=*value* [,*spec-param*=*value*...]]
+| [--specs-disk-size *spec-param*=*value* [,*spec-param*=*value*...]]
+| [--specs-mem-size *spec-param*=*value* [,*spec-param*=*value*...]]
+| [--specs-nic-count *spec-param*=*value* [,*spec-param*=*value*...]]
+| [--disk-state *diskstate*]
+| [--hypervisor-state *hvstate*]
| {*clustername*}
This commands is only run once initially on the first node of the
important that all nodes have this interface because you'll need it
for a master failover.
+The ``--master-netmask`` option allows you to specify a netmask for the
+master IP. The netmask must be specified as an integer, and will be
+interpreted as a CIDR netmask. The default value is 32 for an IPv4
+address and 128 for an IPv6 address.
+
+The ``--use-external-mip-script`` option allows you to specify
+whether to use a user-supplied master IP address setup script, whose
+location is ``/etc/ganeti/scripts/master-ip-setup``. If the option value
+is set to False, the default script, whose location is
+``/usr/local/lib/ganeti/tools/master-ip-setup``, will be executed.
+
The ``-m (--mac-prefix)`` option will let you specify a three byte
prefix under which the virtual MAC addresses of your instances will be
generated. The prefix must be specified in the format ``XX:XX:XX`` and
Number of VCPUs to set for an instance by default, must be an
integer, will be set to 1 if no specified.
-memory
- Amount of memory to allocate for an instance by default, can be
- either an integer or an integer followed by a unit (M for mebibytes
- and G for gibibytes are supported), will be set to 128M if not
- specified.
+maxmem
+ Maximum amount of memory to allocate for an instance by default, can
+ be either an integer or an integer followed by a unit (M for
+ mebibytes and G for gibibytes are supported), will be set to 128M if
+ not specified.
+
+minmem
+ Minimum amount of memory to allocate for an instance by default, can
+ be either an integer or an integer followed by a unit (M for
+ mebibytes and G for gibibytes are supported), will be set to 128M if
+ not specified.
auto\_balance
Value of the auto\_balance flag for instances to use by default,
will be set to true if not specified.
+always\_failover
+ Default value for the ``always\_failover`` flag for instances; if
+ not set, ``False`` is used.
+
The ``-N (--nic-parameters)`` option allows you to set the default nic
parameters for the cluster. The parameter format is a comma-separated
network script it is interpreted as a routing table number or
name.
+The ``-D (--disk-parameters)`` option allows you to set the default disk
+template parameters at cluster level. The format used for this option is
+similar to the one used by the ``-H`` option: the disk template name
+must be specified first, followed by a colon and by a comma-separated
+list of key-value pairs. These parameters can only be specified at
+cluster and node group level; the cluster-level parameters are inherited
+by the node group at the moment of its creation, and can be further
+modified at node group level using the **gnt-group**(8) command.
+
+The following is the list of disk parameters available for the **drbd**
+template, with measurement units specified in square brackets at the end
+of the description (when applicable):
+
+resync-rate
+ Static re-synchronization rate. [KiB/s]
+
+data-stripes
+ Number of stripes to use for data LVs.
+
+meta-stripes
+ Number of stripes to use for meta LVs.
+
+disk-barriers
+ What kind of barriers to **disable** for disks. It can either assume
+  the value "n", meaning no barriers disabled, or a non-empty string
+ containing a subset of the characters "bfd". "b" means disable disk
+ barriers, "f" means disable disk flushes, "d" disables disk drains.
+
+meta-barriers
+ Boolean value indicating whether the meta barriers should be
+ disabled (True) or not (False).
+
+metavg
+ String containing the name of the default LVM volume group for DRBD
+ metadata. By default, it is set to ``xenvg``. It can be overridden
+ during the instance creation process by using the ``metavg`` key of
+ the ``--disk`` parameter.
+
+disk-custom
+ String containing additional parameters to be appended to the
+ arguments list of ``drbdsetup disk``.
+
+net-custom
+ String containing additional parameters to be appended to the
+ arguments list of ``drbdsetup net``.
+
+dynamic-resync
+ Boolean indicating whether to use the dynamic resync speed
+ controller or not. If enabled, c-plan-ahead must be non-zero and all
+ the c-* parameters will be used by DRBD. Otherwise, the value of
+ resync-rate will be used as a static resync speed.
+
+c-plan-ahead
+ Agility factor of the dynamic resync speed controller. (the higher,
+ the slower the algorithm will adapt the resync speed). A value of 0
+ (that is the default) disables the controller. [ds]
+
+c-fill-target
+ Maximum amount of in-flight resync data for the dynamic resync speed
+ controller. [sectors]
+
+c-delay-target
+ Maximum estimated peer response latency for the dynamic resync speed
+ controller. [ds]
+
+c-min-rate
+ Minimum resync speed for the dynamic resync speed controller. [KiB/s]
+
+c-max-rate
+ Upper bound on resync speed for the dynamic resync speed controller.
+ [KiB/s]
+
+List of parameters available for the **plain** template:
+
+stripes
+ Number of stripes to use for new LVs.
+
The option ``--maintain-node-health`` allows one to enable/disable
automatic maintenance actions on nodes. Currently these include
automatic shutdown of instances and deactivation of DRBD devices on
offline nodes; in the future it might be extended to automatic
-removal of unknown LVM volumes, etc.
+removal of unknown LVM volumes, etc. Note that this option is only
+useful if the use of ``ganeti-confd`` was enabled at compilation.
The ``--uid-pool`` option initializes the user-id pool. The
*user-id pool definition* can contain a list of user-ids and/or a
that the master will try to keep as master\_candidates. For more
details about this role and other node roles, see the ganeti(7).
+The ``--specs-..`` options specify instance policy on the cluster. Each
+option can have three values: ``min``, ``max`` and ``std``, which can
+also be modified on group level (except for ``std``, which is defined
+once for the entire cluster). Please note that ``std`` values are not
+the same as defaults set by ``--beparams``.
+``--specs-cpu-count`` sets the number of VCPUs that can be used by an
+instance.
+``--specs-disk-count`` sets the number of disks.
+``--specs-disk-size`` limits the disk size for every disk used.
+``--specs-mem-size`` limits the amount of memory available.
+``--specs-nic-count`` sets limits on the number of NICs used.
+
+For details about how to use ``--hypervisor-state`` and ``--disk-state``
+have a look at **ganeti**(7).
+
LIST-TAGS
~~~~~~~~~
| [--no-lvm-storage]
| [--enabled-hypervisors *hypervisors*]
| [{-H|--hypervisor-parameters} *hypervisor*:*hv-param*=*value*[,*hv-param*=*value*...]]
-| [{-B|--backend-parameters} *be-param*=*value* [,*be-param*=*value*...]]
-| [{-N|--nic-parameters} *nic-param*=*value* [,*nic-param*=*value*...]]
+| [{-B|--backend-parameters} *be-param*=*value*[,*be-param*=*value*...]]
+| [{-N|--nic-parameters} *nic-param*=*value*[,*nic-param*=*value*...]]
+| [{-D|--disk-parameters} *disk-template*:*disk-param*=*value*[,*disk-param*=*value*...]]
| [--uid-pool *user-id pool definition*]
| [--add-uids *user-id pool definition*]
| [--remove-uids *user-id pool definition*]
| [--reserved-lvs=*NAMES*]
| [--node-parameters *ndparams*]
| [--master-netdev *interface-name*]
+| [--master-netmask *netmask*]
+| [--use-external-mip-script {yes \| no}]
+| [--hypervisor-state *hvstate*]
+| [--disk-state *diskstate*]
+| [--specs-cpu-count *spec-param*=*value* [,*spec-param*=*value*...]]
+| [--specs-disk-count *spec-param*=*value* [,*spec-param*=*value*...]]
+| [--specs-disk-size *spec-param*=*value* [,*spec-param*=*value*...]]
+| [--specs-mem-size *spec-param*=*value* [,*spec-param*=*value*...]]
+| [--specs-nic-count *spec-param*=*value* [,*spec-param*=*value*...]]
+
Modify the options for the cluster.
The ``--vg-name``, ``--no-lvm-storage``, ``--enabled-hypervisors``,
``-H (--hypervisor-parameters)``, ``-B (--backend-parameters)``,
-``--nic-parameters``, ``-C (--candidate-pool-size)``,
-``--maintain-node-health``, ``--prealloc-wipe-disks``, ``--uid-pool``,
-``--node-parameters``, ``--master-netdev`` options are described in
-the **init** command.
+``-D (--disk-parameters)``, ``--nic-parameters``, ``-C
+(--candidate-pool-size)``, ``--maintain-node-health``,
+``--prealloc-wipe-disks``, ``--uid-pool``, ``--node-parameters``,
+``--master-netdev``, ``--master-netmask`` and
+``--use-external-mip-script`` options are described in the **init**
+command.
+
+The ``--hypervisor-state`` and ``--disk-state`` options are described in
+detail in **ganeti**(7).
The ``--add-uids`` and ``--remove-uids`` options can be used to
modify the user-id pool by adding/removing a list of user-ids or
command. To clear the default iallocator, just pass an empty string
('').
+The ``--specs-..`` options are described in the **init** command.
+
QUEUE
~~~~~
| **renew-crypto** [-f]
| [--new-cluster-certificate] [--new-confd-hmac-key]
| [--new-rapi-certificate] [--rapi-certificate *rapi-cert*]
+| [--new-spice-certificate | --spice-certificate *spice-cert*
+| --spice-ca-certificate *spice-ca-cert*]
| [--new-cluster-domain-secret] [--cluster-domain-secret *filename*]
This command will stop all Ganeti daemons in the cluster and start
use your own certificate, e.g. one signed by a certificate
authority (CA), pass its filename to ``--rapi-certificate``.
+To generate a new self-signed SPICE certificate, used by SPICE
+connections to the KVM hypervisor, specify the
+``--new-spice-certificate`` option. If you want to provide a
+certificate, pass its filename to ``--spice-certificate`` and pass the
+signing CA certificate to ``--spice-ca-certificate``.
+
``--new-cluster-domain-secret`` generates a new, random cluster
domain secret. ``--cluster-domain-secret`` reads the secret from a
file. The cluster domain secret is used to sign information
VERIFY
~~~~~~
-**verify** [--no-nplus1-mem] [--node-group *nodegroup*]
+| **verify** [--no-nplus1-mem] [--node-group *nodegroup*]
+| [--error-codes] [{-I|--ignore-errors} *errorcode*]
+| [{-I|--ignore-errors} *errorcode*...]
Verify correctness of cluster configuration. This is safe with
respect to running instances, and incurs no downtime of the
settings, but will allow to perform verification of a group while other
operations are ongoing in other groups.
+The ``--error-codes`` option outputs each error in the following
+parseable format: *ftype*:*ecode*:*edomain*:*name*:*msg*.
+These fields have the following meaning:
+
+ftype
+ Failure type. Can be *WARNING* or *ERROR*.
+
+ecode
+ Error code of the failure. See below for a list of error codes.
+
+edomain
+ Can be *cluster*, *node* or *instance*.
+
+name
+ Contains the name of the item that is affected from the failure.
+
+msg
+  Contains a descriptive error message about the error.
+
+``gnt-cluster verify`` will have a non-zero exit code if at least one of
+the failures that are found are of type *ERROR*.
+
+The ``--ignore-errors`` option can be used to change this behaviour,
+because it demotes the error represented by the error code received as a
+parameter to a warning. The option must be repeated for each error that
+should be ignored (e.g.: ``-I ENODEVERSION -I ENODEORPHANLV``). The
+``--error-codes`` option can be used to determine the error code of a
+given error.
+
+List of error codes:
+
+@CONSTANTS_ECODES@
+
VERIFY-DISKS
~~~~~~~~~~~~
~~~~~~~~~~
**submit-job** [--verbose] [--timing-stats] [--job-repeat ``N``]
-[--op-repeat ``N``] {opcodes_file...}
+[--op-repeat ``N``] [--each] {opcodes_file...}
This command builds a list of opcodes from files in JSON format and
submits a job per file to the master daemon. It can be used to test
of each of the opcodes in the file to be executed (equivalent to
each file containing N copies of the opcodes).
+The ``--each`` option allows one to submit each job separately (using
+``N`` SubmitJob LUXI requests instead of one SubmitManyJobs request).
+
TEST-JOBQUEUE
~~~~~~~~~~~~~
| **add**
| [--node-parameters=*NDPARAMS*]
| [--alloc-policy=*POLICY*]
+| [{-D|--disk-parameters} *disk-template*:*disk-param*=*value*[,*disk-param*=*value*...]]
+| [--specs-cpu-count *spec-param*=*value* [,*spec-param*=*value*...]]
+| [--specs-disk-count *spec-param*=*value* [,*spec-param*=*value*...]]
+| [--specs-disk-size *spec-param*=*value* [,*spec-param*=*value*...]]
+| [--specs-mem-size *spec-param*=*value* [,*spec-param*=*value*...]]
+| [--specs-nic-count *spec-param*=*value* [,*spec-param*=*value*...]]
+| [--disk-state *diskstate*]
+| [--hypervisor-state *hvstate*]
| {*group*}
Creates a new group with the given name. The node group will be
The ``--node-parameters`` option allows you to set default node
parameters for nodes in the group. Please see **ganeti**(7) for more
-information about supported key=value pairs.
+information about supported key=value pairs and their corresponding
+options.
The ``--alloc-policy`` option allows you to set an allocation policy for
the group at creation time. Possible values are:
(this is the default). Note that prioritization among groups in this
state will be deferred to the iallocator plugin that's being used.
+The ``-D (--disk-parameters)`` option allows you to set the disk
+parameters for the node group; please see the section about
+**gnt-cluster init** in **gnt-cluster**(8) for more information about
+disk parameters.
+
+The ``--specs-..`` options specify instance policy on the cluster. Each
+option can have two values: ``min`` and ``max``.
+``--specs-cpu-count`` sets the number of VCPUs that can be used by an
+instance.
+``--specs-disk-count`` sets the number of disks.
+``--specs-disk-size`` limits the disk size for every disk used.
+``--specs-mem-size`` limits the amount of memory available.
+``--specs-nic-count`` sets limits on the number of NICs used.
+
ASSIGN-NODES
~~~~~~~~~~~~
| **modify**
| [--node-parameters=*NDPARAMS*]
| [--alloc-policy=*POLICY*]
+| [--hypervisor-state *hvstate*]
+| [{-D|--disk-parameters} *disk-template*:*disk-param*=*value*[,*disk-param*=*value*...]]
+| [--disk-state *diskstate*]
+| [--specs-cpu-count *spec-param*=*value* [,*spec-param*=*value*...]]
+| [--specs-disk-count *spec-param*=*value* [,*spec-param*=*value*...]]
+| [--specs-disk-size *spec-param*=*value* [,*spec-param*=*value*...]]
+| [--specs-mem-size *spec-param*=*value* [,*spec-param*=*value*...]]
+| [--specs-nic-count *spec-param*=*value* [,*spec-param*=*value*...]]
| {*group*}
Modifies some parameters from the node group.
-The ``--node-parameters`` and ``--alloc-policy`` optiosn are documented
-in the **add** command above.
+The ``--node-parameters``, ``--alloc-policy``, ``-D
+(--disk-parameters)`` and ``--specs-..`` options are documented in the
+**add** command above. ``--hypervisor-state`` as well as
+``--disk-state`` are documented in detail in **ganeti**(7).
REMOVE
~~~~~~
instance's disks. Ganeti will rename these volumes to the standard
format, and (without installing the OS) will use them as-is for the
instance. This allows migrating instances from non-managed mode
-(e.q. plain KVM with LVM) to being managed via Ganeti. Note that
+(e.g. plain KVM with LVM) to being managed via Ganeti. Please note that
this works only for the \`plain' disk template (see below for
template details).
parameters for the instance. If no such parameters are specified, the
values are inherited from the cluster. Possible parameters are:
-memory
- the memory size of the instance; as usual, suffixes can be used to
- denote the unit, otherwise the value is taken in mebibites
+maxmem
+ the maximum memory size of the instance; as usual, suffixes can be
+ used to denote the unit, otherwise the value is taken in mebibytes
+
+minmem
+ the minimum memory size of the instance; as usual, suffixes can be
+ used to denote the unit, otherwise the value is taken in mebibytes
vcpus
the number of VCPUs to assign to the instance (if this value makes
whether the instance is considered in the N+1 cluster checks
(enough redundancy in the cluster to survive a node failure)
+always\_failover
+ ``True`` or ``False``, whether the instance must be failed over
+ (shut down and rebooted) always or it may be migrated (briefly
+ suspended)
+
+Note that before 2.6 Ganeti had a ``memory`` parameter, which was the
+only value of memory an instance could have. With the
+``maxmem``/``minmem`` change Ganeti guarantees that at least the minimum
+memory is always available for an instance, but allows more memory to be
+used (up to the maximum memory) should it be free.
The ``-H (--hypervisor-parameters)`` option specified the hypervisor
to use for the instance (must be one of the enabled hypervisors on the
n
network boot (PXE)
- The default is not to set an HVM boot order which is interpreted
+ The default is not to set an HVM boot order, which is interpreted
as 'dc'.
For KVM the boot order is either "floppy", "cdrom", "disk" or
Configures whether SPICE should compress audio streams or not.
+spice\_use\_tls
+ Valid for the KVM hypervisor.
+
+ Specifies that the SPICE server must use TLS to encrypt all the
+ traffic with the client.
+
+spice\_tls\_ciphers
+ Valid for the KVM hypervisor.
+
+ Specifies a list of comma-separated ciphers that SPICE should use
+ for TLS connections. For the format, see man cipher(1).
+
+spice\_use\_vdagent
+ Valid for the KVM hypervisor.
+
+ Enables or disables passing mouse events via SPICE vdagent.
+
acpi
Valid for the Xen HVM and KVM hypervisors.
Example::
- # gnt-instance add -t file --disk 0:size=30g -B memory=512 -o debian-etch \
+ # gnt-instance add -t file --disk 0:size=30g -B maxmem=512 -o debian-etch \
-n node1.example.com --file-storage-dir=mysubdir instance1.example.com
- # gnt-instance add -t plain --disk 0:size=30g -B memory=512 -o debian-etch \
- -n node1.example.com instance1.example.com
+ # gnt-instance add -t plain --disk 0:size=30g -B maxmem=1024,minmem=512 \
+ -o debian-etch -n node1.example.com instance1.example.com
# gnt-instance add -t plain --disk 0:size=30g --disk 1:size=100g,vg=san \
- -B memory=512 -o debian-etch -n node1.example.com instance1.example.com
- # gnt-instance add -t drbd --disk 0:size=30g -B memory=512 -o debian-etch \
+ -B maxmem=512 -o debian-etch -n node1.example.com instance1.example.com
+ # gnt-instance add -t drbd --disk 0:size=30g -B maxmem=512 -o debian-etch \
-n node1.example.com:node2.example.com instance2.example.com
"iallocator": "dumb",
"hypervisor": "xen-hvm",
"hvparams": {"acpi": true},
- "backend": {"memory": 512}
+ "backend": {"maxmem": 512, "minmem": 256}
}
}
| [{-t|--disk-template} plain | {-t|--disk-template} drbd -n *new_secondary*] [--no-wait-for-sync]
| [--os-type=*OS* [--force-variant]]
| [{-O|--os-parameters} *param*=*value*... ]
+| [--offline \| --online]
| [--submit]
| {*instance*}
``--force-variant`` is passed. An invalid OS will also be refused,
unless the ``--force`` option is given.
+The ``--online`` and ``--offline`` options are used to transition an
+instance into and out of the ``offline`` state. An instance can be
+turned offline only if it was previously down. The ``--online`` option
+fails if the instance was not in the ``offline`` state, otherwise it
+changes the instance's state to ``down``. These modifications take effect
+immediately.
+
The ``--submit`` option is used to send the job to the master daemon
but not wait for its completion. The job ID will be shown so that it
can be examined via **gnt-job info**.
-All the changes take effect at the next restart. If the instance is
+Most of the changes take effect at the next restart. If the instance is
running, there is no effect on the instance.
REINSTALL
forth, e.g.::
# gnt-instance start -H kernel_args="single" instance1
- # gnt-instance start -B memory=2048 instance2
+ # gnt-instance start -B maxmem=2048 instance2
The first form will start the instance instance1 in single-user mode,
The option ``-f`` will skip the prompting for confirmation.
If ``--allow-failover`` is specified it tries to fallback to failover if
-it already can determine that a migration wont work (i.e. if the
-instance is shutdown). Please note that the fallback will not happen
+it already can determine that a migration won't work (e.g. if the
+instance is shut down). Please note that the fallback will not happen
during execution. If a migration fails during execution it still fails.
Example (and expected output)::
# gnt-instance migrate instance1
- Migrate will happen to the instance instance1. Note that migration is
- **experimental** in this version. This might impact the instance if
- anything goes wrong. Continue?
+ Instance instance1 will be migrated. Note that migration
+ might impact the instance if anything goes wrong (e.g. due to bugs in
+ the hypervisor). Continue?
y/[n]/?: y
+ Migrating instance instance1.example.com
* checking disk consistency between source and target
- * ensuring the target is in secondary mode
+ * switching node node2.example.com to secondary mode
+ * changing into standalone mode
* changing disks into dual-master mode
- - INFO: Waiting for instance instance1 to sync disks.
- - INFO: Instance instance1's disks are in sync.
+ * wait until resync is done
+ * preparing node2.example.com to accept the instance
* migrating instance to node2.example.com
- * changing the instance's disks on source node to secondary
- - INFO: Waiting for instance instance1 to sync disks.
- - INFO: Instance instance1's disks are in sync.
- * changing the instance's disks to single-master
+ * switching node node1.example.com to secondary mode
+ * wait until resync is done
+ * changing into standalone mode
+ * changing disks into single-master mode
+ * wait until resync is done
+ * done
#
| [{-g|--node-group} *nodegroup*]
| [--master-capable=``yes|no``] [--vm-capable=``yes|no``]
| [--node-parameters *ndparams*]
+| [--disk-state *diskstate*]
+| [--hypervisor-state *hvstate*]
| {*nodename*}
Adds the given node to the cluster.
specific node group, specified by UUID or name. If only one node group
exists you can skip this option, otherwise it's mandatory.
-The ``vm_capable``, ``master_capable`` and ``ndparams`` options are
-described in **ganeti**(7), and are used to set the properties of the
-new node.
+The ``vm_capable``, ``master_capable``, ``ndparams``, ``diskstate`` and
+``hvstate`` options are described in **ganeti**(7), and are used to set
+the properties of the new node.
Example::
| [{-s|--secondary-ip} *secondary_ip*]
| [--node-parameters *ndparams*]
| [--node-powered=``yes|no``]
+| [--hypervisor-state *hvstate*]
+| [--disk-state *diskstate*]
| {*node*}
This command changes the role of the node. Each options takes
**powercycle** [``--yes``] [``--force``] {*node*}
This command (tries to) forcefully reboot a node. It is a command
-that can be used if the node environemnt is broken, such that the
-admin can no longer login over ssh, but the Ganeti node daemon is
+that can be used if the node environment is broken, such that the
+admin can no longer login over SSH, but the Ganeti node daemon is
still working.
Note that this command is not guaranteed to work; it depends on the
hypervisor how effective is the reboot attempt. For Linux, this
-command require that the kernel option CONFIG\_MAGIC\_SYSRQ is
+command requires the kernel option ``CONFIG_MAGIC_SYSRQ`` to be
enabled.
The ``--yes`` option can be used to skip confirmation, while the
man page **htools**(1) for more details about this option.
-t *datafile*, --text-data=*datafile*
- The name of the file holding cluster information, to override the
- data in the JSON request itself. This is mostly used for debugging.
+ The name of the file holding cluster information, to override the data
+ in the JSON request itself. This is mostly used for debugging. The
+ format of the file is described in the man page **htools**(1).
--simulate *description*
- Similar to the **-t** option, this allows overriding the cluster
- data with a simulated cluster. For details about the description,
- see the man page **hspace**(1).
+ Backend specification: similar to the **-t** option, this allows
+ overriding the cluster data with a simulated cluster. For details
+ about the description, see the man page **htools**(1).
-S *filename*, --save-cluster=*filename*
If given, the state of the cluster before and the iallocator run is
saved to a file named *filename.pre-ialloc*, respectively
*filename.post-ialloc*. This allows re-feeding the cluster state to
- any of the htools utilities.
+ any of the htools utilities via the ``-t`` option.
-v
This option increases verbosity and can be used for debugging in order
Prints the before and after instance map. This is less useful as the
node status, but it can help in understanding instance moves.
--o, --oneline
- Only shows a one-line output from the program, designed for the case
- when one wants to look at multiple clusters at once and check their
- status.
-
- The line will contain four fields:
-
- - initial cluster score
- - number of steps in the solution
- - final cluster score
- - improvement in the cluster score
-
-O *name*
This option (which can be given multiple times) will mark nodes as
being *offline*. This means a couple of things:
metrics and thus the influence of the dynamic utilisation will be
practically insignificant.
--t *datafile*, --text-data=*datafile*
- The name of the file holding node and instance information (if not
- collecting via RAPI or LUXI). This or one of the other backends must
- be selected.
-
-S *filename*, --save-cluster=*filename*
If given, the state of the cluster before the balancing is saved to
the given file plus the extension "original"
(i.e. *filename*.original), and the state at the end of the
balancing is saved to the given file plus the extension "balanced"
(i.e. *filename*.balanced). This allows re-feeding the cluster state
- to either hbal itself or for example hspace.
+ to either hbal itself or for example hspace via the ``-t`` option.
+
+-t *datafile*, --text-data=*datafile*
+ Backend specification: the name of the file holding node and instance
+ information (if not collecting via RAPI or LUXI). This or one of the
+ other backends must be selected. The option is described in the man
+ page **htools**(1).
-m *cluster*
- Collect data directly from the *cluster* given as an argument via
- RAPI. If the argument doesn't contain a colon (:), then it is
- converted into a fully-built URL via prepending ``https://`` and
- appending the default RAPI port, otherwise it's considered a
- fully-specified URL and is used as-is.
+ Backend specification: collect data directly from the *cluster* given
+ as an argument via RAPI. The option is described in the man page
+ **htools**(1).
-L [*path*]
- Collect data directly from the master daemon, which is to be
- contacted via the luxi (an internal Ganeti protocol). An optional
- *path* argument is interpreted as the path to the unix socket on
- which the master daemon listens; otherwise, the default path used by
- ganeti when installed with *--localstatedir=/var* is used.
+ Backend specification: collect data directly from the master daemon,
+ which is to be contacted via LUXI (an internal Ganeti protocol). The
+ option is described in the man page **htools**(1).
-X
When using the Luxi backend, hbal can also execute the given
number. For example, specifying *disk-ratio* as **0.25** means that
at least one quarter of disk space should be left free on nodes.
+-l *rounds*, --max-length=*rounds*
+ Restrict the number of instance allocations to this length. This is
+ not very useful in practice, but can be used for testing hspace
+ itself, or to limit the runtime for very big clusters.
+
-p, --print-nodes
Prints the before and after node status, in a format designed to allow
the user to understand the node's most important parameters. See the
are reported by RAPI as such, or that have "?" in file-based input
in any numeric fields.
--t *datafile*, --text-data=*datafile*
- The name of the file holding node and instance information (if not
- collecting via RAPI or LUXI). This or one of the other backends must
- be selected.
-
-S *filename*, --save-cluster=*filename*
If given, the state of the cluster at the end of the allocation is
saved to a file named *filename.alloc*, and if tiered allocation is
enabled, the state after tiered allocation will be saved to
*filename.tiered*. This allows re-feeding the cluster state to
either hspace itself (with different parameters) or for example
- hbal.
+ hbal, via the ``-t`` option.
+
+-t *datafile*, --text-data=*datafile*
+ Backend specification: the name of the file holding node and instance
+ information (if not collecting via RAPI or LUXI). This or one of the
+ other backends must be selected. The option is described in the man
+ page **htools**(1).
-m *cluster*
- Collect data directly from the *cluster* given as an argument via
- RAPI. If the argument doesn't contain a colon (:), then it is
- converted into a fully-built URL via prepending ``https://`` and
- appending the default RAPI port, otherwise it's considered a
- fully-specified URL and is used as-is.
+ Backend specification: collect data directly from the *cluster* given
+ as an argument via RAPI. The option is described in the man page
+ **htools**(1).
-L [*path*]
- Collect data directly from the master daemon, which is to be
- contacted via the luxi (an internal Ganeti protocol). An optional
- *path* argument is interpreted as the path to the unix socket on
- which the master daemon listens; otherwise, the default path used by
- ganeti when installed with *--localstatedir=/var* is used.
+ Backend specification: collect data directly from the master daemon,
+ which is to be contacted via LUXI (an internal Ganeti protocol). The
+ option is described in the man page **htools**(1).
--simulate *description*
- Instead of using actual data, build an empty cluster given a node
- description. The *description* parameter must be a comma-separated
- list of five elements, describing in order:
-
- - the allocation policy for this node group
- - the number of nodes in the cluster
- - the disk size of the nodes (default in mebibytes, units can be used)
- - the memory size of the nodes (default in mebibytes, units can be used)
- - the cpu core count for the nodes
-
- An example description would be **preferred,B20,100G,16g,4**
- describing a 20-node cluster where each node has 100GB of disk
- space, 16GiB of memory and 4 CPU cores. Note that all nodes must
- have the same specs currently.
-
- This option can be given multiple times, and each new use defines a
- new node group. Hence different node groups can have different
- allocation policies and node count/specifications.
+ Backend specification: similar to the **-t** option, this allows
+ overriding the cluster data with a simulated cluster. For details
+ about the description, see the man page **htools**(1).
--tiered-alloc *spec*
Besides the standard, fixed-size allocation, also do a tiered
lNet
the dynamic net load (if the information is available)
+-t *datafile*, --text-data=*datafile*
+ Backend specification: the name of the file holding node and instance
+ information (if not collecting via RAPI or LUXI). This or one of the
+ other backends must be selected. The option is described in the man
+ page **htools**(1).
+
+ The file should contain text data, line-based, with two empty lines
+ separating sections. The lines themselves are column-based, with the
+ pipe symbol (``|``) acting as separator.
+
+ The first section contains group data, with two columns:
+
+ - group name
+ - group uuid
+
+ The second sections contains node data, with the following columns:
+
+ - node name
+ - node total memory
+ - node free memory
+ - node total disk
+ - node free disk
+ - node physical cores
+ - offline field (as ``Y`` or ``N``)
+ - group UUID
+
+ The third section contains instance data, with the fields:
+
+ - instance name
+ - instance memory
+ - instance disk size
+ - instance vcpus
+ - instance status (in Ganeti's format, e.g. ``running`` or ``ERROR_down``)
+  - instance ``auto_balance`` flag (see man page **gnt-instance**(8))
+ - instance primary node
+ - instance secondary node(s), if any
+ - instance disk type (e.g. ``plain`` or ``drbd``)
+ - instance tags
+
+ The fourth and last section contains the cluster tags, with one tag
+ per line (no columns/no column processing).
+
+-m *cluster*
+ Backend specification: collect data directly from the *cluster* given
+ as an argument via RAPI. If the argument doesn't contain a colon (:),
+ then it is converted into a fully-built URL via prepending
+ ``https://`` and appending the default RAPI port, otherwise it is
+ considered a fully-specified URL and used as-is.
+
+-L [*path*]
+ Backend specification: collect data directly from the master daemon,
+ which is to be contacted via LUXI (an internal Ganeti protocol). An
+ optional *path* argument is interpreted as the path to the unix socket
+ on which the master daemon listens; otherwise, the default path used
+ by Ganeti (configured at build time) is used.
+
+--simulate *description*
+ Backend specification: instead of using actual data, build an empty
+ cluster given a node description. The *description* parameter must be
+ a comma-separated list of five elements, describing in order:
+
+ - the allocation policy for this node group (*preferred*, *allocable*
+ or *unallocable*, or alternatively the short forms *p*, *a* or *u*)
+ - the number of nodes in the cluster
+ - the disk size of the nodes (default in mebibytes, units can be used)
+ - the memory size of the nodes (default in mebibytes, units can be used)
+ - the cpu core count for the nodes
+
+ An example description would be **preferred,B20,100G,16g,4**
+ describing a 20-node cluster where each node has 100GB of disk
+ space, 16GiB of memory and 4 CPU cores. Note that all nodes must
+ have the same specs currently.
+
+ This option can be given multiple times, and each new use defines a
+ new node group. Hence different node groups can have different
+ allocation policies and node count/specifications.
+
-v, --verbose
Increase the output verbosity. Each usage of this option will
increase the verbosity (currently more than 2 doesn't make sense)
"""
for test, fn in [
+ ("create-cluster", qa_cluster.TestClusterInitDisk),
("cluster-renew-crypto", qa_cluster.TestClusterRenewCrypto),
("cluster-verify", qa_cluster.TestClusterVerify),
("cluster-reserved-lvs", qa_cluster.TestClusterReservedLvs),
# TODO: add more cluster modify tests
("cluster-modify", qa_cluster.TestClusterModifyBe),
+ ("cluster-modify", qa_cluster.TestClusterModifyDisk),
("cluster-rename", qa_cluster.TestClusterRename),
("cluster-info", qa_cluster.TestClusterVersion),
("cluster-info", qa_cluster.TestClusterInfo),
try:
RunTestIf("node-readd", qa_node.TestNodeReadd, pnode)
RunTestIf("node-modify", qa_node.TestNodeModify, pnode)
+ RunTestIf("delay", qa_cluster.TestDelay, pnode)
finally:
qa_config.ReleaseNode(pnode)
"primary_ip_version": 4,
"os": "debian-etch",
- "mem": "512M",
+ "maxmem": "1024M",
+ "minmem": "512M",
+
+ "# Instance policy specs": null,
+ "ispec_mem_size_max": 1024,
+  "ispec_disk_size_min": 512,
"# Lists of disk sizes": null,
"disk": ["1G", "512M"],
"tags": true,
"rapi": true,
"test-jobqueue": true,
+ "delay": true,
"create-cluster": true,
"cluster-verify": true,
AssertEqual(qa_utils.GetCommandOutput(node["primary"], cmd), content)
+# data for testing failures due to bad keys/values for disk parameters
+_FAIL_PARAMS = ["nonexistent:resync-rate=1",
+ "drbd:nonexistent=1",
+ "drbd:resync-rate=invalid",
+ ]
+
+
+def TestClusterInitDisk():
+ """gnt-cluster init -D"""
+ name = qa_config.get("name")
+ for param in _FAIL_PARAMS:
+ AssertCommand(["gnt-cluster", "init", "-D", param, name], fail=True)
+
+
def TestClusterInit(rapi_user, rapi_secret):
"""gnt-cluster init"""
master = qa_config.GetMasterNode()
cmd.append("--primary-ip-version=%d" %
qa_config.get("primary_ip_version", 4))
+ for spec_type in ("mem-size", "disk-size", "disk-count", "cpu-count",
+ "nic-count"):
+ for spec_val in ("min", "max", "std"):
+ spec = qa_config.get("ispec_%s_%s" %
+ (spec_type.replace('-', '_'), spec_val), None)
+ if spec:
+ cmd.append("--specs-%s=%s=%d" % (spec_type, spec_val, spec))
+
if master.get("secondary", None):
cmd.append("--secondary-ip=%s" % master["secondary"])
cmd.append("--enabled-hypervisors=%s" % htype)
cmd.append(qa_config.get("name"))
-
AssertCommand(cmd)
cmd = ["gnt-cluster", "modify"]
+
# hypervisor parameter modifications
hvp = qa_config.get("hypervisor-parameters", {})
for k, v in hvp.items():
AssertCommand(["gnt-debug", "test-jobqueue"])
+def TestDelay(node):
+ """gnt-debug delay"""
+ AssertCommand(["gnt-debug", "delay", "1"])
+ AssertCommand(["gnt-debug", "delay", "--no-master", "1"])
+ AssertCommand(["gnt-debug", "delay", "--no-master",
+ "-n", node["primary"], "1"])
+
+
def TestClusterReservedLvs():
"""gnt-cluster reserved lvs"""
for fail, cmd in [
AssertCommand(cmd, fail=fail)
+def TestClusterModifyDisk():
+ """gnt-cluster modify -D"""
+ for param in _FAIL_PARAMS:
+ AssertCommand(["gnt-cluster", "modify", "-D", param], fail=True)
+
+
def TestClusterModifyBe():
"""gnt-cluster modify -B"""
for fail, cmd in [
- # mem
- (False, ["gnt-cluster", "modify", "-B", "memory=256"]),
- (False, ["sh", "-c", "gnt-cluster info|grep '^ *memory: 256$'"]),
- (True, ["gnt-cluster", "modify", "-B", "memory=a"]),
- (False, ["gnt-cluster", "modify", "-B", "memory=128"]),
- (False, ["sh", "-c", "gnt-cluster info|grep '^ *memory: 128$'"]),
+ # max/min mem
+ (False, ["gnt-cluster", "modify", "-B", "maxmem=256"]),
+ (False, ["sh", "-c", "gnt-cluster info|grep '^ *maxmem: 256$'"]),
+ (False, ["gnt-cluster", "modify", "-B", "minmem=256"]),
+ (False, ["sh", "-c", "gnt-cluster info|grep '^ *minmem: 256$'"]),
+ (True, ["gnt-cluster", "modify", "-B", "maxmem=a"]),
+ (False, ["sh", "-c", "gnt-cluster info|grep '^ *maxmem: 256$'"]),
+ (True, ["gnt-cluster", "modify", "-B", "minmem=a"]),
+ (False, ["sh", "-c", "gnt-cluster info|grep '^ *minmem: 256$'"]),
+ (False, ["gnt-cluster", "modify", "-B", "maxmem=128,minmem=128"]),
+ (False, ["sh", "-c", "gnt-cluster info|grep '^ *maxmem: 128$'"]),
+ (False, ["sh", "-c", "gnt-cluster info|grep '^ *minmem: 128$'"]),
# vcpus
(False, ["gnt-cluster", "modify", "-B", "vcpus=4"]),
(False, ["sh", "-c", "gnt-cluster info|grep '^ *vcpus: 4$'"]),
def _GetGenericAddParameters():
- params = ["-B", "%s=%s" % (constants.BE_MEMORY, qa_config.get("mem"))]
+ params = ["-B"]
+ params.append("%s=%s,%s=%s" % (constants.BE_MINMEM,
+ qa_config.get(constants.BE_MINMEM),
+ constants.BE_MAXMEM,
+ qa_config.get(constants.BE_MAXMEM)))
for idx, size in enumerate(qa_config.get("disk")):
params.extend(["--disk", "%s:size=%s" % (idx, size)])
return params
instance["name"]])
AssertCommand(["gnt-instance", "start", instance["name"]])
AssertCommand(cmd)
+ AssertCommand(["gnt-instance", "modify", "-B",
+ ("%s=%s" %
+ (constants.BE_ALWAYS_FAILOVER, constants.VALUE_TRUE)),
+ instance["name"]])
+ AssertCommand(cmd, fail=True)
+ AssertCommand(["gnt-instance", "migrate", "--force", "--allow-failover",
+ instance["name"]])
+ AssertCommand(["gnt-instance", "modify", "-B",
+ ("%s=%s" %
+ (constants.BE_ALWAYS_FAILOVER, constants.VALUE_FALSE)),
+ instance["name"]])
+ AssertCommand(cmd)
def TestInstanceInfo(instance):
test_kernel = "/sbin/init"
test_initrd = test_kernel
- orig_memory = qa_config.get("mem")
+ orig_maxmem = qa_config.get(constants.BE_MAXMEM)
+ orig_minmem = qa_config.get(constants.BE_MINMEM)
#orig_bridge = qa_config.get("bridge", "xen-br0")
args = [
- ["-B", "%s=128" % constants.BE_MEMORY],
- ["-B", "%s=%s" % (constants.BE_MEMORY, orig_memory)],
+ ["-B", "%s=128" % constants.BE_MINMEM],
+ ["-B", "%s=128" % constants.BE_MAXMEM],
+ ["-B", "%s=%s,%s=%s" % (constants.BE_MINMEM, orig_minmem,
+ constants.BE_MAXMEM, orig_maxmem)],
["-B", "%s=2" % constants.BE_VCPUS],
["-B", "%s=1" % constants.BE_VCPUS],
["-B", "%s=%s" % (constants.BE_VCPUS, constants.VALUE_DEFAULT)],
+ ["-B", "%s=%s" % (constants.BE_ALWAYS_FAILOVER, constants.VALUE_TRUE)],
+ ["-B", "%s=%s" % (constants.BE_ALWAYS_FAILOVER, constants.VALUE_DEFAULT)],
["-H", "%s=%s" % (constants.HV_KERNEL_PATH, test_kernel)],
["-H", "%s=%s" % (constants.HV_KERNEL_PATH, constants.VALUE_DEFAULT)],
AssertCommand(["gnt-node", "modify", "--master-candidate=yes",
"--auto-promote", node["primary"]])
+ # Test setting secondary IP address
+ AssertCommand(["gnt-node", "modify", "--secondary-ip=%s" % node["secondary"],
+ node["primary"]])
+
def _CreateOobScriptStructure():
"""Create a simple OOB handling script and its structure."""
"""Test adding a new instance via RAPI"""
instance = qa_config.AcquireInstance()
try:
- memory = utils.ParseUnit(qa_config.get("mem"))
disk_sizes = [utils.ParseUnit(size) for size in qa_config.get("disk")]
disks = [{"size": size} for size in disk_sizes]
nics = [{}]
beparams = {
- constants.BE_MEMORY: memory,
+ constants.BE_MAXMEM: utils.ParseUnit(qa_config.get(constants.BE_MAXMEM)),
+ constants.BE_MINMEM: utils.ParseUnit(qa_config.get(constants.BE_MINMEM)),
}
if use_client:
#!/bin/bash
#
-# Copyright (C) 2010 Google Inc.
+# Copyright (C) 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
exit 1
}
+if ! grep -q '^ENABLE_CONFD = ' lib/_autoconf.py; then
+ err "Please update $0, confd enable feature is missing"
+fi
+
+if grep -q '^ENABLE_CONFD = True' lib/_autoconf.py; then
+ DAEMONS="$(echo ganeti-{noded,masterd,rapi,confd})"
+ STOPDAEMONS="$(echo ganeti-{confd,rapi,masterd,noded})"
+else
+ DAEMONS="$(echo ganeti-{noded,masterd,rapi})"
+ STOPDAEMONS="$(echo ganeti-{rapi,masterd,noded})"
+fi
+
$daemon_util >/dev/null 2>&1 &&
err "daemon-util succeeded without command"
err "check-exitcode 11 (not master) didn't return 0"
tmp=$(echo $($daemon_util list-start-daemons))
-test "$tmp" == "$(echo ganeti-{noded,masterd,rapi,confd})" ||
+test "$tmp" == "$DAEMONS" ||
err "list-start-daemons didn't return correct list of daemons"
tmp=$(echo $($daemon_util list-stop-daemons))
-test "$tmp" == "$(echo ganeti-{confd,rapi,masterd,noded})" ||
+test "$tmp" == "$STOPDAEMONS" ||
err "list-stop-daemons didn't return correct list of daemons"
$daemon_util is-daemon-name >/dev/null 2>&1 &&
err "is-daemon-name thinks '$i' is a daemon name"
done
-for i in ganeti-{confd,rapi,masterd,noded}; do
+for i in $DAEMONS; do
$daemon_util is-daemon-name $i >/dev/null 2>&1 ||
err "is-daemon-name doesn't think '$i' is a daemon name"
done
--- /dev/null
+[instance]
+disk0_dump = rawdisk.raw
+nic0_mode = routed
+name = ganeti-test-xen
+hypervisor = xen-pvm
+disk_count = 1
+nic0_mac = aa:00:00:d8:2c:1e
+nic_count = 1
+nic0_link = br0
+nic0_ip = None
+disk0_ivname = disk/0
+disk0_size = 0
+
+[hypervisor]
+root-path = /dev/sda
+kernel_args = ro
+
+[export]
+version = 0
+os = lenny-image
+
+[os]
+
+[backend]
+auto_balance = False
+vcpus = 1
+memory = 512
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--Generated by VMware ovftool 2.0.1 (build-260188), User: , UTC time: 2011-08-17T15:12:11.715742Z-->
+<Envelope vmw:buildId="build-260188" xmlns="http://schemas.dmtf.org/ovf/envelope/1" xmlns:cim="http://schemas.dmtf.org/wbem/wscim/1/common" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:vmw="http://www.vmware.com/schema/ovf" xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+ <References>
+ <File ovf:href="other_disk.vmdk" ovf:id="file1" ovf:size="761627136"/>
+ </References>
+ <DiskSection>
+ <Info>Virtual disk information</Info>
+ <Disk ovf:capacity="16514" ovf:capacityAllocationUnits="byte * 2^20" ovf:diskId="vmdisk1" ovf:fileRef="file1" ovf:format="http://www.vmware.com/interfaces/specifications/vmdk.html#streamOptimized" ovf:populatedSize="2042953728"/>
+ </DiskSection>
+ <NetworkSection>
+ <Info>The list of logical networks</Info>
+ <Network ovf:name="bridged">
+ <Description>The bridged network</Description>
+ </Network>
+ </NetworkSection>
+ <VirtualSystem ovf:id="vm">
+ <Info>A virtual machine</Info>
+ <Name>AyertiennaSUSE.x86_64-0.0.2</Name>
+ <OperatingSystemSection ovf:id="83" vmw:osType="suse64Guest">
+ <Info>The kind of installed guest operating system</Info>
+ </OperatingSystemSection>
+ <VirtualHardwareSection>
+ <Info>Virtual hardware requirements</Info>
+ <System>
+ <vssd:ElementName>Virtual Hardware Family</vssd:ElementName>
+ <vssd:InstanceID>0</vssd:InstanceID>
+ <vssd:VirtualSystemIdentifier>AyertiennaSUSE.x86_64-0.0.2</vssd:VirtualSystemIdentifier>
+ <vssd:VirtualSystemType>vmx-04</vssd:VirtualSystemType>
+ </System>
+ <Item>
+ <rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>
+ <rasd:Description>Number of Virtual CPUs</rasd:Description>
+ <rasd:ElementName>1 virtual CPU(s)</rasd:ElementName>
+ <rasd:InstanceID>1</rasd:InstanceID>
+ <rasd:ResourceType>3</rasd:ResourceType>
+ <rasd:VirtualQuantity>1</rasd:VirtualQuantity>
+ </Item>
+ <Item>
+ <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>
+ <rasd:Description>Memory Size</rasd:Description>
+ <rasd:ElementName>512MB of memory</rasd:ElementName>
+ <rasd:InstanceID>2</rasd:InstanceID>
+ <rasd:ResourceType>4</rasd:ResourceType>
+ <rasd:VirtualQuantity>512</rasd:VirtualQuantity>
+ </Item>
+ <Item>
+ <rasd:Address>0</rasd:Address>
+ <rasd:Description>SCSI Controller</rasd:Description>
+ <rasd:ElementName>scsiController0</rasd:ElementName>
+ <rasd:InstanceID>4</rasd:InstanceID>
+ <rasd:ResourceSubType>lsilogic</rasd:ResourceSubType>
+ <rasd:ResourceType>6</rasd:ResourceType>
+ </Item>
+ <Item>
+ <rasd:Address>0</rasd:Address>
+ <rasd:Description>IDE Controller</rasd:Description>
+ <rasd:ElementName>ideController0</rasd:ElementName>
+ <rasd:InstanceID>5</rasd:InstanceID>
+ <rasd:ResourceType>5</rasd:ResourceType>
+ </Item>
+ <Item>
+ <rasd:AddressOnParent>0</rasd:AddressOnParent>
+ <rasd:ElementName>disk1</rasd:ElementName>
+ <rasd:HostResource>ovf:/disk/vmdisk1</rasd:HostResource>
+ <rasd:InstanceID>8</rasd:InstanceID>
+ <rasd:Parent>4</rasd:Parent>
+ <rasd:ResourceType>17</rasd:ResourceType>
+ </Item>
+ <Item>
+ <rasd:AddressOnParent>2</rasd:AddressOnParent>
+ <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
+ <rasd:Connection>bridged</rasd:Connection>
+ <rasd:Description>E1000 ethernet adapter on "bridged"</rasd:Description>
+ <rasd:ElementName>ethernet0</rasd:ElementName>
+ <rasd:InstanceID>9</rasd:InstanceID>
+ <rasd:ResourceSubType>E1000</rasd:ResourceSubType>
+ <rasd:ResourceType>10</rasd:ResourceType>
+ </Item>
+ </VirtualHardwareSection>
+ </VirtualSystem>
+</Envelope>
+
--- /dev/null
+[instance]
+[hypervisor]
+[export]
+[os]
+[backend]
\ No newline at end of file
--- /dev/null
+<?xml version="1.0"?>
+<Envelope ovf:version="1.0" xml:lang="en-US" xmlns="http://schemas.dmtf.org/ovf/envelope/1" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+ <References>
+ </References>
+ <DiskSection>
+ </DiskSection>
+ <NetworkSection>
+ </NetworkSection>
+ <VirtualSystem>
+ <Info>A virtual machine</Info>
+ <OperatingSystemSection>
+ </OperatingSystemSection>
+ <VirtualHardwareSection>
+ </VirtualHardwareSection>
+ </VirtualSystem>
+</Envelope>
--- /dev/null
+SHA1(ganeti.ovf)= d298200d9044c54b0fde13efaa90e564badc5961
+SHA1(new_disk.vmdk)= 711c48f14c934228b8e117d036c913cdb9d63305
--- /dev/null
+<?xml version="1.0"?>
+<Envelope ovf:version="1.0" xml:lang="en-US" xmlns="http://schemas.dmtf.org/ovf/envelope/1" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:gnt="http://ganeti">
+ <References>
+ <File ovf:href="new_disk.vmdk" ovf:id="file1"/>
+ </References>
+ <DiskSection>
+ <Info>List of the virtual disks used in the package</Info>
+ <Disk ovf:diskId="vmdisk1" ovf:fileRef="file1" ovf:format="http://www.vmware.com/specifications/vmdk.html#sparse"/>
+ </DiskSection>
+ <gnt:GanetiSection>
+ <gnt:Version>0</gnt:Version>
+ <gnt:AutoBalance>False</gnt:AutoBalance>
+ <gnt:Tags></gnt:Tags>
+ <gnt:DiskTemplate>plain</gnt:DiskTemplate>
+ <gnt:OperatingSystem>
+ <gnt:Name>lenny-image</gnt:Name>
+ </gnt:OperatingSystem>
+ <gnt:Network>
+ <gnt:Nic ovf:name="routed">
+ <gnt:Mode>bridged</gnt:Mode>
+ <gnt:MACAddress>aa:00:00:d8:2c:1e</gnt:MACAddress>
+ <gnt:IPAddress>none</gnt:IPAddress>
+ <gnt:Link>xen-br0</gnt:Link>
+ </gnt:Nic>
+ </gnt:Network>
+ <gnt:Hypervisor>
+ <gnt:Name>xen-pvm</gnt:Name>
+ <gnt:Parameters>
+ <gnt:root-path>/dev/sda</gnt:root-path>
+ <gnt:kernel_args>ro</gnt:kernel_args>
+ </gnt:Parameters>
+ </gnt:Hypervisor>
+ </gnt:GanetiSection>
+ <NetworkSection>
+ <Info>Logical networks used in the package</Info>
+ <Network ovf:name="routed">
+ <Description>Logical network used by this appliance.</Description>
+ </Network>
+ </NetworkSection>
+ <VirtualSystem ovf:id="New-shiny-instance">
+ <Info>A virtual machine</Info>
+ <Name>ganeti-test-xen</Name>
+ <OperatingSystemSection ovf:id="93">
+ <Info>The kind of installed guest operating system</Info>
+ <Description>Ubuntu</Description>
+ </OperatingSystemSection>
+ <VirtualHardwareSection>
+ <Info>Virtual hardware requirements for a virtual machine</Info>
+ <System>
+ <vssd:ElementName>Virtual Hardware Family</vssd:ElementName>
+ <vssd:InstanceID>0</vssd:InstanceID>
+ <vssd:VirtualSystemIdentifier>Ubuntu-freshly-created</vssd:VirtualSystemIdentifier>
+ <vssd:VirtualSystemType>virtualbox-2.2</vssd:VirtualSystemType>
+ </System>
+ <Item>
+ <rasd:Caption>1 virtual CPU</rasd:Caption>
+ <rasd:ElementName>1 virtual CPU</rasd:ElementName>
+ <rasd:Description>Number of virtual CPUs</rasd:Description>
+ <rasd:InstanceID>1</rasd:InstanceID>
+ <rasd:ResourceType>3</rasd:ResourceType>
+ <rasd:VirtualQuantity>1</rasd:VirtualQuantity>
+ </Item>
+ <Item>
+ <rasd:Caption>2048 MB of memory</rasd:Caption>
+ <rasd:ElementName>2048 MB of memory</rasd:ElementName>
+ <rasd:Description>Memory Size</rasd:Description>
+ <rasd:InstanceID>2</rasd:InstanceID>
+ <rasd:ResourceType>4</rasd:ResourceType>
+ <rasd:AllocationUnits>MegaBytes</rasd:AllocationUnits>
+ <rasd:VirtualQuantity>2048</rasd:VirtualQuantity>
+ </Item>
+ <Item>
+ <rasd:Caption>Ethernet adapter on 'NAT'</rasd:Caption>
+ <rasd:ElementName>Ethernet adapter on 'NAT'</rasd:ElementName>
+ <rasd:InstanceID>5</rasd:InstanceID>
+ <rasd:ResourceType>10</rasd:ResourceType>
+ <rasd:ResourceSubType>PCNet32</rasd:ResourceSubType>
+ <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
+ <rasd:Connection></rasd:Connection>
+ </Item>
+ <Item>
+ <rasd:Caption>disk1</rasd:Caption>
+ <rasd:ElementName>disk1</rasd:ElementName>
+ <rasd:Description>Disk Image</rasd:Description>
+ <rasd:InstanceID>7</rasd:InstanceID>
+ <rasd:ResourceType>17</rasd:ResourceType>
+ <rasd:HostResource>/disk/vmdisk1</rasd:HostResource>
+ <rasd:Parent>3</rasd:Parent>
+ <rasd:AddressOnParent>0</rasd:AddressOnParent>
+ </Item>
+ </VirtualHardwareSection>
+ </VirtualSystem>
+</Envelope>
--- /dev/null
+<?xml version="1.0"?>
+<Envelope ovf:version="1.0" xml:lang="en-US" xmlns="http://schemas.dmtf.org/ovf/envelope/1" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:gnt="http://ganeti">
+ <References>
+ <File ovf:href="compr_disk.vmdk.gz" ovf:compression="gzip" ovf:id="file1"/>
+ </References>
+ <DiskSection>
+ <Info>List of the virtual disks used in the package</Info>
+ <Disk ovf:diskId="vmdisk1" ovf:fileRef="file1" ovf:format="http://www.vmware.com/specifications/vmdk.html#sparse"/>
+ </DiskSection>
+ <gnt:GanetiSection>
+ <gnt:Version>0</gnt:Version>
+ <gnt:AutoBalance>False</gnt:AutoBalance>
+ <gnt:Tags></gnt:Tags>
+ <gnt:DiskTemplate>plain</gnt:DiskTemplate>
+ <gnt:OperatingSystem>
+ <gnt:Name>lenny-image</gnt:Name>
+ </gnt:OperatingSystem>
+ <gnt:Network>
+ <gnt:Nic ovf:name="routed">
+ <gnt:Mode>bridged</gnt:Mode>
+ <gnt:MACAddress>aa:00:00:d8:2c:1e</gnt:MACAddress>
+ <gnt:IPAddress>none</gnt:IPAddress>
+ <gnt:Link>xen-br0</gnt:Link>
+ </gnt:Nic>
+ </gnt:Network>
+ <gnt:Hypervisor>
+ <gnt:Name>xen-pvm</gnt:Name>
+ <gnt:Parameters>
+ <gnt:root-path>/dev/sda</gnt:root-path>
+ <gnt:kernel_args>ro</gnt:kernel_args>
+ </gnt:Parameters>
+ </gnt:Hypervisor>
+ </gnt:GanetiSection>
+ <NetworkSection>
+ <Info>Logical networks used in the package</Info>
+ <Network ovf:name="routed">
+ <Description>Logical network used by this appliance.</Description>
+ </Network>
+ </NetworkSection>
+ <VirtualSystem ovf:id="New-shiny-instance">
+ <Info>A virtual machine</Info>
+ <Name>ganeti-test-xen</Name>
+ <OperatingSystemSection ovf:id="93">
+ <Info>The kind of installed guest operating system</Info>
+ <Description>Ubuntu</Description>
+ </OperatingSystemSection>
+ <VirtualHardwareSection>
+ <Info>Virtual hardware requirements for a virtual machine</Info>
+ <System>
+ <vssd:ElementName>Virtual Hardware Family</vssd:ElementName>
+ <vssd:InstanceID>0</vssd:InstanceID>
+ <vssd:VirtualSystemIdentifier>Ubuntu-freshly-created</vssd:VirtualSystemIdentifier>
+ <vssd:VirtualSystemType>virtualbox-2.2</vssd:VirtualSystemType>
+ </System>
+ <Item>
+ <rasd:Caption>1 virtual CPU</rasd:Caption>
+ <rasd:ElementName>1 virtual CPU</rasd:ElementName>
+ <rasd:Description>Number of virtual CPUs</rasd:Description>
+ <rasd:InstanceID>1</rasd:InstanceID>
+ <rasd:ResourceType>3</rasd:ResourceType>
+ <rasd:VirtualQuantity>1</rasd:VirtualQuantity>
+ </Item>
+ <Item>
+ <rasd:Caption>2048 MB of memory</rasd:Caption>
+ <rasd:ElementName>2048 MB of memory</rasd:ElementName>
+ <rasd:Description>Memory Size</rasd:Description>
+ <rasd:InstanceID>2</rasd:InstanceID>
+ <rasd:ResourceType>4</rasd:ResourceType>
+ <rasd:AllocationUnits>MegaBytes</rasd:AllocationUnits>
+ <rasd:VirtualQuantity>2048</rasd:VirtualQuantity>
+ </Item>
+ <Item>
+ <rasd:Caption>Ethernet adapter on 'NAT'</rasd:Caption>
+ <rasd:ElementName>Ethernet adapter on 'NAT'</rasd:ElementName>
+ <rasd:InstanceID>5</rasd:InstanceID>
+ <rasd:ResourceType>10</rasd:ResourceType>
+ <rasd:ResourceSubType>PCNet32</rasd:ResourceSubType>
+ <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
+ <rasd:Connection></rasd:Connection>
+ </Item>
+ <Item>
+ <rasd:Caption>disk1</rasd:Caption>
+ <rasd:ElementName>disk1</rasd:ElementName>
+ <rasd:Description>Disk Image</rasd:Description>
+ <rasd:InstanceID>7</rasd:InstanceID>
+ <rasd:ResourceType>17</rasd:ResourceType>
+ <rasd:HostResource>/disk/vmdisk1</rasd:HostResource>
+ <rasd:Parent>3</rasd:Parent>
+ <rasd:AddressOnParent>0</rasd:AddressOnParent>
+ </Item>
+ </VirtualHardwareSection>
+ </VirtualSystem>
+</Envelope>
--- /dev/null
+[instance]
+disk0_dump = iamnothere.raw
+nic0_mode = nic
+name = ganeti-test-xen
+disk_count = 1
+nic0_mac = aa:00:00:d8:2c:1e
+nic_count = 1
+nic0_link = xen-br0
+nic0_ip = None
+disk0_ivname = disk/0
+disk0_size = 0
+
+[hypervisor]
+root-path = /dev/sda
+kernel_args = ro
+
+[export]
+version = 0
+
+[os]
+
+[backend]
+auto_balance = False
--- /dev/null
+<?xml version="1.0"?>
+<Envelope ovf:version="1.0" xml:lang="en-US" xmlns="http://schemas.dmtf.org/ovf/envelope/1" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+ <References>
+ <File ovf:href="second_disk.vmdk" ovf:id="file2"/>
+ </References>
+ <DiskSection>
+ <Info>List of the virtual disks used in the package</Info>
+ <Disk ovf:diskId="vmdisk1" ovf:fileRef="file1" ovf:format="http://www.vmware.com/specifications/vmdk.html#sparse"/>
+ <Disk ovf:diskId="vmdisk2" ovf:fileRef="file2" ovf:format="http://www.vmware.com/specifications/vmdk.html#sparse"/>
+ </DiskSection>
+ <NetworkSection>
+ <Info>Logical networks used in the package</Info>
+ <Network ovf:name="NAT">
+ <Description>Logical network used by this appliance.</Description>
+ </Network>
+ </NetworkSection>
+ <VirtualSystem ovf:id="Ubuntu-freshly-created">
+ <Info>A virtual machine</Info>
+ <OperatingSystemSection ovf:id="93">
+ <Info>The kind of installed guest operating system</Info>
+ <Description>Ubuntu</Description>
+ </OperatingSystemSection>
+ <VirtualHardwareSection>
+ <Info>Virtual hardware requirements for a virtual machine</Info>
+ <System>
+ <vssd:ElementName>Virtual Hardware Family</vssd:ElementName>
+ <vssd:InstanceID>0</vssd:InstanceID>
+ <vssd:VirtualSystemIdentifier>Ubuntu-freshly-created</vssd:VirtualSystemIdentifier>
+ <vssd:VirtualSystemType>virtualbox-2.2</vssd:VirtualSystemType>
+ </System>
+ <Item>
+ <rasd:Caption>1 virtual CPU</rasd:Caption>
+ <rasd:ElementName>1 virtual CPU</rasd:ElementName>
+ <rasd:Description>Number of virtual CPUs</rasd:Description>
+ <rasd:InstanceID>1</rasd:InstanceID>
+ <rasd:ResourceType>3</rasd:ResourceType>
+ <rasd:VirtualQuantity>1</rasd:VirtualQuantity>
+ </Item>
+ <Item>
+ <rasd:Caption>2048 MB of memory</rasd:Caption>
+ <rasd:ElementName>2048 MB of memory</rasd:ElementName>
+ <rasd:Description>Memory Size</rasd:Description>
+ <rasd:InstanceID>2</rasd:InstanceID>
+ <rasd:ResourceType>4</rasd:ResourceType>
+ <rasd:AllocationUnits>MegaBytes</rasd:AllocationUnits>
+ <rasd:VirtualQuantity>2048</rasd:VirtualQuantity>
+ </Item>
+ <Item>
+ <rasd:Caption>ideController0</rasd:Caption>
+ <rasd:ElementName>ideController0</rasd:ElementName>
+ <rasd:Description>IDE Controller</rasd:Description>
+ <rasd:InstanceID>3</rasd:InstanceID>
+ <rasd:ResourceType>5</rasd:ResourceType>
+ <rasd:ResourceSubType>PIIX4</rasd:ResourceSubType>
+ <rasd:Address>1</rasd:Address>
+ </Item>
+ <Item>
+ <rasd:Caption>Ethernet adapter on 'NAT'</rasd:Caption>
+ <rasd:ElementName>Ethernet adapter on 'NAT'</rasd:ElementName>
+ <rasd:InstanceID>5</rasd:InstanceID>
+ <rasd:ResourceType>10</rasd:ResourceType>
+ <rasd:ResourceSubType>PCNet32</rasd:ResourceSubType>
+ <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
+ <rasd:Connection>NAT</rasd:Connection>
+ </Item>
+ <Item>
+ <rasd:Caption>disk1</rasd:Caption>
+ <rasd:ElementName>disk1</rasd:ElementName>
+ <rasd:Description>Disk Image</rasd:Description>
+ <rasd:InstanceID>7</rasd:InstanceID>
+ <rasd:ResourceType>17</rasd:ResourceType>
+ <rasd:HostResource>/disk/vmdisk1</rasd:HostResource>
+ <rasd:Parent>3</rasd:Parent>
+ <rasd:AddressOnParent>0</rasd:AddressOnParent>
+ </Item>
+ <Item>
+ <rasd:Caption>disk1</rasd:Caption>
+ <rasd:ElementName>disk1</rasd:ElementName>
+ <rasd:Description>Disk Image</rasd:Description>
+ <rasd:InstanceID>9</rasd:InstanceID>
+ <rasd:ResourceType>17</rasd:ResourceType>
+ <rasd:HostResource>/disk/vmdisk1</rasd:HostResource>
+ <rasd:Parent>3</rasd:Parent>
+ <rasd:AddressOnParent>0</rasd:AddressOnParent>
+ </Item>
+ </VirtualHardwareSection>
+ </VirtualSystem>
+</Envelope>
--- /dev/null
+[instance]
+disk0_dump = rawdisk.raw
+nic0_mode = bridged
+name = ganeti-test-xen
+hypervisor = xen-pvm
+disk_count = 1
+nic0_mac = aa:00:00:d8:2c:1e
+nic_count = 1
+nic0_link = xen-br0
+nic0_ip = None
+disk0_ivname = disk/0
+disk0_size = 0
+
+[hypervisor]
+root-path = /dev/sda
+kernel_args = ro
+
+[export]
+version = 0
+
+[os]
+
+[backend]
+auto_balance = False
+vcpus = 1
+memory = 2048
--- /dev/null
+[instance]
+disk0_dump = other/rawdisk.raw
+nic0_mode = bridged
+name = ganeti-test-xen
+hypervisor = xen-pvm
+disk_count = 1
+nic0_mac = aa:00:00:d8:2c:1e
+nic_count = 1
+nic0_link = xen-br0
+nic0_ip = None
+disk0_ivname = disk/0
+disk0_size = 0
+
+[hypervisor]
+root-path = /dev/sda
+kernel_args = ro
+
+[export]
+version = 0
+os = lenny-image
+
+[os]
+
+[backend]
+auto_balance = False
+vcpus = 1
+memory = 2048
--- /dev/null
+<?xml version="1.0"?>
+<Envelope ovf:version="1.0" xml:lang="en-US" xmlns="http://schemas.dmtf.org/ovf/envelope/1" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+ <References>
+ <File ovf:href="new_disk.vmdk" ovf:id="file1"/>
+ <File ovf:href="second_disk.vmdk" ovf:id="file2"/>
+ </References>
+ <DiskSection>
+ <Info>List of the virtual disks used in the package</Info>
+ <Disk ovf:diskId="vmdisk1" ovf:fileRef="file1" ovf:format="http://www.vmware.com/specifications/vmdk.html#sparse"/>
+ <Disk ovf:diskId="vmdisk2" ovf:fileRef="file2" ovf:format="http://www.vmware.com/specifications/vmdk.html#sparse"/>
+ </DiskSection>
+ <NetworkSection>
+ <Info>Logical networks used in the package</Info>
+ <Network ovf:name="bridged">
+ <Description>Logical network used by this appliance.</Description>
+ </Network>
+ </NetworkSection>
+ <VirtualSystem ovf:id="Ubuntu-freshly-created">
+ <Info>A virtual machine</Info>
+ <OperatingSystemSection ovf:id="93">
+ <Info>The kind of installed guest operating system</Info>
+ <Description>Ubuntu</Description>
+ </OperatingSystemSection>
+ <VirtualHardwareSection>
+ <Info>Virtual hardware requirements for a virtual machine</Info>
+ <System>
+ <vssd:ElementName>Virtual Hardware Family</vssd:ElementName>
+ <vssd:InstanceID>0</vssd:InstanceID>
+ <vssd:VirtualSystemIdentifier>Ubuntu-freshly-created</vssd:VirtualSystemIdentifier>
+ <vssd:VirtualSystemType>virtualbox-2.2</vssd:VirtualSystemType>
+ </System>
+ <Item>
+ <rasd:Caption>1 virtual CPU</rasd:Caption>
+ <rasd:ElementName>1 virtual CPU</rasd:ElementName>
+ <rasd:Description>Number of virtual CPUs</rasd:Description>
+ <rasd:InstanceID>1</rasd:InstanceID>
+ <rasd:ResourceType>3</rasd:ResourceType>
+ <rasd:VirtualQuantity>1</rasd:VirtualQuantity>
+ </Item>
+ <Item>
+ <rasd:Caption>2048 MB of memory</rasd:Caption>
+ <rasd:ElementName>2048 MB of memory</rasd:ElementName>
+ <rasd:Description>Memory Size</rasd:Description>
+ <rasd:InstanceID>2</rasd:InstanceID>
+ <rasd:ResourceType>4</rasd:ResourceType>
+ <rasd:AllocationUnits>MegaBytes</rasd:AllocationUnits>
+ <rasd:VirtualQuantity>2048</rasd:VirtualQuantity>
+ </Item>
+ <Item>
+ <rasd:Caption>ideController0</rasd:Caption>
+ <rasd:ElementName>ideController0</rasd:ElementName>
+ <rasd:Description>IDE Controller</rasd:Description>
+ <rasd:InstanceID>3</rasd:InstanceID>
+ <rasd:ResourceType>5</rasd:ResourceType>
+ <rasd:ResourceSubType>PIIX4</rasd:ResourceSubType>
+ <rasd:Address>1</rasd:Address>
+ </Item>
+ <Item>
+ <rasd:Caption>Ethernet adapter on 'NAT'</rasd:Caption>
+ <rasd:ElementName>Ethernet adapter on 'NAT'</rasd:ElementName>
+ <rasd:InstanceID>5</rasd:InstanceID>
+ <rasd:ResourceType>10</rasd:ResourceType>
+ <rasd:ResourceSubType>PCNet32</rasd:ResourceSubType>
+ <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
+ <rasd:Connection>bridged</rasd:Connection>
+ </Item>
+ <Item>
+ <rasd:Caption>disk1</rasd:Caption>
+ <rasd:ElementName>disk1</rasd:ElementName>
+ <rasd:Description>Disk Image</rasd:Description>
+ <rasd:InstanceID>7</rasd:InstanceID>
+ <rasd:ResourceType>17</rasd:ResourceType>
+ <rasd:HostResource>/disk/vmdisk1</rasd:HostResource>
+ <rasd:Parent>3</rasd:Parent>
+ <rasd:AddressOnParent>0</rasd:AddressOnParent>
+ </Item>
+ <Item>
+ <rasd:Caption>disk1</rasd:Caption>
+ <rasd:ElementName>disk1</rasd:ElementName>
+ <rasd:Description>Disk Image</rasd:Description>
+ <rasd:InstanceID>9</rasd:InstanceID>
+ <rasd:ResourceType>17</rasd:ResourceType>
+ <rasd:HostResource>/disk/vmdisk1</rasd:HostResource>
+ <rasd:Parent>3</rasd:Parent>
+ <rasd:AddressOnParent>0</rasd:AddressOnParent>
+ </Item>
+ </VirtualHardwareSection>
+ </VirtualSystem>
+</Envelope>
--- /dev/null
+It's just wrong
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--Generated by VMware ovftool 2.0.1 (build-260188), User: , UTC time: 2011-08-17T15:12:11.715742Z-->
+<Envelope vmw:buildId="build-260188" xmlns="http://schemas.dmtf.org/ovf/envelope/1" xmlns:cim="http://schemas.dmtf.org/wbem/wscim/1/common" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:vmw="http://www.vmware.com/schema/ovf" xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+ <References>
+ <File ovf:href="AyertiennaSUSE.x86_64-0.0.2-disk1.vmdk" ovf:id="file1" ovf:size="761627136"/>
+ </References>
+ <DiskSection>
+ <Info>Virtual disk information</Info>
+ <Disk ovf:capacity="16514" ovf:capacityAllocationUnits="byte * 2^20" ovf:diskId="vmdisk1" ovf:fileRef="file1" ovf:format="http://www.vmware.com/interfaces/specifications/vmdk.html#streamOptimized" ovf:populatedSize="2042953728"/>
+ </DiskSection>
+ <NetworkSection>
+ <Info>The list of logical networks</Info>
+ <Network ovf:name="bridged">
+ <Description>The bridged network</Description>
+ </Network>
+ </NetworkSection>
+ <VirtualSystem ovf:id="vm">
+ <Info>A virtual machine</Info>
+ <Name>AyertiennaSUSE.x86_64-0.0.2</Name>
+ <OperatingSystemSection ovf:id="83" vmw:osType="suse64Guest">
+ <Info>The kind of installed guest operating system</Info>
+ </OperatingSystemSection>
+ <VirtualHardwareSection>
+ <Info>Virtual hardware requirements</Info>
+ <System>
+ <vssd:ElementName>Virtual Hardware Family</vssd:ElementName>
+ <vssd:InstanceID>0</vssd:InstanceID>
+ <vssd:VirtualSystemIdentifier>AyertiennaSUSE.x86_64-0.0.2</vssd:VirtualSystemIdentifier>
+ <vssd:VirtualSystemType>vmx-04</vssd:VirtualSystemType>
+ </System>
+ <Item>
+ <rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>
+ <rasd:Description>Number of Virtual CPUs</rasd:Description>
+ <rasd:ElementName>1 virtual CPU(s)</rasd:ElementName>
+ <rasd:InstanceID>1</rasd:InstanceID>
+ <rasd:ResourceType>3</rasd:ResourceType>
+ <rasd:VirtualQuantity>1</rasd:VirtualQuantity>
+ </Item>
+ <Item>
+ <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>
+ <rasd:Description>Memory Size</rasd:Description>
+ <rasd:ElementName>512MB of memory</rasd:ElementName>
+ <rasd:InstanceID>2</rasd:InstanceID>
+ <rasd:ResourceType>4</rasd:ResourceType>
+ <rasd:VirtualQuantity>512</rasd:VirtualQuantity>
+ </Item>
+ <Item ovf:required="false">
+ <rasd:Address>0</rasd:Address>
+ <rasd:Description>USB Controller</rasd:Description>
+ <rasd:ElementName>usb</rasd:ElementName>
+ <rasd:InstanceID>3</rasd:InstanceID>
+ <rasd:ResourceType>23</rasd:ResourceType>
+ </Item>
+ <Item>
+ <rasd:Address>0</rasd:Address>
+ <rasd:Description>SCSI Controller</rasd:Description>
+ <rasd:ElementName>scsiController0</rasd:ElementName>
+ <rasd:InstanceID>4</rasd:InstanceID>
+ <rasd:ResourceSubType>lsilogic</rasd:ResourceSubType>
+ <rasd:ResourceType>6</rasd:ResourceType>
+ </Item>
+ <Item>
+ <rasd:Address>0</rasd:Address>
+ <rasd:Description>IDE Controller</rasd:Description>
+ <rasd:ElementName>ideController0</rasd:ElementName>
+ <rasd:InstanceID>5</rasd:InstanceID>
+ <rasd:ResourceType>5</rasd:ResourceType>
+ </Item>
+ <Item ovf:required="false">
+ <rasd:AddressOnParent>0</rasd:AddressOnParent>
+ <rasd:AutomaticAllocation>false</rasd:AutomaticAllocation>
+ <rasd:Description>Floppy Drive</rasd:Description>
+ <rasd:ElementName>floppy0</rasd:ElementName>
+ <rasd:InstanceID>6</rasd:InstanceID>
+ <rasd:ResourceType>14</rasd:ResourceType>
+ </Item>
+ <Item ovf:required="false">
+ <rasd:AddressOnParent>0</rasd:AddressOnParent>
+ <rasd:AutomaticAllocation>false</rasd:AutomaticAllocation>
+ <rasd:ElementName>cdrom1</rasd:ElementName>
+ <rasd:InstanceID>7</rasd:InstanceID>
+ <rasd:Parent>5</rasd:Parent>
+ <rasd:ResourceType>15</rasd:ResourceType>
+ </Item>
+ <Item>
+ <rasd:AddressOnParent>0</rasd:AddressOnParent>
+ <rasd:ElementName>disk1</rasd:ElementName>
+ <rasd:HostResource>ovf:/disk/vmdisk1</rasd:HostResource>
+ <rasd:InstanceID>8</rasd:InstanceID>
+ <rasd:Parent>4</rasd:Parent>
+ <rasd:ResourceType>17</rasd:ResourceType>
+ </Item>
+ <Item>
+ <rasd:AddressOnParent>2</rasd:AddressOnParent>
+ <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
+ <rasd:Connection>bridged</rasd:Connection>
+ <rasd:Description>E1000 ethernet adapter on "bridged"</rasd:Description>
+ <rasd:ElementName>ethernet0</rasd:ElementName>
+ <rasd:InstanceID>9</rasd:InstanceID>
+ <rasd:ResourceSubType>E1000</rasd:ResourceSubType>
+ <rasd:ResourceType>10</rasd:ResourceType>
+ </Item>
+ </VirtualHardwareSection>
+ </VirtualSystem>
+</Envelope>
--- /dev/null
+SHA1(new_disk.vmdk)= 0500304662fb8a6a7925b5a43bc0e05d6a03720d
+SHA1(wrong_manifest.ovf)= 0500304662fb8a6a7965b5a43bc0e05d6a03720d
--- /dev/null
+<?xml version="1.0"?>
+<Envelope ovf:version="1.0" xml:lang="en-US" xmlns="http://schemas.dmtf.org/ovf/envelope/1" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:gnt="http://ganeti">
+ <References>
+ <File ovf:href="new_disk.vmdk" ovf:id="file1"/>
+ </References>
+ <DiskSection>
+ <Info>List of the virtual disks used in the package</Info>
+ <Disk ovf:diskId="vmdisk1" ovf:fileRef="file1" ovf:format="http://www.vmware.com/specifications/vmdk.html#sparse"/>
+ </DiskSection>
+ <gnt:GanetiSection>
+ <gnt:VersionId>0</gnt:VersionId>
+ <gnt:AutoBalance>False</gnt:AutoBalance>
+ <gnt:Tags></gnt:Tags>
+ <gnt:OS>
+ <gnt:Name>lenny-image</gnt:Name>
+ </gnt:OS>
+ <gnt:Network>
+ <gnt:Mode>bridged</gnt:Mode>
+ <gnt:MACAddress>aa:00:00:d8:2c:1e</gnt:MACAddress>
+ <gnt:IPAddress>None</gnt:IPAddress>
+ <gnt:Link>xen-br0</gnt:Link>
+ </gnt:Network>
+ <gnt:Hypervisor>
+ <gnt:Name>xen-pvm</gnt:Name>
+ <gnt:Parameters>
+ <gnt:root-path>/dev/sda</gnt:root-path>
+ <gnt:kernel_args>ro</gnt:kernel_args>
+ </gnt:Parameters>
+ </gnt:Hypervisor>
+ </gnt:GanetiSection>
+ <NetworkSection>
+ <Info>Logical networks used in the package</Info>
+ <Network ovf:name="bridged network">
+ <Description>Logical network used by this appliance.</Description>
+ </Network>
+ </NetworkSection>
+ <VirtualSystem ovf:id="New-shiny-instance">
+ <Info>A virtual machine</Info>
+ <OperatingSystemSection ovf:id="93">
+ <Info>The kind of installed guest operating system</Info>
+ <Description>Ubuntu</Description>
+ </OperatingSystemSection>
+ <VirtualHardwareSection>
+ <Info>Virtual hardware requirements for a virtual machine</Info>
+ <System>
+ <vssd:ElementName>Virtual Hardware Family</vssd:ElementName>
+ <vssd:InstanceID>0</vssd:InstanceID>
+ <vssd:VirtualSystemIdentifier>Ubuntu-freshly-created</vssd:VirtualSystemIdentifier>
+ <vssd:VirtualSystemType>virtualbox-2.2</vssd:VirtualSystemType>
+ </System>
+ <Item>
+ <rasd:Caption>1 virtual CPU</rasd:Caption>
+ <rasd:ElementName>1 virtual CPU</rasd:ElementName>
+ <rasd:Description>Number of virtual CPUs</rasd:Description>
+ <rasd:InstanceID>1</rasd:InstanceID>
+ <rasd:ResourceType>3</rasd:ResourceType>
+ <rasd:VirtualQuantity>1</rasd:VirtualQuantity>
+ </Item>
+ <Item>
+ <rasd:Caption>2048 MB of memory</rasd:Caption>
+ <rasd:ElementName>2048 MB of memory</rasd:ElementName>
+ <rasd:Description>Memory Size</rasd:Description>
+ <rasd:InstanceID>2</rasd:InstanceID>
+ <rasd:ResourceType>4</rasd:ResourceType>
+ <rasd:AllocationUnits>MegaBytes</rasd:AllocationUnits>
+ <rasd:VirtualQuantity>2048</rasd:VirtualQuantity>
+ </Item>
+ <Item>
+ <rasd:Caption>ideController0</rasd:Caption>
+ <rasd:ElementName>ideController0</rasd:ElementName>
+ <rasd:Description>IDE Controller</rasd:Description>
+ <rasd:InstanceID>3</rasd:InstanceID>
+ <rasd:ResourceType>5</rasd:ResourceType>
+ <rasd:ResourceSubType>PIIX4</rasd:ResourceSubType>
+ <rasd:Address>1</rasd:Address>
+ </Item>
+ <Item>
+ <rasd:Caption>Ethernet adapter on 'NAT'</rasd:Caption>
+ <rasd:ElementName>Ethernet adapter on 'NAT'</rasd:ElementName>
+ <rasd:InstanceID>5</rasd:InstanceID>
+ <rasd:ResourceType>10</rasd:ResourceType>
+ <rasd:ResourceSubType>PCNet32</rasd:ResourceSubType>
+ <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
+ <rasd:Connection>bridged network</rasd:Connection>
+ </Item>
+ <Item>
+ <rasd:Caption>disk1</rasd:Caption>
+ <rasd:ElementName>disk1</rasd:ElementName>
+ <rasd:Description>Disk Image</rasd:Description>
+ <rasd:InstanceID>7</rasd:InstanceID>
+ <rasd:ResourceType>17</rasd:ResourceType>
+ <rasd:HostResource>/disk/vmdisk1</rasd:HostResource>
+ <rasd:Parent>3</rasd:Parent>
+ <rasd:AddressOnParent>0</rasd:AddressOnParent>
+ </Item>
+ </VirtualHardwareSection>
+ </VirtualSystem>
+</Envelope>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--Generated by VMware ovftool 2.0.1 (build-260188), User: , UTC time: 2011-08-17T15:12:11.715742Z-->
+<Envelope vmw:buildId="build-260188" xmlns="http://schemas.dmtf.org/ovf/envelope/1" xmlns:cim="http://schemas.dmtf.org/wbem/wscim/1/common" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:vmw="http://www.vmware.com/schema/ovf" xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+ <References>
+ <File ovf:href="AyertiennaSUSE.x86_64-0.0.2-disk1.vmdk" ovf:id="file1" ovf:size="761627136"/>
+ </References>
+ <DiskSection>
+ <Info>Virtual disk information</Info>
+ <Disk ovf:capacity="16514" ovf:capacityAllocationUnits="byte * 2^20" ovf:diskId="vmdisk1" ovf:fileRef="file1" ovf:format="http://www.vmware.com/interfaces/specifications/vmdk.html#streamOptimized" ovf:populatedSize="2042953728"/>
+ </DiskSection>
+ <NetworkSection>
+ <Info>The list of logical networks</Info>
+ <Network ovf:name="bridged">
+ <Description>The bridged network</Description>
+ </Network>
+ </NetworkSection>
+ <VirtualSystem ovf:id="vm">
+ <Info>A virtual machine</Info>
+ <Name>AyertiennaSUSE.x86_64-0.0.2</Name>
+ <OperatingSystemSection ovf:id="83" vmw:osType="suse64Guest">
+ <Info>The kind of installed guest operating system</Info>
+ </OperatingSystemSection>
+ <VirtualHardwareSection>
+ <Info>Virtual hardware requirements</Info>
+ <System>
+ <vssd:ElementName>Virtual Hardware Family</vssd:ElementName>
+ <vssd:InstanceID>0</vssd:InstanceID>
+ <vssd:VirtualSystemIdentifier>AyertiennaSUSE.x86_64-0.0.2</vssd:VirtualSystemIdentifier>
+ <vssd:VirtualSystemType>vmx-04</vssd:VirtualSystemType>
+ </System>
+ <Item>
+ <rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>
+ <rasd:Description>Number of Virtual CPUs</rasd:Description>
+ <rasd:ElementName>1 virtual CPU(s)</rasd:ElementName>
+ <rasd:InstanceID>1</rasd:InstanceID>
+ <rasd:ResourceType>3</rasd:ResourceType>
+ <rasd:VirtualQuantity>1</rasd:VirtualQuantity>
+ </Item>
+ <Item>
+ <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>
+ <rasd:Description>Memory Size</rasd:Description>
+ <rasd:ElementName>512MB of memory</rasd:ElementName>
+ <rasd:InstanceID>2</rasd:InstanceID>
+ <rasd:ResourceType>4</rasd:ResourceType>
+ <rasd:VirtualQuantity>512</rasd:VirtualQuantity>
+ </Item>
+ <Item ovf:required="false">
+ <rasd:Address>0</rasd:Address>
+ <rasd:Description>USB Controller</rasd:Description>
+ <rasd:ElementName>usb</rasd:ElementName>
+ <rasd:InstanceID>3</rasd:InstanceID>
+ <rasd:ResourceType>23</rasd:ResourceType>
+ </Item>
+ <Item>
+ <rasd:Address>0</rasd:Address>
+ <rasd:Description>SCSI Controller</rasd:Description>
+ <rasd:ElementName>scsiController0</rasd:ElementName>
+ <rasd:InstanceID>4</rasd:InstanceID>
+ <rasd:ResourceSubType>lsilogic</rasd:ResourceSubType>
+ <rasd:ResourceType>6</rasd:ResourceType>
+ </Item>
+ <Item>
+ <rasd:Address>0</rasd:Address>
+ <rasd:Description>IDE Controller</rasd:Description>
+ <rasd:ElementName>ideController0</rasd:ElementName>
+ <rasd:InstanceID>5</rasd:InstanceID>
+ <rasd:ResourceType>5</rasd:ResourceType>
+ </Item>
+ <Item ovf:required="false">
+ <rasd:AddressOnParent>0</rasd:AddressOnParent>
+ <rasd:AutomaticAllocation>false</rasd:AutomaticAllocation>
+ <rasd:Description>Floppy Drive</rasd:Description>
+ <rasd:ElementName>floppy0</rasd:ElementName>
+ <rasd:InstanceID>6</rasd:InstanceID>
+ <rasd:ResourceType>14</rasd:ResourceType>
+ </Item>
+ <Item ovf:required="false">
+ <rasd:AddressOnParent>0</rasd:AddressOnParent>
+ <rasd:AutomaticAllocation>false</rasd:AutomaticAllocation>
+ <rasd:ElementName>cdrom1</rasd:ElementName>
+ <rasd:InstanceID>7</rasd:InstanceID>
+ <rasd:Parent>5</rasd:Parent>
+ <rasd:ResourceType>15</rasd:ResourceType>
+ </Item>
+ <Item>
+ <rasd:AddressOnParent>0</rasd:AddressOnParent>
+ <rasd:ElementName>disk1</rasd:ElementName>
+ <rasd:HostResource>ovf:/disk/vmdisk1</rasd:HostResource>
+ <rasd:InstanceID>8</rasd:InstanceID>
+ <rasd:Parent>4</rasd:Parent>
+ <rasd:ResourceType>17</rasd:ResourceType>
+ </Item>
+ <Item>
+ <rasd:AddressOnParent>2</rasd:AddressOnParent>
+ <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
+ <rasd:Connection>bridged</rasd:Connection>
+ <rasd:Description>E1000 ethernet adapter on "bridged"</rasd:Description>
+ <rasd:ElementName>ethernet0</rasd:ElementName>
+ <rasd:InstanceID>9</rasd:InstanceID>
+ <rasd:ResourceSubType>E1000</rasd:ResourceSubType>
+ <rasd:ResourceType>10</rasd:ResourceType>
+ </Item>
+ </VirtualHardwareSection>
+ </VirtualSystem>
+</Envelope>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--Generated by VMware ovftool 2.0.1 (build-260188), User: , UTC time: 2011-08-17T15:12:11.715742Z-->
+<Envelope vmw:buildId="build-260188" xmlns="http://schemas.dmtf.org/ovf/envelope/1" xmlns:cim="http://schemas.dmtf.org/wbem/wscim/1/common" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:vmw="http://www.vmware.com/schema/ovf" xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+ <References>
+ <File ovf:href="AyertiennaSUSE.x86_64-0.0.2-disk1.vmdk" ovf:id="file1" ovf:size="761627136"/>
+ </References>
+ <DiskSection>
+ <Info>Virtual disk information</Info>
+ <Disk ovf:capacity="16514" ovf:capacityAllocationUnits="byte * 2^20" ovf:diskId="vmdisk1" ovf:fileRef="file1" ovf:format="http://www.vmware.com/interfaces/specifications/vmdk.html#streamOptimized" ovf:populatedSize="2042953728"/>
+ </DiskSection>
+ <NetworkSection>
+ <Info>The list of logical networks</Info>
+ <Network ovf:name="bridged">
+ <Description>The bridged network</Description>
+ </Network>
+ </NetworkSection>
+ <VirtualSystem ovf:id="vm">
+ <Info>A virtual machine</Info>
+ <Name>AyertiennaSUSE.x86_64-0.0.2</Name>
+ <OperatingSystemSection ovf:id="83" vmw:osType="suse64Guest">
+ <Info>The kind of installed guest operating system</Info>
+ </OperatingSystemSection>
+ <VirtualHardwareSection>
+ <Info>Virtual hardware requirements</Info>
+ <System>
+ <vssd:ElementName>Virtual Hardware Family</vssd:ElementName>
+ <vssd:InstanceID>0</vssd:InstanceID>
+ <vssd:VirtualSystemIdentifier>AyertiennaSUSE.x86_64-0.0.2</vssd:VirtualSystemIdentifier>
+ <vssd:VirtualSystemType>vmx-04</vssd:VirtualSystemType>
+ </System>
+ <Item>
+ <rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>
+ <rasd:Description>Number of Virtual CPUs</rasd:Description>
+ <rasd:ElementName>1 virtual CPU(s)</rasd:ElementName>
+ <rasd:InstanceID>1</rasd:InstanceID>
+ <rasd:ResourceType>3</rasd:ResourceType>
+ <rasd:VirtualQuantity>1</rasd:VirtualQuantity>
+ </Item>
+ <Item>
+ <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>
+ <rasd:Description>Memory Size</rasd:Description>
+ <rasd:ElementName>512MB of memory</rasd:ElementName>
+ <rasd:InstanceID>2</rasd:InstanceID>
+ <rasd:ResourceType>4</rasd:ResourceType>
+ <rasd:VirtualQuantity>512</rasd:VirtualQuantity>
+ </Item>
+ <Item>
+ <rasd:Address>0</rasd:Address>
+ <rasd:Description>SCSI Controller</rasd:Description>
+ <rasd:ElementName>scsiController0</rasd:ElementName>
+ <rasd:InstanceID>4</rasd:InstanceID>
+ <rasd:ResourceSubType>lsilogic</rasd:ResourceSubType>
+ <rasd:ResourceType>6</rasd:ResourceType>
+ </Item>
+ <Item>
+ <rasd:Address>0</rasd:Address>
+ <rasd:Description>IDE Controller</rasd:Description>
+ <rasd:ElementName>ideController0</rasd:ElementName>
+ <rasd:InstanceID>5</rasd:InstanceID>
+ <rasd:ResourceType>5</rasd:ResourceType>
+ </Item>
+ <Item>
+ <rasd:AddressOnParent>0</rasd:AddressOnParent>
+ <rasd:ElementName>disk1</rasd:ElementName>
+ <rasd:HostResource>ovf:/disk/vmdisk1</rasd:HostResource>
+ <rasd:InstanceID>8</rasd:InstanceID>
+ <rasd:Parent>4</rasd:Parent>
+ <rasd:ResourceType>17</rasd:ResourceType>
+ </Item>
+ <Item
+ </Item>
+ </VirtualHardwareSection>
+ </VirtualSystem>
+</Envelope>
+
import unittest
import re
+import itertools
+import operator
from ganeti import _autoconf
from ganeti import utils
from ganeti import build
from ganeti import compat
from ganeti import mcpu
+from ganeti import opcodes
+from ganeti import constants
+from ganeti.rapi import baserlib
+from ganeti.rapi import rlib2
from ganeti.rapi import connector
import testutils
VALID_URI_RE = re.compile(r"^[-/a-z0-9]*$")
-
-class TestDocs(unittest.TestCase):
- """Documentation tests"""
-
- @staticmethod
- def _ReadDocFile(filename):
- return utils.ReadFile("%s/doc/%s" %
- (testutils.GetSourceDir(), filename))
-
- def testHookDocs(self):
+RAPI_OPCODE_EXCLUDE = frozenset([
+ # Not yet implemented
+ opcodes.OpBackupQuery,
+ opcodes.OpBackupRemove,
+ opcodes.OpClusterConfigQuery,
+ opcodes.OpClusterRepairDiskSizes,
+ opcodes.OpClusterVerify,
+ opcodes.OpClusterVerifyDisks,
+ opcodes.OpInstanceChangeGroup,
+ opcodes.OpInstanceMove,
+ opcodes.OpNodeQueryvols,
+ opcodes.OpOobCommand,
+ opcodes.OpTagsSearch,
+ opcodes.OpClusterActivateMasterIp,
+ opcodes.OpClusterDeactivateMasterIp,
+
+ # Difficult if not impossible
+ opcodes.OpClusterDestroy,
+ opcodes.OpClusterPostInit,
+ opcodes.OpClusterRename,
+ opcodes.OpNodeAdd,
+ opcodes.OpNodeRemove,
+
+ # Helper opcodes (e.g. submitted by LUs)
+ opcodes.OpClusterVerifyConfig,
+ opcodes.OpClusterVerifyGroup,
+ opcodes.OpGroupEvacuate,
+ opcodes.OpGroupVerifyDisks,
+
+ # Test opcodes
+ opcodes.OpTestAllocator,
+ opcodes.OpTestDelay,
+ opcodes.OpTestDummy,
+ opcodes.OpTestJqueue,
+ ])
+
+
+def _ReadDocFile(filename):
+ return utils.ReadFile("%s/doc/%s" %
+ (testutils.GetSourceDir(), filename))
+
+
+class TestHooksDocs(unittest.TestCase):
+ def test(self):
"""Check whether all hooks are documented.
"""
- hooksdoc = self._ReadDocFile("hooks.rst")
+ hooksdoc = _ReadDocFile("hooks.rst")
# Reverse mapping from LU to opcode
lu2opcode = dict((lu, op)
msg=("Missing documentation for hook %s/%s" %
(lucls.HTYPE, lucls.HPATH)))
+
+class TestRapiDocs(unittest.TestCase):
def _CheckRapiResource(self, uri, fixup, handler):
docline = "%s resource." % uri
self.assertEqual(handler.__doc__.splitlines()[0].strip(), docline,
self.assertTrue(VALID_URI_RE.match(uri), msg="Invalid URI %r" % uri)
- def testRapiDocs(self):
+ def test(self):
"""Check whether all RAPI resources are documented.
"""
- rapidoc = self._ReadDocFile("rapi.rst")
+ rapidoc = _ReadDocFile("rapi.rst")
node_name = re.escape("[node_name]")
instance_name = re.escape("[instance_name]")
msg=("URIs matched by more than one resource: %s" %
utils.CommaJoin(uri_dups)))
+ self._FindRapiMissing(resources.values())
+ self._CheckTagHandlers(resources.values())
+
+ def _FindRapiMissing(self, handlers):
+ used = frozenset(itertools.chain(*map(baserlib.GetResourceOpcodes,
+ handlers)))
+
+ unexpected = used & RAPI_OPCODE_EXCLUDE
+ self.assertFalse(unexpected,
+ msg=("Found RAPI resources for excluded opcodes: %s" %
+ utils.CommaJoin(_GetOpIds(unexpected))))
+
+ missing = (frozenset(opcodes.OP_MAPPING.values()) - used -
+ RAPI_OPCODE_EXCLUDE)
+ self.assertFalse(missing,
+ msg=("Missing RAPI resources for opcodes: %s" %
+ utils.CommaJoin(_GetOpIds(missing))))
+
+ def _CheckTagHandlers(self, handlers):
+ tag_handlers = filter(lambda x: issubclass(x, rlib2._R_Tags), handlers)
+ self.assertEqual(frozenset(map(operator.attrgetter("TAG_LEVEL"),
+ tag_handlers)),
+ constants.VALID_TAG_TYPES)
+
+
+def _GetOpIds(ops):
+ """Returns C{OP_ID} for all opcodes in passed sequence.
+
+ """
+ return sorted(opcls.OP_ID for opcls in ops)
+
class TestManpages(unittest.TestCase):
"""Manpage tests"""
import shutil
try:
- # pylint: disable-msg=E0611
+ # pylint: disable=E0611
from pyinotify import pyinotify
except ImportError:
import pyinotify
from ganeti import bdev
from ganeti import errors
+from ganeti import constants
import testutils
"remote_addr" not in result),
"Should not find network info")
+ def testBarriersOptions(self):
+ """Test class method that generates drbdsetup options for disk barriers"""
+ # Tests that should fail because of wrong version/options combinations
+ should_fail = [
+ (8, 0, 12, "bfd", True),
+ (8, 0, 12, "fd", False),
+ (8, 0, 12, "b", True),
+ (8, 2, 7, "bfd", True),
+ (8, 2, 7, "b", True)
+ ]
+
+ for vmaj, vmin, vrel, opts, meta in should_fail:
+ self.assertRaises(errors.BlockDeviceError,
+ bdev.DRBD8._ComputeDiskBarrierArgs,
+ vmaj, vmin, vrel, opts, meta)
+
+ # get the valid options from the frozenset(frozenset()) in constants.
+ valid_options = [list(x)[0] for x in constants.DRBD_VALID_BARRIER_OPT]
+
+ # Versions that do not support anything
+ for vmaj, vmin, vrel in ((8, 0, 0), (8, 0, 11), (8, 2, 6)):
+ for opts in valid_options:
+ self.assertRaises(errors.BlockDeviceError,
+ bdev.DRBD8._ComputeDiskBarrierArgs,
+ vmaj, vmin, vrel, opts, True)
+
+ # Versions with partial support (testing only options that are supported)
+ tests = [
+ (8, 0, 12, "n", False, []),
+ (8, 0, 12, "n", True, ["--no-md-flushes"]),
+ (8, 2, 7, "n", False, []),
+ (8, 2, 7, "fd", False, ["--no-disk-flushes", "--no-disk-drain"]),
+ (8, 0, 12, "n", True, ["--no-md-flushes"]),
+ ]
+
+ # Versions that support everything
+ for vmaj, vmin, vrel in ((8, 3, 0), (8, 3, 12)):
+ tests.append((vmaj, vmin, vrel, "bfd", True,
+ ["--no-disk-barrier", "--no-disk-drain",
+ "--no-disk-flushes", "--no-md-flushes"]))
+ tests.append((vmaj, vmin, vrel, "n", False, []))
+ tests.append((vmaj, vmin, vrel, "b", True,
+ ["--no-disk-barrier", "--no-md-flushes"]))
+ tests.append((vmaj, vmin, vrel, "fd", False,
+ ["--no-disk-flushes", "--no-disk-drain"]))
+ tests.append((vmaj, vmin, vrel, "n", True, ["--no-md-flushes"]))
+
+ # Test execution
+ for test in tests:
+ vmaj, vmin, vrel, disabled_barriers, disable_meta_flush, expected = test
+ args = \
+ bdev.DRBD8._ComputeDiskBarrierArgs(vmaj, vmin, vrel,
+ disabled_barriers,
+ disable_meta_flush)
+ self.failUnless(set(args) == set(expected),
+ "For test %s, got wrong results %s" % (test, args))
+
+ # Unsupported or invalid versions
+ for vmaj, vmin, vrel in ((0, 7, 25), (9, 0, 0), (7, 0, 0), (8, 4, 0)):
+ self.assertRaises(errors.BlockDeviceError,
+ bdev.DRBD8._ComputeDiskBarrierArgs,
+ vmaj, vmin, vrel, "n", True)
+
+ # Invalid options
+ for option in ("", "c", "whatever", "nbdfc", "nf"):
+ self.assertRaises(errors.BlockDeviceError,
+ bdev.DRBD8._ComputeDiskBarrierArgs,
+ 8, 3, 11, option, True)
+
class TestDRBD8Status(testutils.GanetiTestCase):
"""Testing case for DRBD8 /proc status"""
def CountPending(self):
return len(self._query)
- def Query(self, res, fields, filter_):
+ def Query(self, res, fields, qfilter):
if res != constants.QR_NODE:
raise Exception("Querying wrong resource")
if exp_fields != fields:
raise Exception("Expected fields %s, got %s" % (exp_fields, fields))
- if not (filter_ is None or check_filter(filter_)):
+ if not (qfilter is None or check_filter(qfilter)):
raise Exception("Filter doesn't match expectations")
return objects.QueryResponse(fields=None, data=result)
def testNoMaster(self):
cl = self._FakeClient()
- def _CheckFilter(filter_):
- self.assertEqual(filter_, [qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
+ def _CheckFilter(qfilter):
+ self.assertEqual(qfilter, [qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
return True
cl.AddQueryResult(["name", "offline", "sip"], _CheckFilter, [
def testNoMasterFilterNodeName(self):
cl = self._FakeClient()
- def _CheckFilter(filter_):
- self.assertEqual(filter_,
+ def _CheckFilter(qfilter):
+ self.assertEqual(qfilter,
[qlang.OP_AND,
[qlang.OP_OR] + [[qlang.OP_EQUAL, "name", name]
for name in ["node2", "node3"]],
def testNodeGroup(self):
cl = self._FakeClient()
- def _CheckFilter(filter_):
- self.assertEqual(filter_,
+ def _CheckFilter(qfilter):
+ self.assertEqual(qfilter,
[qlang.OP_OR, [qlang.OP_EQUAL, "group", "foobar"],
[qlang.OP_EQUAL, "group.uuid", "foobar"]])
return True
class TestClusterVerifyFiles(unittest.TestCase):
@staticmethod
def _FakeErrorIf(errors, cond, ecode, item, msg, *args, **kwargs):
- assert ((ecode == cmdlib.LUClusterVerifyGroup.ENODEFILECHECK and
+ assert ((ecode == constants.CV_ENODEFILECHECK and
ht.TNonEmptyString(item)) or
- (ecode == cmdlib.LUClusterVerifyGroup.ECLUSTERFILECHECK and
+ (ecode == constants.CV_ECLUSTERFILECHECK and
item is None))
if args:
self.assertFalse(lu.warning_log)
+class TestUpdateAndVerifySubDict(unittest.TestCase):
+ def setUp(self):
+ self.type_check = {
+ "a": constants.VTYPE_INT,
+ "b": constants.VTYPE_STRING,
+ "c": constants.VTYPE_BOOL,
+ "d": constants.VTYPE_STRING,
+ }
+
+ def test(self):
+ old_test = {
+ "foo": {
+ "d": "blubb",
+ "a": 321,
+ },
+ "baz": {
+ "a": 678,
+ "b": "678",
+ "c": True,
+ },
+ }
+ test = {
+ "foo": {
+ "a": 123,
+ "b": "123",
+ "c": True,
+ },
+ "bar": {
+ "a": 321,
+ "b": "321",
+ "c": False,
+ },
+ }
+
+ mv = {
+ "foo": {
+ "a": 123,
+ "b": "123",
+ "c": True,
+ "d": "blubb"
+ },
+ "bar": {
+ "a": 321,
+ "b": "321",
+ "c": False,
+ },
+ "baz": {
+ "a": 678,
+ "b": "678",
+ "c": True,
+ },
+ }
+
+ verified = cmdlib._UpdateAndVerifySubDict(old_test, test, self.type_check)
+ self.assertEqual(verified, mv)
+
+ def testWrong(self):
+ test = {
+ "foo": {
+ "a": "blubb",
+ "b": "123",
+ "c": True,
+ },
+ "bar": {
+ "a": 321,
+ "b": "321",
+ "c": False,
+ },
+ }
+
+ self.assertRaises(errors.TypeEnforcementError,
+ cmdlib._UpdateAndVerifySubDict, {}, test, self.type_check)
+
+
+class TestHvStateHelper(unittest.TestCase):
+ def testWithoutOpData(self):
+ self.assertEqual(cmdlib._MergeAndVerifyHvState(None, NotImplemented), None)
+
+ def testWithoutOldData(self):
+ new = {
+ constants.HT_XEN_PVM: {
+ constants.HVST_MEMORY_TOTAL: 4096,
+ },
+ }
+ self.assertEqual(cmdlib._MergeAndVerifyHvState(new, None), new)
+
+ def testWithWrongHv(self):
+ new = {
+ "i-dont-exist": {
+ constants.HVST_MEMORY_TOTAL: 4096,
+ },
+ }
+ self.assertRaises(errors.OpPrereqError, cmdlib._MergeAndVerifyHvState, new,
+ None)
+
+class TestDiskStateHelper(unittest.TestCase):
+ def testWithoutOpData(self):
+ self.assertEqual(cmdlib._MergeAndVerifyDiskState(None, NotImplemented),
+ None)
+
+ def testWithoutOldData(self):
+ new = {
+ constants.LD_LV: {
+ "xenvg": {
+ constants.DS_DISK_RESERVED: 1024,
+ },
+ },
+ }
+ self.assertEqual(cmdlib._MergeAndVerifyDiskState(new, None), new)
+
+ def testWithWrongStorageType(self):
+ new = {
+ "i-dont-exist": {
+ "xenvg": {
+ constants.DS_DISK_RESERVED: 1024,
+ },
+ },
+ }
+ self.assertRaises(errors.OpPrereqError, cmdlib._MergeAndVerifyDiskState,
+ new, None)
+
+
if __name__ == "__main__":
testutils.GanetiTestProgram()
import unittest
import re
+import itertools
from ganeti import constants
from ganeti import locking
+from ganeti import utils
import testutils
self.failUnless(constants.OP_PRIO_NORMAL > constants.OP_PRIO_HIGH)
self.failUnless(constants.OP_PRIO_HIGH > constants.OP_PRIO_HIGHEST)
+ def testDiskDefaults(self):
+ self.failUnless(set(constants.DISK_LD_DEFAULTS.keys()) ==
+ constants.LOGICAL_DISK_TYPES)
+ self.failUnless(set(constants.DISK_DT_DEFAULTS.keys()) ==
+ constants.DISK_TEMPLATES)
+
+
+class TestExportedNames(unittest.TestCase):
+ _VALID_NAME_RE = re.compile(r"^[A-Z][A-Z0-9_]+$")
+ _BUILTIN_NAME_RE = re.compile(r"^__\w+__$")
+ _EXCEPTIONS = frozenset([
+ "SplitVersion",
+ "BuildVersion",
+ ])
+
+ def test(self):
+ wrong = \
+ set(itertools.ifilterfalse(self._BUILTIN_NAME_RE.match,
+ itertools.ifilterfalse(self._VALID_NAME_RE.match,
+ dir(constants))))
+ wrong -= self._EXCEPTIONS
+ self.assertFalse(wrong,
+ msg=("Invalid names exported from constants module: %s" %
+ utils.CommaJoin(sorted(wrong))))
+
class TestParameterNames(unittest.TestCase):
"""HV/BE parameter tests"""
import unittest
import time
import tempfile
+import pycurl
+import itertools
+import threading
from cStringIO import StringIO
from ganeti import http
+from ganeti import compat
import ganeti.http.server
import ganeti.http.client
self.assertEqual(cr.headers, [])
self.assertEqual(cr.url, "https://localhost:1234/version")
+ def testPlainAddressIPv4(self):
+ cr = http.client.HttpClientRequest("192.0.2.9", 19956, "GET", "/version")
+ self.assertEqual(cr.url, "https://192.0.2.9:19956/version")
+
+ def testPlainAddressIPv6(self):
+ cr = http.client.HttpClientRequest("2001:db8::cafe", 15110, "GET", "/info")
+ self.assertEqual(cr.url, "https://[2001:db8::cafe]:15110/info")
+
def testOldStyleHeaders(self):
headers = {
"Content-type": "text/plain",
cr = http.client.HttpClientRequest("localhost", 1234, "GET", "/version")
self.assertEqual(cr.post_data, "")
- def testIdentity(self):
- # These should all use different connections, hence also have a different
- # identity
- cr1 = http.client.HttpClientRequest("localhost", 1234, "GET", "/version")
- cr2 = http.client.HttpClientRequest("localhost", 9999, "GET", "/version")
- cr3 = http.client.HttpClientRequest("node1", 1234, "GET", "/version")
- cr4 = http.client.HttpClientRequest("node1", 9999, "GET", "/version")
+ def testCompletionCallback(self):
+ for argname in ["completion_cb", "curl_config_fn"]:
+ kwargs = {
+ argname: NotImplementedError,
+ }
+ cr = http.client.HttpClientRequest("localhost", 14038, "GET", "/version",
+ **kwargs)
+ self.assertEqual(getattr(cr, argname), NotImplementedError)
+
+ for fn in [NotImplemented, {}, 1]:
+ kwargs = {
+ argname: fn,
+ }
+ self.assertRaises(AssertionError, http.client.HttpClientRequest,
+ "localhost", 23150, "GET", "/version", **kwargs)
+
+
+class _FakeCurl:
+ def __init__(self):
+ self.opts = {}
+ self.info = NotImplemented
+
+ def setopt(self, opt, value):
+ assert opt not in self.opts, "Option set more than once"
+ self.opts[opt] = value
+
+ def getinfo(self, info):
+ return self.info.pop(info)
+
+
+class TestClientStartRequest(unittest.TestCase):
+ @staticmethod
+ def _TestCurlConfig(curl):
+ curl.setopt(pycurl.SSLKEYTYPE, "PEM")
+
+ def test(self):
+ for method in [http.HTTP_GET, http.HTTP_PUT, "CUSTOM"]:
+ for port in [8761, 29796, 19528]:
+ for curl_config_fn in [None, self._TestCurlConfig]:
+ for read_timeout in [None, 0, 1, 123, 36000]:
+ self._TestInner(method, port, curl_config_fn, read_timeout)
+
+ def _TestInner(self, method, port, curl_config_fn, read_timeout):
+ for response_code in [http.HTTP_OK, http.HttpNotFound.code,
+ http.HTTP_NOT_MODIFIED]:
+ for response_body in [None, "Hello World",
+ "Very Long\tContent here\n" * 171]:
+ for errmsg in [None, "error"]:
+ req = http.client.HttpClientRequest("localhost", port, method,
+ "/version",
+ curl_config_fn=curl_config_fn,
+ read_timeout=read_timeout)
+ curl = _FakeCurl()
+ pending = http.client._StartRequest(curl, req)
+ self.assertEqual(pending.GetCurlHandle(), curl)
+ self.assertEqual(pending.GetCurrentRequest(), req)
+
+ # Check options
+ opts = curl.opts
+ self.assertEqual(opts.pop(pycurl.CUSTOMREQUEST), method)
+ self.assertEqual(opts.pop(pycurl.URL),
+ "https://localhost:%s/version" % port)
+ if read_timeout is None:
+ self.assertEqual(opts.pop(pycurl.TIMEOUT), 0)
+ else:
+ self.assertEqual(opts.pop(pycurl.TIMEOUT), read_timeout)
+ self.assertFalse(opts.pop(pycurl.VERBOSE))
+ self.assertTrue(opts.pop(pycurl.NOSIGNAL))
+ self.assertEqual(opts.pop(pycurl.USERAGENT),
+ http.HTTP_GANETI_VERSION)
+ self.assertEqual(opts.pop(pycurl.PROXY), "")
+ self.assertFalse(opts.pop(pycurl.POSTFIELDS))
+ self.assertFalse(opts.pop(pycurl.HTTPHEADER))
+ write_fn = opts.pop(pycurl.WRITEFUNCTION)
+ self.assertTrue(callable(write_fn))
+ if hasattr(pycurl, "SSL_SESSIONID_CACHE"):
+ self.assertFalse(opts.pop(pycurl.SSL_SESSIONID_CACHE))
+ if curl_config_fn:
+ self.assertEqual(opts.pop(pycurl.SSLKEYTYPE), "PEM")
+ else:
+ self.assertFalse(pycurl.SSLKEYTYPE in opts)
+ self.assertFalse(opts)
+
+ if response_body is not None:
+ offset = 0
+ while offset < len(response_body):
+ piece = response_body[offset:offset + 10]
+ write_fn(piece)
+ offset += len(piece)
+
+ curl.info = {
+ pycurl.RESPONSE_CODE: response_code,
+ }
+
+ # Finalize request
+ pending.Done(errmsg)
+
+ self.assertFalse(curl.info)
+
+ # Can only finalize once
+ self.assertRaises(AssertionError, pending.Done, True)
+
+ if errmsg:
+ self.assertFalse(req.success)
+ else:
+ self.assertTrue(req.success)
+ self.assertEqual(req.error, errmsg)
+ self.assertEqual(req.resp_status_code, response_code)
+ if response_body is None:
+ self.assertEqual(req.resp_body, "")
+ else:
+ self.assertEqual(req.resp_body, response_body)
+
+ # Check if resetting worked
+ assert not hasattr(curl, "reset")
+ opts = curl.opts
+ self.assertFalse(opts.pop(pycurl.POSTFIELDS))
+ self.assertTrue(callable(opts.pop(pycurl.WRITEFUNCTION)))
+ self.assertFalse(opts)
+
+ self.assertFalse(curl.opts,
+ msg="Previous checks did not consume all options")
+ assert id(opts) == id(curl.opts)
+
+ def _TestWrongTypes(self, *args, **kwargs):
+ req = http.client.HttpClientRequest(*args, **kwargs)
+ self.assertRaises(AssertionError, http.client._StartRequest,
+ _FakeCurl(), req)
+
+ def testWrongHostType(self):
+ self._TestWrongTypes(unicode("localhost"), 8080, "GET", "/version")
+
+ def testWrongUrlType(self):
+ self._TestWrongTypes("localhost", 8080, "GET", unicode("/version"))
+
+ def testWrongMethodType(self):
+ self._TestWrongTypes("localhost", 8080, unicode("GET"), "/version")
+
+ def testWrongHeaderType(self):
+ self._TestWrongTypes("localhost", 8080, "GET", "/version",
+ headers={
+ unicode("foo"): "bar",
+ })
+
+ def testWrongPostDataType(self):
+ self._TestWrongTypes("localhost", 8080, "GET", "/version",
+ post_data=unicode("verylongdata" * 100))
+
+
+class _EmptyCurlMulti:
+ def perform(self):
+ return (pycurl.E_MULTI_OK, 0)
- self.assertEqual(len(set([cr1.identity, cr2.identity,
- cr3.identity, cr4.identity])), 4)
+ def info_read(self):
+ return (0, [], [])
- # But this one should have the same
- cr1vglist = http.client.HttpClientRequest("localhost", 1234,
- "GET", "/vg_list")
- self.assertEqual(cr1.identity, cr1vglist.identity)
+class TestClientProcessRequests(unittest.TestCase):
+ def testEmpty(self):
+ requests = []
+ http.client.ProcessRequests(requests, _curl=NotImplemented,
+ _curl_multi=_EmptyCurlMulti)
+ self.assertEqual(requests, [])
+
+
+class TestProcessCurlRequests(unittest.TestCase):
+ class _FakeCurlMulti:
+ def __init__(self):
+ self.handles = []
+ self.will_fail = []
+ self._expect = ["perform"]
+ self._counter = itertools.count()
+
+ def add_handle(self, curl):
+ assert curl not in self.handles
+ self.handles.append(curl)
+ if self._counter.next() % 3 == 0:
+ self.will_fail.append(curl)
+
+ def remove_handle(self, curl):
+ self.handles.remove(curl)
+
+ def perform(self):
+ assert self._expect.pop(0) == "perform"
+
+ if self._counter.next() % 2 == 0:
+ self._expect.append("perform")
+ return (pycurl.E_CALL_MULTI_PERFORM, None)
+
+ self._expect.append("info_read")
+
+ return (pycurl.E_MULTI_OK, len(self.handles))
+
+ def info_read(self):
+ assert self._expect.pop(0) == "info_read"
+ successful = []
+ failed = []
+ if self.handles:
+ if self._counter.next() % 17 == 0:
+ curl = self.handles[0]
+ if curl in self.will_fail:
+ failed.append((curl, -1, "test error"))
+ else:
+ successful.append(curl)
+ remaining_messages = len(self.handles) % 3
+ if remaining_messages > 0:
+ self._expect.append("info_read")
+ else:
+ self._expect.append("select")
+ else:
+ remaining_messages = 0
+ self._expect.append("select")
+ return (remaining_messages, successful, failed)
+
+ def select(self, timeout):
+ # Never compare floats for equality
+ assert timeout >= 0.95 and timeout <= 1.05
+ assert self._expect.pop(0) == "select"
+ self._expect.append("perform")
-class TestClient(unittest.TestCase):
def test(self):
- pool = http.client.HttpClientPool(None)
- self.assertFalse(pool._pool)
+ requests = [_FakeCurl() for _ in range(10)]
+ multi = self._FakeCurlMulti()
+ for (curl, errmsg) in http.client._ProcessCurlRequests(multi, requests):
+ self.assertTrue(curl not in multi.handles)
+ if curl in multi.will_fail:
+ self.assertTrue("test error" in errmsg)
+ else:
+ self.assertTrue(errmsg is None)
+ self.assertFalse(multi.handles)
+ self.assertEqual(multi._expect, ["select"])
+
+
+class TestProcessRequests(unittest.TestCase):
+ class _DummyCurlMulti:
+ pass
+
+ def testNoMonitor(self):
+ self._Test(False)
+
+ def testWithMonitor(self):
+ self._Test(True)
+
+ class _MonitorChecker:
+ def __init__(self):
+ self._monitor = None
+
+ def GetMonitor(self):
+ return self._monitor
+
+ def __call__(self, monitor):
+ assert callable(monitor.GetLockInfo)
+ self._monitor = monitor
+
+ def _Test(self, use_monitor):
+ def cfg_fn(port, curl):
+ curl.opts["__port__"] = port
+
+ def _LockCheckReset(monitor, req):
+ self.assertTrue(monitor._lock.is_owned(shared=0),
+ msg="Lock must be owned in exclusive mode")
+ assert not hasattr(req, "lockcheck__")
+ setattr(req, "lockcheck__", True)
+
+ def _BuildNiceName(port, default=None):
+ if port % 5 == 0:
+ return "nicename%s" % port
+ else:
+ # Use standard name
+ return default
+
+ requests = \
+ [http.client.HttpClientRequest("localhost", i, "POST", "/version%s" % i,
+ curl_config_fn=compat.partial(cfg_fn, i),
+ completion_cb=NotImplementedError,
+ nicename=_BuildNiceName(i))
+ for i in range(15176, 15501)]
+ requests_count = len(requests)
+
+ if use_monitor:
+ lock_monitor_cb = self._MonitorChecker()
+ else:
+ lock_monitor_cb = None
+
+ def _ProcessRequests(multi, handles):
+ self.assertTrue(isinstance(multi, self._DummyCurlMulti))
+ self.assertEqual(len(requests), len(handles))
+ self.assertTrue(compat.all(isinstance(curl, _FakeCurl)
+ for curl in handles))
+
+ # Prepare for lock check
+ for req in requests:
+ assert req.completion_cb is NotImplementedError
+ if use_monitor:
+ req.completion_cb = \
+ compat.partial(_LockCheckReset, lock_monitor_cb.GetMonitor())
+
+ for idx, curl in enumerate(handles):
+ try:
+ port = curl.opts["__port__"]
+ except KeyError:
+ self.fail("Per-request config function was not called")
+
+ if use_monitor:
+ # Check if lock information is correct
+ lock_info = lock_monitor_cb.GetMonitor().GetLockInfo(None)
+ expected = \
+ [("rpc/%s" % (_BuildNiceName(handle.opts["__port__"],
+ default=("localhost/version%s" %
+ handle.opts["__port__"]))),
+ None,
+ [threading.currentThread().getName()], None)
+ for handle in handles[idx:]]
+ self.assertEqual(sorted(lock_info), sorted(expected))
+
+ if port % 3 == 0:
+ response_code = http.HTTP_OK
+ msg = None
+ else:
+ response_code = http.HttpNotFound.code
+ msg = "test error"
+
+ curl.info = {
+ pycurl.RESPONSE_CODE: response_code,
+ }
+
+ # Prepare for reset
+ self.assertFalse(curl.opts.pop(pycurl.POSTFIELDS))
+ self.assertTrue(callable(curl.opts.pop(pycurl.WRITEFUNCTION)))
+
+ yield (curl, msg)
+
+ if use_monitor:
+ self.assertTrue(compat.all(req.lockcheck__ for req in requests))
+
+ if use_monitor:
+ self.assertEqual(lock_monitor_cb.GetMonitor(), None)
+
+ http.client.ProcessRequests(requests, lock_monitor_cb=lock_monitor_cb,
+ _curl=_FakeCurl,
+ _curl_multi=self._DummyCurlMulti,
+ _curl_process=_ProcessRequests)
+ for req in requests:
+ if req.port % 3 == 0:
+ self.assertTrue(req.success)
+ self.assertEqual(req.error, None)
+ else:
+ self.assertFalse(req.success)
+ self.assertTrue("test error" in req.error)
+
+ # See if monitor was disabled
+ if use_monitor:
+ monitor = lock_monitor_cb.GetMonitor()
+ self.assertEqual(monitor._pending_fn, None)
+ self.assertEqual(monitor.GetLockInfo(None), [])
+ else:
+ self.assertEqual(lock_monitor_cb, None)
+
+ self.assertEqual(len(requests), requests_count)
+
+ def testBadRequest(self):
+ bad_request = http.client.HttpClientRequest("localhost", 27784,
+ "POST", "/version")
+ bad_request.success = False
+
+ self.assertRaises(AssertionError, http.client.ProcessRequests,
+ [bad_request], _curl=NotImplemented,
+ _curl_multi=NotImplemented, _curl_process=NotImplemented)
if __name__ == '__main__':
"""Stub for a QMP endpoint for a KVM instance
"""
- _QMP_BANNER_DATA = {"QMP": {"version": {
- "package": "",
- "qemu": {"micro": 50, "minor": 13, "major": 0},
- "capabilities": [],
- }}}
- _EMPTY_RESPONSE = {"return": []}
+ _QMP_BANNER_DATA = {
+ "QMP": {
+ "version": {
+ "package": "",
+ "qemu": {
+ "micro": 50,
+ "minor": 13,
+ "major": 0,
+ },
+ "capabilities": [],
+ },
+ }
+ }
+ _EMPTY_RESPONSE = {
+ "return": [],
+ }
def __init__(self, socket_filename, server_responses):
"""Creates a QMP stub
conn.close()
def encode_string(self, message):
- return (serializer.DumpJson(message, indent=False) +
+ return (serializer.DumpJson(message) +
hv_kvm.QmpConnection._MESSAGE_END_TOKEN)
class TestQmpMessage(testutils.GanetiTestCase):
def testSerialization(self):
- test_data = {"execute": "command", "arguments": ["a", "b", "c"]}
+ test_data = {
+ "execute": "command",
+ "arguments": ["a", "b", "c"],
+ }
message = hv_kvm.QmpMessage(test_data)
for k, v in test_data.items():
- self.failUnless(message[k] == v)
+ self.assertEqual(message[k], v)
+
+ serialized = str(message)
+ self.assertEqual(len(serialized.splitlines()), 1,
+ msg="Got multi-line message")
- rebuilt_message = hv_kvm.QmpMessage.BuildFromJsonString(str(message))
- self.failUnless(rebuilt_message == message)
+ rebuilt_message = hv_kvm.QmpMessage.BuildFromJsonString(serialized)
+ self.assertEqual(rebuilt_message, message)
class TestQmp(testutils.GanetiTestCase):
# Format the script
for request, expected_response in zip(requests, expected_responses):
response = qmp_connection.Execute(request)
- self.failUnless(response == hv_kvm.QmpMessage(expected_response))
+ msg = hv_kvm.QmpMessage(expected_response)
+ self.assertEqual(len(str(msg).splitlines()), 1,
+ msg="Got multi-line message")
+ self.assertEqual(response, msg)
class TestConsole(unittest.TestCase):
self.sl = locking.SharedLock("TestSharedLock")
def testSequenceAndOwnership(self):
- self.assertFalse(self.sl._is_owned())
+ self.assertFalse(self.sl.is_owned())
self.sl.acquire(shared=1)
- self.assert_(self.sl._is_owned())
- self.assert_(self.sl._is_owned(shared=1))
- self.assertFalse(self.sl._is_owned(shared=0))
+ self.assert_(self.sl.is_owned())
+ self.assert_(self.sl.is_owned(shared=1))
+ self.assertFalse(self.sl.is_owned(shared=0))
self.sl.release()
- self.assertFalse(self.sl._is_owned())
+ self.assertFalse(self.sl.is_owned())
self.sl.acquire()
- self.assert_(self.sl._is_owned())
- self.assertFalse(self.sl._is_owned(shared=1))
- self.assert_(self.sl._is_owned(shared=0))
+ self.assert_(self.sl.is_owned())
+ self.assertFalse(self.sl.is_owned(shared=1))
+ self.assert_(self.sl.is_owned(shared=0))
self.sl.release()
- self.assertFalse(self.sl._is_owned())
+ self.assertFalse(self.sl.is_owned())
self.sl.acquire(shared=1)
- self.assert_(self.sl._is_owned())
- self.assert_(self.sl._is_owned(shared=1))
- self.assertFalse(self.sl._is_owned(shared=0))
+ self.assert_(self.sl.is_owned())
+ self.assert_(self.sl.is_owned(shared=1))
+ self.assertFalse(self.sl.is_owned(shared=0))
self.sl.release()
- self.assertFalse(self.sl._is_owned())
+ self.assertFalse(self.sl.is_owned())
def testBooleanValue(self):
# semaphores are supposed to return a true value on a successful acquire
# Acquire in shared mode, downgrade should be no-op
self.assertTrue(self.sl.acquire(shared=1))
- self.assertTrue(self.sl._is_owned(shared=1))
+ self.assertTrue(self.sl.is_owned(shared=1))
self.assertTrue(self.sl.downgrade())
- self.assertTrue(self.sl._is_owned(shared=1))
+ self.assertTrue(self.sl.is_owned(shared=1))
self.sl.release()
def testDowngrade(self):
self.assertTrue(self.sl.acquire())
- self.assertTrue(self.sl._is_owned(shared=0))
+ self.assertTrue(self.sl.is_owned(shared=0))
self.assertTrue(self.sl.downgrade())
- self.assertTrue(self.sl._is_owned(shared=1))
+ self.assertTrue(self.sl.is_owned(shared=1))
self.sl.release()
@_Repeat
def testDowngradeJumpsAheadOfExclusive(self):
def _KeepExclusive(ev_got, ev_downgrade, ev_release):
self.assertTrue(self.sl.acquire())
- self.assertTrue(self.sl._is_owned(shared=0))
+ self.assertTrue(self.sl.is_owned(shared=0))
ev_got.set()
ev_downgrade.wait()
- self.assertTrue(self.sl._is_owned(shared=0))
+ self.assertTrue(self.sl.is_owned(shared=0))
self.assertTrue(self.sl.downgrade())
- self.assertTrue(self.sl._is_owned(shared=1))
+ self.assertTrue(self.sl.is_owned(shared=1))
ev_release.wait()
- self.assertTrue(self.sl._is_owned(shared=1))
+ self.assertTrue(self.sl.is_owned(shared=1))
self.sl.release()
def _KeepExclusive2(ev_started, ev_release):
self.assertTrue(self.sl.acquire(test_notify=ev_started.set))
- self.assertTrue(self.sl._is_owned(shared=0))
+ self.assertTrue(self.sl.is_owned(shared=0))
ev_release.wait()
- self.assertTrue(self.sl._is_owned(shared=0))
+ self.assertTrue(self.sl.is_owned(shared=0))
self.sl.release()
def _KeepShared(ev_started, ev_got, ev_release):
self.assertTrue(self.sl.acquire(shared=1, test_notify=ev_started.set))
- self.assertTrue(self.sl._is_owned(shared=1))
+ self.assertTrue(self.sl.is_owned(shared=1))
ev_got.set()
ev_release.wait()
- self.assertTrue(self.sl._is_owned(shared=1))
+ self.assertTrue(self.sl.is_owned(shared=1))
self.sl.release()
# Acquire lock in exclusive mode
def testKeepMode(self):
self.cond.acquire(shared=1)
- self.assert_(self.sl._is_owned(shared=1))
+ self.assert_(self.sl.is_owned(shared=1))
self.cond.wait(0)
- self.assert_(self.sl._is_owned(shared=1))
+ self.assert_(self.sl.is_owned(shared=1))
self.cond.release()
self.cond.acquire(shared=0)
- self.assert_(self.sl._is_owned(shared=0))
+ self.assert_(self.sl.is_owned(shared=0))
self.cond.wait(0)
- self.assert_(self.sl._is_owned(shared=0))
+ self.assert_(self.sl.is_owned(shared=0))
self.cond.release()
@locking.ssynchronized(_decoratorlock)
def _doItExclusive(self):
- self.assert_(_decoratorlock._is_owned())
+ self.assert_(_decoratorlock.is_owned())
self.done.put('EXC')
@locking.ssynchronized(_decoratorlock, shared=1)
def _doItSharer(self):
- self.assert_(_decoratorlock._is_owned(shared=1))
+ self.assert_(_decoratorlock.is_owned(shared=1))
self.done.put('SHR')
def testDecoratedFunctions(self):
self._doItExclusive()
- self.assertFalse(_decoratorlock._is_owned())
+ self.assertFalse(_decoratorlock.is_owned())
self._doItSharer()
- self.assertFalse(_decoratorlock._is_owned())
+ self.assertFalse(_decoratorlock.is_owned())
def testSharersCanCoexist(self):
_decoratorlock.acquire(shared=1)
newls = locking.LockSet([], "TestLockSet.testResources")
self.assertEquals(newls._names(), set())
+ def testCheckOwnedUnknown(self):
+ self.assertFalse(self.ls.check_owned("certainly-not-owning-this-one"))
+ for shared in [-1, 0, 1, 6378, 24255]:
+ self.assertFalse(self.ls.check_owned("certainly-not-owning-this-one",
+ shared=shared))
+
+ def testCheckOwnedUnknownWhileHolding(self):
+ self.assertFalse(self.ls.check_owned([]))
+ self.ls.acquire("one", shared=1)
+ self.assertRaises(errors.LockError, self.ls.check_owned, "nonexist")
+ self.assertTrue(self.ls.check_owned("one", shared=1))
+ self.assertFalse(self.ls.check_owned("one", shared=0))
+ self.assertFalse(self.ls.check_owned(["one", "two"]))
+ self.assertRaises(errors.LockError, self.ls.check_owned,
+ ["one", "nonexist"])
+ self.assertRaises(errors.LockError, self.ls.check_owned, "")
+ self.ls.release()
+ self.assertFalse(self.ls.check_owned([]))
+ self.assertFalse(self.ls.check_owned("one"))
+
def testAcquireRelease(self):
+ self.assertFalse(self.ls.check_owned(self.ls._names()))
self.assert_(self.ls.acquire('one'))
- self.assertEquals(self.ls._list_owned(), set(['one']))
+ self.assertEquals(self.ls.list_owned(), set(['one']))
+ self.assertTrue(self.ls.check_owned("one"))
+ self.assertTrue(self.ls.check_owned("one", shared=0))
+ self.assertFalse(self.ls.check_owned("one", shared=1))
self.ls.release()
- self.assertEquals(self.ls._list_owned(), set())
+ self.assertEquals(self.ls.list_owned(), set())
+ self.assertFalse(self.ls.check_owned(self.ls._names()))
self.assertEquals(self.ls.acquire(['one']), set(['one']))
- self.assertEquals(self.ls._list_owned(), set(['one']))
+ self.assertEquals(self.ls.list_owned(), set(['one']))
self.ls.release()
- self.assertEquals(self.ls._list_owned(), set())
+ self.assertEquals(self.ls.list_owned(), set())
self.ls.acquire(['one', 'two', 'three'])
- self.assertEquals(self.ls._list_owned(), set(['one', 'two', 'three']))
+ self.assertEquals(self.ls.list_owned(), set(['one', 'two', 'three']))
+ self.assertTrue(self.ls.check_owned(self.ls._names()))
+ self.assertTrue(self.ls.check_owned(self.ls._names(), shared=0))
+ self.assertFalse(self.ls.check_owned(self.ls._names(), shared=1))
self.ls.release('one')
- self.assertEquals(self.ls._list_owned(), set(['two', 'three']))
+ self.assertFalse(self.ls.check_owned(["one"]))
+ self.assertTrue(self.ls.check_owned(["two", "three"]))
+ self.assertTrue(self.ls.check_owned(["two", "three"], shared=0))
+ self.assertFalse(self.ls.check_owned(["two", "three"], shared=1))
+ self.assertEquals(self.ls.list_owned(), set(['two', 'three']))
self.ls.release(['three'])
- self.assertEquals(self.ls._list_owned(), set(['two']))
+ self.assertEquals(self.ls.list_owned(), set(['two']))
self.ls.release()
- self.assertEquals(self.ls._list_owned(), set())
+ self.assertEquals(self.ls.list_owned(), set())
self.assertEquals(self.ls.acquire(['one', 'three']), set(['one', 'three']))
- self.assertEquals(self.ls._list_owned(), set(['one', 'three']))
+ self.assertEquals(self.ls.list_owned(), set(['one', 'three']))
self.ls.release()
- self.assertEquals(self.ls._list_owned(), set())
+ self.assertEquals(self.ls.list_owned(), set())
+ for name in self.ls._names():
+ self.assertFalse(self.ls.check_owned(name))
def testNoDoubleAcquire(self):
self.ls.acquire('one')
def testAddRemove(self):
self.ls.add('four')
- self.assertEquals(self.ls._list_owned(), set())
+ self.assertEquals(self.ls.list_owned(), set())
self.assert_('four' in self.ls._names())
self.ls.add(['five', 'six', 'seven'], acquired=1)
self.assert_('five' in self.ls._names())
self.assert_('six' in self.ls._names())
self.assert_('seven' in self.ls._names())
- self.assertEquals(self.ls._list_owned(), set(['five', 'six', 'seven']))
+ self.assertEquals(self.ls.list_owned(), set(['five', 'six', 'seven']))
self.assertEquals(self.ls.remove(['five', 'six']), ['five', 'six'])
self.assert_('five' not in self.ls._names())
self.assert_('six' not in self.ls._names())
- self.assertEquals(self.ls._list_owned(), set(['seven']))
+ self.assertEquals(self.ls.list_owned(), set(['seven']))
self.assertRaises(AssertionError, self.ls.add, 'eight', acquired=1)
self.ls.remove('seven')
self.assert_('seven' not in self.ls._names())
- self.assertEquals(self.ls._list_owned(), set([]))
+ self.assertEquals(self.ls.list_owned(), set([]))
self.ls.acquire(None, shared=1)
self.assertRaises(AssertionError, self.ls.add, 'eight')
self.ls.release()
self.ls.acquire(None)
self.ls.add('eight', acquired=1)
self.assert_('eight' in self.ls._names())
- self.assert_('eight' in self.ls._list_owned())
+ self.assert_('eight' in self.ls.list_owned())
self.ls.add('nine')
self.assert_('nine' in self.ls._names())
- self.assert_('nine' not in self.ls._list_owned())
+ self.assert_('nine' not in self.ls.list_owned())
self.ls.release()
self.ls.remove(['two'])
self.assert_('two' not in self.ls._names())
def testAcquireSetLock(self):
# acquire the set-lock exclusively
self.assertEquals(self.ls.acquire(None), set(['one', 'two', 'three']))
- self.assertEquals(self.ls._list_owned(), set(['one', 'two', 'three']))
- self.assertEquals(self.ls._is_owned(), True)
+ self.assertEquals(self.ls.list_owned(), set(['one', 'two', 'three']))
+ self.assertEquals(self.ls.is_owned(), True)
self.assertEquals(self.ls._names(), set(['one', 'two', 'three']))
# I can still add/remove elements...
self.assertEquals(self.ls.remove(['two', 'three']), ['two', 'three'])
self.assertEquals(self.ls.acquire(['two', 'two', 'three'], shared=1),
set(['two', 'two', 'three']))
self.ls.release(['two', 'two'])
- self.assertEquals(self.ls._list_owned(), set(['three']))
+ self.assertEquals(self.ls.list_owned(), set(['three']))
def testEmptyAcquire(self):
# Acquire an empty list of locks...
self.assertEquals(self.ls.acquire([]), set())
- self.assertEquals(self.ls._list_owned(), set())
+ self.assertEquals(self.ls.list_owned(), set())
# New locks can still be addded
self.assert_(self.ls.add('six'))
# "re-acquiring" is not an issue, since we had really acquired nothing
self.assertEquals(self.ls.acquire([], shared=1), set())
- self.assertEquals(self.ls._list_owned(), set())
+ self.assertEquals(self.ls.list_owned(), set())
# We haven't really acquired anything, so we cannot release
self.assertRaises(AssertionError, self.ls.release)
self.ls.release()
else:
self.assert_(acquired is None)
- self.assertFalse(self.ls._list_owned())
- self.assertFalse(self.ls._is_owned())
+ self.assertFalse(self.ls.list_owned())
+ self.assertFalse(self.ls.is_owned())
self.done.put("not acquired")
self._addThread(target=_AcquireOne)
self.ls.release(names=name)
- self.assertFalse(self.ls._list_owned())
+ self.assertFalse(self.ls.list_owned())
self._waitThreads()
self.ls.add('four')
self.ls.add('five', acquired=1)
self.ls.add('six', acquired=1, shared=1)
- self.assertEquals(self.ls._list_owned(),
+ self.assertEquals(self.ls.list_owned(),
set(['one', 'two', 'three', 'five', 'six']))
- self.assertEquals(self.ls._is_owned(), True)
+ self.assertEquals(self.ls.is_owned(), True)
self.assertEquals(self.ls._names(),
set(['one', 'two', 'three', 'four', 'five', 'six']))
self.ls.release()
def testAcquireWithNamesDowngrade(self):
self.assertEquals(self.ls.acquire("two", shared=0), set(["two"]))
- self.assertTrue(self.ls._is_owned())
- self.assertFalse(self.ls._get_lock()._is_owned())
+ self.assertTrue(self.ls.is_owned())
+ self.assertFalse(self.ls._get_lock().is_owned())
self.ls.release()
- self.assertFalse(self.ls._is_owned())
- self.assertFalse(self.ls._get_lock()._is_owned())
+ self.assertFalse(self.ls.is_owned())
+ self.assertFalse(self.ls._get_lock().is_owned())
# Can't downgrade after releasing
self.assertRaises(AssertionError, self.ls.downgrade, "two")
def testDowngrade(self):
# Not owning anything, must raise an exception
- self.assertFalse(self.ls._is_owned())
+ self.assertFalse(self.ls.is_owned())
self.assertRaises(AssertionError, self.ls.downgrade)
- self.assertFalse(compat.any(i._is_owned()
+ self.assertFalse(compat.any(i.is_owned()
for i in self.ls._get_lockdict().values()))
+ self.assertFalse(self.ls.check_owned(self.ls._names()))
+ for name in self.ls._names():
+ self.assertFalse(self.ls.check_owned(name))
self.assertEquals(self.ls.acquire(None, shared=0),
set(["one", "two", "three"]))
self.assertRaises(AssertionError, self.ls.downgrade, "unknown lock")
- self.assertTrue(self.ls._get_lock()._is_owned(shared=0))
- self.assertTrue(compat.all(i._is_owned(shared=0)
+ self.assertTrue(self.ls.check_owned(self.ls._names(), shared=0))
+ for name in self.ls._names():
+ self.assertTrue(self.ls.check_owned(name))
+ self.assertTrue(self.ls.check_owned(name, shared=0))
+ self.assertFalse(self.ls.check_owned(name, shared=1))
+
+ self.assertTrue(self.ls._get_lock().is_owned(shared=0))
+ self.assertTrue(compat.all(i.is_owned(shared=0)
for i in self.ls._get_lockdict().values()))
# Start downgrading locks
self.assertTrue(self.ls.downgrade(names=["one"]))
- self.assertTrue(self.ls._get_lock()._is_owned(shared=0))
- self.assertTrue(compat.all(lock._is_owned(shared=[0, 1][int(name == "one")])
+ self.assertTrue(self.ls._get_lock().is_owned(shared=0))
+ self.assertTrue(compat.all(lock.is_owned(shared=[0, 1][int(name == "one")])
for name, lock in
self.ls._get_lockdict().items()))
+ self.assertFalse(self.ls.check_owned("one", shared=0))
+ self.assertTrue(self.ls.check_owned("one", shared=1))
+ self.assertTrue(self.ls.check_owned("two", shared=0))
+ self.assertTrue(self.ls.check_owned("three", shared=0))
+
+ # Downgrade second lock
self.assertTrue(self.ls.downgrade(names="two"))
- self.assertTrue(self.ls._get_lock()._is_owned(shared=0))
+ self.assertTrue(self.ls._get_lock().is_owned(shared=0))
should_share = lambda name: [0, 1][int(name in ("one", "two"))]
- self.assertTrue(compat.all(lock._is_owned(shared=should_share(name))
+ self.assertTrue(compat.all(lock.is_owned(shared=should_share(name))
for name, lock in
self.ls._get_lockdict().items()))
+ self.assertFalse(self.ls.check_owned("one", shared=0))
+ self.assertTrue(self.ls.check_owned("one", shared=1))
+ self.assertFalse(self.ls.check_owned("two", shared=0))
+ self.assertTrue(self.ls.check_owned("two", shared=1))
+ self.assertTrue(self.ls.check_owned("three", shared=0))
+
# Downgrading the last exclusive lock to shared must downgrade the
# lockset-internal lock too
self.assertTrue(self.ls.downgrade(names="three"))
- self.assertTrue(self.ls._get_lock()._is_owned(shared=1))
- self.assertTrue(compat.all(i._is_owned(shared=1)
+ self.assertTrue(self.ls._get_lock().is_owned(shared=1))
+ self.assertTrue(compat.all(i.is_owned(shared=1)
for i in self.ls._get_lockdict().values()))
+ # Verify owned locks
+ for name in self.ls._names():
+ self.assertTrue(self.ls.check_owned(name, shared=1))
+
# Downgrading a shared lock must be a no-op
self.assertTrue(self.ls.downgrade(names=["one", "three"]))
- self.assertTrue(self.ls._get_lock()._is_owned(shared=1))
- self.assertTrue(compat.all(i._is_owned(shared=1)
+ self.assertTrue(self.ls._get_lock().is_owned(shared=1))
+ self.assertTrue(compat.all(i.is_owned(shared=1)
for i in self.ls._get_lockdict().values()))
self.ls.release()
def testAcquireRelease(self):
self.GL.acquire(locking.LEVEL_CLUSTER, ['BGL'], shared=1)
- self.assertEquals(self.GL._list_owned(locking.LEVEL_CLUSTER), set(['BGL']))
+ self.assertEquals(self.GL.list_owned(locking.LEVEL_CLUSTER), set(['BGL']))
self.GL.acquire(locking.LEVEL_INSTANCE, ['i1'])
self.GL.acquire(locking.LEVEL_NODEGROUP, ['g2'])
self.GL.acquire(locking.LEVEL_NODE, ['n1', 'n2'], shared=1)
+ self.assertTrue(self.GL.check_owned(locking.LEVEL_NODE, ["n1", "n2"],
+ shared=1))
+ self.assertFalse(self.GL.check_owned(locking.LEVEL_INSTANCE, ["i1", "i3"]))
self.GL.release(locking.LEVEL_NODE, ['n2'])
- self.assertEquals(self.GL._list_owned(locking.LEVEL_NODE), set(['n1']))
- self.assertEquals(self.GL._list_owned(locking.LEVEL_NODEGROUP), set(['g2']))
- self.assertEquals(self.GL._list_owned(locking.LEVEL_INSTANCE), set(['i1']))
+ self.assertEquals(self.GL.list_owned(locking.LEVEL_NODE), set(['n1']))
+ self.assertEquals(self.GL.list_owned(locking.LEVEL_NODEGROUP), set(['g2']))
+ self.assertEquals(self.GL.list_owned(locking.LEVEL_INSTANCE), set(['i1']))
self.GL.release(locking.LEVEL_NODE)
- self.assertEquals(self.GL._list_owned(locking.LEVEL_NODE), set())
- self.assertEquals(self.GL._list_owned(locking.LEVEL_NODEGROUP), set(['g2']))
- self.assertEquals(self.GL._list_owned(locking.LEVEL_INSTANCE), set(['i1']))
+ self.assertEquals(self.GL.list_owned(locking.LEVEL_NODE), set())
+ self.assertEquals(self.GL.list_owned(locking.LEVEL_NODEGROUP), set(['g2']))
+ self.assertEquals(self.GL.list_owned(locking.LEVEL_INSTANCE), set(['i1']))
self.GL.release(locking.LEVEL_NODEGROUP)
self.GL.release(locking.LEVEL_INSTANCE)
self.assertRaises(errors.LockError, self.GL.acquire,
locking.LEVEL_INSTANCE, ['i5'])
self.GL.acquire(locking.LEVEL_INSTANCE, ['i3'], shared=1)
- self.assertEquals(self.GL._list_owned(locking.LEVEL_INSTANCE), set(['i3']))
+ self.assertEquals(self.GL.list_owned(locking.LEVEL_INSTANCE), set(['i3']))
def testAcquireWholeSets(self):
self.GL.acquire(locking.LEVEL_CLUSTER, ['BGL'], shared=1)
self.assertEquals(self.GL.acquire(locking.LEVEL_INSTANCE, None),
set(self.instances))
- self.assertEquals(self.GL._list_owned(locking.LEVEL_INSTANCE),
+ self.assertEquals(self.GL.list_owned(locking.LEVEL_INSTANCE),
set(self.instances))
self.assertEquals(self.GL.acquire(locking.LEVEL_NODEGROUP, None),
set(self.nodegroups))
- self.assertEquals(self.GL._list_owned(locking.LEVEL_NODEGROUP),
+ self.assertEquals(self.GL.list_owned(locking.LEVEL_NODEGROUP),
set(self.nodegroups))
self.assertEquals(self.GL.acquire(locking.LEVEL_NODE, None, shared=1),
set(self.nodes))
- self.assertEquals(self.GL._list_owned(locking.LEVEL_NODE),
+ self.assertEquals(self.GL.list_owned(locking.LEVEL_NODE),
set(self.nodes))
self.GL.release(locking.LEVEL_NODE)
self.GL.release(locking.LEVEL_NODEGROUP)
self.GL.acquire(locking.LEVEL_CLUSTER, ['BGL'], shared=1)
self.assertEquals(self.GL.acquire(locking.LEVEL_INSTANCE, None),
set(self.instances))
- self.assertEquals(self.GL._list_owned(locking.LEVEL_INSTANCE),
+ self.assertEquals(self.GL.list_owned(locking.LEVEL_INSTANCE),
set(self.instances))
self.assertEquals(self.GL.acquire(locking.LEVEL_NODE, ['n2'], shared=1),
set(['n2']))
- self.assertEquals(self.GL._list_owned(locking.LEVEL_NODE),
+ self.assertEquals(self.GL.list_owned(locking.LEVEL_NODE),
set(['n2']))
self.GL.release(locking.LEVEL_NODE)
self.GL.release(locking.LEVEL_INSTANCE)
from ganeti import masterd
from ganeti.masterd.instance import \
- ImportExportTimeouts, _TimeoutExpired, _DiskImportExportBase, \
+ ImportExportTimeouts, _DiskImportExportBase, \
ComputeRemoteExportHandshake, CheckRemoteExportHandshake, \
ComputeRemoteImportDiskInfo, CheckRemoteExportDiskInfo, \
FormatProgress
self.assertEqual(tmo.progress, 5)
def testTimeoutExpired(self):
- self.assert_(_TimeoutExpired(100, 300, _time_fn=lambda: 500))
- self.assertFalse(_TimeoutExpired(100, 300, _time_fn=lambda: 0))
- self.assertFalse(_TimeoutExpired(100, 300, _time_fn=lambda: 100))
- self.assertFalse(_TimeoutExpired(100, 300, _time_fn=lambda: 400))
+ self.assert_(utils.TimeoutExpired(100, 300, _time_fn=lambda: 500))
+ self.assertFalse(utils.TimeoutExpired(100, 300, _time_fn=lambda: 0))
+ self.assertFalse(utils.TimeoutExpired(100, 300, _time_fn=lambda: 100))
+ self.assertFalse(utils.TimeoutExpired(100, 300, _time_fn=lambda: 400))
def testDiskImportExportBaseDirect(self):
self.assertRaises(AssertionError, _DiskImportExportBase,
self.assertEqual(fn("2001:db8::1"), socket.AF_INET6)
self.assertRaises(errors.IPAddressError, fn, "0")
+ def testValidateNetmask(self):
+ for netmask in [0, 33]:
+ self.assertFalse(netutils.IP4Address.ValidateNetmask(netmask))
+
+ for netmask in [1, 32]:
+ self.assertTrue(netutils.IP4Address.ValidateNetmask(netmask))
+
+ for netmask in [0, 129]:
+ self.assertFalse(netutils.IP6Address.ValidateNetmask(netmask))
+
+ for netmask in [1, 128]:
+ self.assertTrue(netutils.IP6Address.ValidateNetmask(netmask))
+
+ def testGetClassFromX(self):
+ self.assert_(
+ netutils.IPAddress.GetClassFromIpVersion(constants.IP4_VERSION) ==
+ netutils.IP4Address)
+ self.assert_(
+ netutils.IPAddress.GetClassFromIpVersion(constants.IP6_VERSION) ==
+ netutils.IP6Address)
+ self.assert_(
+ netutils.IPAddress.GetClassFromIpFamily(socket.AF_INET) ==
+ netutils.IP4Address)
+ self.assert_(
+ netutils.IPAddress.GetClassFromIpFamily(socket.AF_INET6) ==
+ netutils.IP6Address)
+
def testOwnLoopback(self):
# FIXME: In a pure IPv6 environment this is no longer true
self.assert_(netutils.IPAddress.Own("127.0.0.1"),
self.assertEqual(node_ndparams,
self.fake_cl.FillND(fake_node, fake_group))
+ def testPrimaryHypervisor(self):
+ assert self.fake_cl.enabled_hypervisors is None
+ self.fake_cl.enabled_hypervisors = [constants.HT_XEN_HVM]
+ self.assertEqual(self.fake_cl.primary_hypervisor, constants.HT_XEN_HVM)
+
+ self.fake_cl.enabled_hypervisors = [constants.HT_XEN_PVM, constants.HT_KVM]
+ self.assertEqual(self.fake_cl.primary_hypervisor, constants.HT_XEN_PVM)
+
+ self.fake_cl.enabled_hypervisors = sorted(constants.HYPER_TYPES)
+ self.assertEqual(self.fake_cl.primary_hypervisor, constants.HT_CHROOT)
+
class TestOS(unittest.TestCase):
ALL_DATA = [
self.assertRaises(errors.OpPrereqError, inst.FindDisk, 1)
+class TestNode(unittest.TestCase):
+  """Serialization round-trip tests for objects.Node."""
+
+  def testEmpty(self):
+    # An empty node serializes to an empty dict and back to a Node instance
+    self.assertEqual(objects.Node().ToDict(), {})
+    self.assertTrue(isinstance(objects.Node.FromDict({}), objects.Node))
+
+  def testHvState(self):
+    # hv_state (per-hypervisor state objects) must survive ToDict/FromDict
+    node = objects.Node(name="node18157.example.com", hv_state={
+      constants.HT_XEN_HVM: objects.NodeHvState(cpu_total=64),
+      constants.HT_KVM: objects.NodeHvState(cpu_node=1),
+      })
+
+    node2 = objects.Node.FromDict(node.ToDict())
+
+    # Make sure nothing can reference it anymore
+    del node
+
+    self.assertEqual(node2.name, "node18157.example.com")
+    self.assertEqual(frozenset(node2.hv_state), frozenset([
+      constants.HT_XEN_HVM,
+      constants.HT_KVM,
+      ]))
+    self.assertEqual(node2.hv_state[constants.HT_KVM].cpu_node, 1)
+    self.assertEqual(node2.hv_state[constants.HT_XEN_HVM].cpu_total, 64)
+
+  def testDiskState(self):
+    # disk_state (nested per-storage-type dict) must survive the round-trip
+    node = objects.Node(name="node32087.example.com", disk_state={
+      constants.LD_LV: {
+        "lv32352": objects.NodeDiskState(total=128),
+        "lv2082": objects.NodeDiskState(total=512),
+        },
+      })
+
+    node2 = objects.Node.FromDict(node.ToDict())
+
+    # Make sure nothing can reference it anymore
+    del node
+
+    self.assertEqual(node2.name, "node32087.example.com")
+    self.assertEqual(frozenset(node2.disk_state), frozenset([
+      constants.LD_LV,
+      ]))
+    self.assertEqual(frozenset(node2.disk_state[constants.LD_LV]), frozenset([
+      "lv32352",
+      "lv2082",
+      ]))
+    self.assertEqual(node2.disk_state[constants.LD_LV]["lv2082"].total, 512)
+    self.assertEqual(node2.disk_state[constants.LD_LV]["lv32352"].total, 128)
+
+
if __name__ == '__main__':
testutils.GanetiTestProgram()
--- /dev/null
+#!/usr/bin/python
+#
+
+# Copyright (C) 2011 Google Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA.
+
+
+"""Script for testing ganeti.ovf.
+
+"""
+
+import optparse
+import os
+import os.path
+import re
+import shutil
+import sys
+import tempfile
+import unittest
+
+try:
+ import xml.etree.ElementTree as ET
+except ImportError:
+ import elementtree.ElementTree as ET
+
+from ganeti import constants
+from ganeti import errors
+from ganeti import ovf
+from ganeti import utils
+
+import testutils
+
+# Relative directory used as import/export target; removed in tearDown
+OUTPUT_DIR = "newdir"
+
+# Expected importer results for the ganeti.ovf fixture
+GANETI_DISKS = {
+  "disk_count": "1",
+  "disk0_dump": "new_disk.raw",
+  "disk0_size": "0",
+  "disk0_ivname": "disk/0",
+}
+GANETI_NETWORKS = {
+  "nic_count": "1",
+  "nic0_mode": "bridged",
+  "nic0_ip": "none",
+  "nic0_mac": "aa:00:00:d8:2c:1e",
+  "nic0_link": "xen-br0",
+}
+GANETI_HYPERVISOR = {
+  "hypervisor_name": "xen-pvm",
+  "root-path": "/dev/sda",
+  "kernel_args": "ro",
+}
+GANETI_OS = {"os_name": "lenny-image"}
+GANETI_BACKEND = {
+  "vcpus": "1",
+  "memory" : "2048",
+  "auto_balance": "False",
+}
+GANETI_NAME = "ganeti-test-xen"
+GANETI_TEMPLATE = "plain"
+GANETI_TAGS = None
+GANETI_VERSION = "0"
+
+# Expected importer results for the virtualbox.ovf fixture
+VIRTUALBOX_DISKS = {
+  "disk_count": "2",
+  "disk0_ivname": "disk/0",
+  "disk0_dump": "new_disk.raw",
+  "disk0_size": "0",
+  "disk1_ivname": "disk/1",
+  "disk1_dump": "second_disk.raw",
+  "disk1_size": "0",
+}
+VIRTUALBOX_NETWORKS = {
+  "nic_count": "1",
+  "nic0_mode": "bridged",
+  "nic0_ip": "none",
+  "nic0_link": "auto",
+  "nic0_mac": "auto",
+}
+VIRTUALBOX_HYPERVISOR = {"hypervisor_name": "auto"}
+VIRTUALBOX_OS = {"os_name": None}
+VIRTUALBOX_BACKEND = {
+  "vcpus": "1",
+  "memory" : "2048",
+  "auto_balance": "auto",
+}
+VIRTUALBOX_NAME = None
+VIRTUALBOX_TEMPLATE = None
+VIRTUALBOX_TAGS = None
+VIRTUALBOX_VERSION = None
+
+# Expected importer results for the empty.ovf fixture
+EMPTY_DISKS = {}
+EMPTY_NETWORKS = {}
+EMPTY_HYPERVISOR = {"hypervisor_name": "auto"}
+EMPTY_OS = {}
+EMPTY_BACKEND = {
+  "vcpus": "auto",
+  "memory" : "auto",
+  "auto_balance": "auto",
+}
+EMPTY_NAME = None
+EMPTY_TEMPLATE = None
+EMPTY_TAGS = None
+EMPTY_VERSION = None
+
+# Expected results when the values come from command-line options (ARGS_*)
+CMDARGS_DISKS = {
+  "disk_count": "1",
+  "disk0_ivname": "disk/0",
+  "disk0_dump": "disk0.raw",
+  "disk0_size": "8",
+}
+CMDARGS_NETWORKS = {
+  "nic0_link": "auto",
+  "nic0_mode": "bridged",
+  "nic0_ip": "none",
+  "nic0_mac": "auto",
+  "nic_count": "1",
+}
+CMDARGS_HYPERVISOR = {
+  "hypervisor_name": "xen-pvm"
+}
+CMDARGS_OS = {"os_name": "lenny-image"}
+CMDARGS_BACKEND = {
+  "auto_balance": False,
+  "vcpus": "1",
+  "memory": "256",
+}
+CMDARGS_NAME = "test-instance"
+CMDARGS_TEMPLATE = "plain"
+CMDARGS_TAGS = "test-tag-1,test-tag-2"
+
+# Option dictionaries turned into optparse.Values via _GetArgs below
+ARGS_EMPTY = {
+  "output_dir": None,
+  "nics": [],
+  "disks": [],
+  "name": "test-instance",
+  "ova_package": False,
+  "ext_usage": False,
+  "disk_format": "cow",
+  "compression": False,
+}
+ARGS_EXPORT_DIR = dict(ARGS_EMPTY, **{
+  "output_dir": OUTPUT_DIR,
+  "name": None,
+  "hypervisor": None,
+  "os": None,
+  "beparams": {},
+  "no_nics": False,
+  "disk_template": None,
+  "tags": None,
+})
+ARGS_VBOX = dict(ARGS_EXPORT_DIR, **{
+  "output_dir": OUTPUT_DIR,
+  "name": "test-instance",
+  "os": "lenny-image",
+  "hypervisor": ("xen-pvm", {}),
+  "osparams":{},
+  "disks": [],
+})
+ARGS_COMPLETE = dict(ARGS_VBOX, **{
+  "beparams": {"vcpus":"1", "memory":"256", "auto_balance": False},
+  "disks": [(0,{"size":"5mb"})],
+  "nics": [("0",{"mode":"bridged"})],
+  "disk_template": "plain",
+  "tags": "test-tag-1,test-tag-2",
+})
+ARGS_BROKEN = dict(ARGS_EXPORT_DIR , **{
+  "no_nics": True,
+  "disk_template": "diskless",
+  "name": "test-instance",
+  "os": "lenny-image",
+  "osparams": {},
+})
+
+EXP_ARGS_COMPRESSED = dict(ARGS_EXPORT_DIR, **{
+  "compression": True,
+})
+
+# Disk/NIC data fed to OVFWriter in the exporter/writer tests
+EXP_DISKS_LIST = [
+  {
+    "format": "vmdk",
+    "compression": "gzip",
+    "virt-size": 90000,
+    "real-size": 203,
+    "path": "new_disk.cow.gz",
+  },
+  {
+    "format": "cow",
+    "virt-size": 15,
+    "real-size": 15,
+    "path": "new_disk.cow",
+  },
+]
+EXP_NETWORKS_LIST = [
+  {"mac": "aa:00:00:d8:2c:1e", "ip":"None", "link":"br0","mode":"routed"},
+]
+EXP_PARTIAL_GANETI_DICT = {
+  "hypervisor": {"name": "xen-kvm"},
+  "os": {"name": "lenny-image"},
+  "auto_balance": "True",
+  "version": "0",
+}
+EXP_GANETI_DICT = {
+  'tags': None,
+  'auto_balance': 'False',
+  'hypervisor': {
+    'root-path': '/dev/sda',
+    'name': 'xen-pvm',
+    'kernel_args': 'ro'
+    },
+  'version': '0',
+  'disk_template': None,
+  'os': {'name': 'lenny-image'}
+}
+EXP_NAME ="xen-dev-i1"
+EXP_VCPUS = 1
+EXP_MEMORY = 512
+
+# Expected XML fragments produced by OVFWriter in the TestOVFWriter tests;
+# compared with substring checks against ET.tostring() output
+EXPORT_EMPTY = ("<Envelope xml:lang=\"en-US\" xmlns=\"http://schemas.dmtf.org/"
+                "ovf/envelope/1\" xmlns:gnt=\"http://ganeti\" xmlns:ovf=\""
+                "http://schemas.dmtf.org/ovf/envelope/1\" xmlns:rasd=\""
+                "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_Resource"
+                "AllocationSettingData\" xmlns:vssd=\"http://schemas.dmtf.org"
+                "/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData\""
+                " xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" />")
+EXPORT_DISKS_EMPTY = ("<References /><DiskSection><Info>Virtual disk"
+                      " information</Info></DiskSection>")
+EXPORT_DISKS = ("<References><File ovf:compression=\"gzip\" ovf:href=\"new_disk"
+                ".cow.gz\" ovf:id=\"file0\" ovf:size=\"203\" /><File ovf:href="
+                "\"new_disk.cow\" ovf:id=\"file1\" ovf:size=\"15\" />"
+                "</References><DiskSection><Info>Virtual disk information"
+                "</Info><Disk ovf:capacity=\"90000\" ovf:diskId=\"disk0\" ovf"
+                ":fileRef=\"file0\" ovf:format=\"http://www.vmware.com/"
+                "interfaces/specifications/vmdk.html#monolithicSparse\" /><Disk"
+                " ovf:capacity=\"15\" ovf:diskId=\"disk1\" ovf:fileRef"
+                "=\"file1\" ovf:format=\"http://www.gnome.org/~markmc/qcow"
+                "-image-format.html\" /></DiskSection>")
+EXPORT_NETWORKS_EMPTY = ("<NetworkSection><Info>List of logical networks</Info>"
+                         "</NetworkSection>")
+EXPORT_NETWORKS = ("<NetworkSection><Info>List of logical networks</Info>"
+                   "<Network ovf:name=\"routed0\" /></NetworkSection>")
+EXPORT_GANETI_INCOMPLETE = ("<gnt:GanetiSection><gnt:Version>0</gnt:Version>"
+                            "<gnt:AutoBalance>True</gnt:AutoBalance><gnt:"
+                            "OperatingSystem><gnt:Name>lenny-image</gnt:Name>"
+                            "<gnt:Parameters /></gnt:OperatingSystem><gnt:"
+                            "Hypervisor><gnt:Name>xen-kvm</gnt:Name><gnt:"
+                            "Parameters /></gnt:Hypervisor><gnt:Network><gnt:"
+                            "Nic ovf:name=\"routed0\"><gnt:Mode>routed</gnt:"
+                            "Mode><gnt:MACAddress>aa:00:00:d8:2c:1e</gnt:"
+                            "MACAddress><gnt:IPAddress>None</gnt:IPAddress>"
+                            "<gnt:Link>br0</gnt:Link></gnt:Nic></gnt:Network>"
+                            "</gnt:GanetiSection>")
+EXPORT_GANETI = ("<gnt:GanetiSection><gnt:Version>0</gnt:Version><gnt:"
+                 "AutoBalance>False</gnt:AutoBalance><gnt:OperatingSystem>"
+                 "<gnt:Name>lenny-image</gnt:Name><gnt:Parameters /></gnt:"
+                 "OperatingSystem><gnt:Hypervisor><gnt:Name>xen-pvm</gnt:Name>"
+                 "<gnt:Parameters><gnt:root-path>/dev/sda</gnt:root-path><gnt:"
+                 "kernel_args>ro</gnt:kernel_args></gnt:Parameters></gnt:"
+                 "Hypervisor><gnt:Network><gnt:Nic ovf:name=\"routed0\"><gnt:"
+                 "Mode>routed</gnt:Mode><gnt:MACAddress>aa:00:00:d8:2c:1e</gnt:"
+                 "MACAddress><gnt:IPAddress>None</gnt:IPAddress><gnt:Link>br0"
+                 "</gnt:Link></gnt:Nic></gnt:Network></gnt:GanetiSection>")
+EXPORT_SYSTEM = ("<References><File ovf:compression=\"gzip\" ovf:href=\"new_"
+                 "disk.cow.gz\" ovf:id=\"file0\" ovf:size=\"203\" /><File ovf:"
+                 "href=\"new_disk.cow\" ovf:id=\"file1\" ovf:size=\"15\" />"
+                 "</References><DiskSection><Info>Virtual disk information"
+                 "</Info><Disk ovf:capacity=\"90000\" ovf:diskId=\"disk0\""
+                 " ovf:fileRef=\"file0\" ovf:format=\"http://www.vmware.com"
+                 "/interfaces/specifications/vmdk.html#monolithicSparse\" />"
+                 "<Disk ovf:capacity=\"15\" ovf:diskId=\"disk1\" ovf:fileRef"
+                 "=\"file1\" ovf:format=\"http://www.gnome.org/~markmc/qcow"
+                 "-image-format.html\" /></DiskSection><NetworkSection><Info>"
+                 "List of logical networks</Info><Network ovf:name=\"routed0\""
+                 " /></NetworkSection><VirtualSystem ovf:id=\"xen-dev-i1\">"
+                 "<Info>A virtual machine</Info><Name>xen-dev-i1</Name>"
+                 "<OperatingSystemSection ovf:id=\"0\"><Info>Installed guest"
+                 " operating system</Info></OperatingSystemSection><Virtual"
+                 "HardwareSection><Info>Virtual hardware requirements</Info>"
+                 "<System><vssd:ElementName>Virtual Hardware Family"
+                 "</vssd:ElementName><vssd:InstanceID>0</vssd:InstanceID><vssd:"
+                 "VirtualSystemIdentifier>xen-dev-i1</vssd:VirtualSystem"
+                 "Identifier><vssd:VirtualSystemType>ganeti-ovf</vssd:Virtual"
+                 "SystemType></System><Item><rasd:ElementName>1 virtual CPU(s)"
+                 "</rasd:ElementName><rasd:InstanceID>1</rasd:InstanceID><rasd:"
+                 "ResourceType>3</rasd:ResourceType><rasd:VirtualQuantity>1"
+                 "</rasd:VirtualQuantity></Item><Item><rasd:AllocationUnits>"
+                 "byte * 2^20</rasd:AllocationUnits><rasd:ElementName>512MB of"
+                 " memory</rasd:ElementName><rasd:InstanceID>2</rasd:"
+                 "InstanceID><rasd:ResourceType>4</rasd:ResourceType><rasd:"
+                 "VirtualQuantity>512</rasd:VirtualQuantity></Item><Item>"
+                 "<rasd:Address>0</rasd:Address><rasd:ElementName>scsi"
+                 "_controller0</rasd:ElementName><rasd:InstanceID>3"
+                 "</rasd:InstanceID><rasd:ResourceSubType>lsilogic</rasd"
+                 ":ResourceSubType><rasd:ResourceType>6</rasd:ResourceType>"
+                 "</Item><Item><rasd:ElementName>disk0</rasd:ElementName><rasd"
+                 ":HostResource>ovf:/disk/disk0</rasd:HostResource><rasd"
+                 ":InstanceID>4</rasd:InstanceID><rasd:Parent>3</rasd:Parent>"
+                 "<rasd:ResourceType>17</rasd:ResourceType></Item><Item><rasd:"
+                 "ElementName>disk1</rasd:ElementName><rasd:HostResource>ovf:/"
+                 "disk/disk1</rasd:HostResource><rasd:InstanceID>5</rasd"
+                 ":InstanceID><rasd:Parent>3</rasd:Parent><rasd:ResourceType>17"
+                 "</rasd:ResourceType></Item><Item><rasd:Address>aa:00"
+                 ":00:d8:2c:1e</rasd:Address><rasd:Connection>routed0</rasd"
+                 ":Connection><rasd:ElementName>routed0</rasd:ElementName><rasd"
+                 ":InstanceID>6</rasd:InstanceID><rasd:ResourceType>10</rasd"
+                 ":ResourceType></Item></VirtualHardwareSection>"
+                 "</VirtualSystem>")
+
+
+def _GetArgs(args, with_name=False):
+  """Build an optparse.Values object from an argument dictionary.
+
+  NOTE(review): "needed" aliases "args", so with_name=True mutates the
+  caller's dictionary — appears intentional for these one-shot fixtures.
+
+  """
+  options = optparse.Values()
+  needed = args
+  if with_name:
+    needed["name"] = "test-instance"
+  # _update_loose() is a private optparse API: copies the dict into the
+  # Values instance without attribute validation
+  options._update_loose(needed)
+  return options
+
+
+# Pre-built option objects shared by the test cases below
+OPTS_EMPTY = _GetArgs(ARGS_EMPTY)
+OPTS_EXPORT_NO_NAME = _GetArgs(ARGS_EXPORT_DIR)
+OPTS_EXPORT = _GetArgs(ARGS_EXPORT_DIR, with_name=True)
+
+EXP_OPTS = OPTS_EXPORT_NO_NAME
+EXP_OPTS_COMPRESSED = _GetArgs(EXP_ARGS_COMPRESSED)
+
+OPTS_VBOX = _GetArgs(ARGS_VBOX)
+OPTS_COMPLETE = _GetArgs(ARGS_COMPLETE)
+OPTS_NONIC_NODISK = _GetArgs(ARGS_BROKEN)
+
+
+def _GetFullFilename(file_name):
+  """Return the absolute path of a fixture in test/data/ovfdata.
+
+  The file is not required to exist; some tests rely on missing paths.
+
+  """
+  file_path = "%s/test/data/ovfdata/%s" % (testutils.GetSourceDir(),
+                                           file_name)
+  file_path = os.path.abspath(file_path)
+  return file_path
+
+
+class BetterUnitTest(unittest.TestCase):
+  """TestCase with an assertRaisesRegexp backport for Python < 2.7."""
+
+  def assertRaisesRegexp(self, exception, regexp_val, function, *args):
+    # Fails unless function(*args) raises "exception" and the stringified
+    # exception matches the "regexp_val" pattern
+    try:
+      function(*args)
+      self.fail("Expected raising %s" % exception)
+    except exception, err:
+      regexp = re.compile(regexp_val)
+      if re.search(regexp, str(err)) == None:
+        self.fail("Expected matching '%s', got '%s'" %
+          (regexp_val, str(err)))
+
+
+class TestOVFImporter(BetterUnitTest):
+  """Tests for ovf.OVFImporter against the .ovf/.ova fixtures in ovfdata."""
+
+  def setUp(self):
+    self.non_existing_file = _GetFullFilename("not_the_file.ovf")
+    self.ganeti_ovf = _GetFullFilename("ganeti.ovf")
+    self.virtualbox_ovf = _GetFullFilename("virtualbox.ovf")
+    self.ova_package = _GetFullFilename("ova.ova")
+    self.empty_ovf = _GetFullFilename("empty.ovf")
+    self.wrong_extension = _GetFullFilename("wrong_extension.ovd")
+    self.wrong_ova_archive = _GetFullFilename("wrong_ova.ova")
+    self.no_ovf_in_ova = _GetFullFilename("no_ovf.ova")
+    self.importer = None
+
+  def tearDown(self):
+    if self.importer:
+      self.importer.Cleanup()
+    del_dir = os.path.abspath(OUTPUT_DIR)
+    try:
+      shutil.rmtree(del_dir)
+    except OSError:
+      # Best effort: not every test creates OUTPUT_DIR
+      pass
+
+  def testFileDoesNotExistError(self):
+    self.assertRaisesRegexp(errors.OpPrereqError, "does not exist",
+                            ovf.OVFImporter, self.non_existing_file, None)
+
+  def testWrongInputFileExtensionError(self):
+    self.assertRaisesRegexp(errors.OpPrereqError,
+                            "Unknown file extension", ovf.OVFImporter,
+                            self.wrong_extension, None)
+
+  def testOVAUnpackingDirectories(self):
+    # .ova input is unpacked into a temporary directory
+    self.importer = ovf.OVFImporter(self.ova_package, OPTS_EMPTY)
+    self.assertTrue(self.importer.input_dir != None)
+    self.assertEquals(self.importer.output_dir , constants.EXPORT_DIR)
+    self.assertTrue(self.importer.temp_dir != None)
+
+  def testOVFUnpackingDirectories(self):
+    # Plain .ovf input is read in place, no temporary directory needed
+    self.importer = ovf.OVFImporter(self.virtualbox_ovf,
+                                    OPTS_EMPTY)
+    self.assertEquals(self.importer.input_dir , _GetFullFilename(""))
+    self.assertEquals(self.importer.output_dir , constants.EXPORT_DIR)
+    self.assertEquals(self.importer.temp_dir , None)
+
+  def testOVFSetOutputDirDirectories(self):
+    self.importer = ovf.OVFImporter(self.ganeti_ovf, OPTS_EXPORT)
+    self.assertEquals(self.importer.input_dir , _GetFullFilename(""))
+    self.assertTrue(OUTPUT_DIR in self.importer.output_dir)
+    self.assertEquals(self.importer.temp_dir , None)
+
+  def testWrongOVAArchiveError(self):
+    self.assertRaisesRegexp(errors.OpPrereqError, "not a proper tar",
+                            ovf.OVFImporter, self.wrong_ova_archive, None)
+
+  def testNoOVFFileInOVAPackageError(self):
+    self.assertRaisesRegexp(errors.OpPrereqError, "No .ovf file",
+                            ovf.OVFImporter, self.no_ovf_in_ova, None)
+
+  def testParseGanetiOvf(self):
+    # All results_* fields must match the GANETI_* fixture dicts
+    self.importer = ovf.OVFImporter(self.ganeti_ovf, OPTS_EXPORT_NO_NAME)
+    self.importer.Parse()
+    self.assertTrue("%s/ganeti-test-xen" % OUTPUT_DIR in
+                    self.importer.output_dir)
+    self.assertEqual(self.importer.results_disk, GANETI_DISKS)
+    self.assertEqual(self.importer.results_network, GANETI_NETWORKS)
+    self.assertEqual(self.importer.results_hypervisor, GANETI_HYPERVISOR)
+    self.assertEqual(self.importer.results_os, GANETI_OS)
+    self.assertEqual(self.importer.results_backend, GANETI_BACKEND)
+    self.assertEqual(self.importer.results_name, GANETI_NAME)
+    self.assertEqual(self.importer.results_template, GANETI_TEMPLATE)
+    self.assertEqual(self.importer.results_tags, GANETI_TAGS)
+    self.assertEqual(self.importer.results_version, GANETI_VERSION)
+
+  def testParseVirtualboxOvf(self):
+    # Missing values (name, os, hypervisor) come from the OPTS_VBOX options
+    self.importer = ovf.OVFImporter(self.virtualbox_ovf, OPTS_VBOX)
+    self.importer.Parse()
+    self.assertTrue("%s/test-instance" % OUTPUT_DIR in self.importer.output_dir)
+    self.assertEquals(self.importer.results_disk, VIRTUALBOX_DISKS)
+    self.assertEquals(self.importer.results_network, VIRTUALBOX_NETWORKS)
+    self.assertEquals(self.importer.results_hypervisor, CMDARGS_HYPERVISOR)
+    self.assertEquals(self.importer.results_os, CMDARGS_OS)
+    self.assertEquals(self.importer.results_backend, VIRTUALBOX_BACKEND)
+    self.assertEquals(self.importer.results_name, CMDARGS_NAME)
+    self.assertEquals(self.importer.results_template, VIRTUALBOX_TEMPLATE)
+    self.assertEqual(self.importer.results_tags, VIRTUALBOX_TAGS)
+    self.assertEqual(self.importer.results_version, constants.EXPORT_VERSION)
+
+  def testParseEmptyOvf(self):
+    # With an empty .ovf everything must come from command-line options
+    self.importer = ovf.OVFImporter(self.empty_ovf, OPTS_COMPLETE)
+    self.importer.Parse()
+    self.assertTrue("%s/test-instance" % OUTPUT_DIR in self.importer.output_dir)
+    self.assertEquals(self.importer.results_disk, CMDARGS_DISKS)
+    self.assertEquals(self.importer.results_network, CMDARGS_NETWORKS)
+    self.assertEquals(self.importer.results_hypervisor, CMDARGS_HYPERVISOR)
+    self.assertEquals(self.importer.results_os, CMDARGS_OS)
+    self.assertEquals(self.importer.results_backend, CMDARGS_BACKEND)
+    self.assertEquals(self.importer.results_name, CMDARGS_NAME)
+    self.assertEquals(self.importer.results_template, CMDARGS_TEMPLATE)
+    self.assertEqual(self.importer.results_tags, CMDARGS_TAGS)
+    self.assertEqual(self.importer.results_version, constants.EXPORT_VERSION)
+
+  def testParseNameOptions(self):
+    self.importer = ovf.OVFImporter(self.empty_ovf, OPTS_COMPLETE)
+    results = self.importer._ParseNameOptions()
+    self.assertEquals(results, CMDARGS_NAME)
+
+  def testParseHypervisorOptions(self):
+    self.importer = ovf.OVFImporter(self.empty_ovf, OPTS_COMPLETE)
+    results = self.importer._ParseHypervisorOptions()
+    self.assertEquals(results, CMDARGS_HYPERVISOR)
+
+  def testParseOSOptions(self):
+    self.importer = ovf.OVFImporter(self.empty_ovf, OPTS_COMPLETE)
+    results = self.importer._ParseOSOptions()
+    self.assertEquals(results, CMDARGS_OS)
+
+  def testParseBackendOptions(self):
+    self.importer = ovf.OVFImporter(self.empty_ovf, OPTS_COMPLETE)
+    results = self.importer._ParseBackendOptions()
+    self.assertEquals(results, CMDARGS_BACKEND)
+
+  def testParseTags(self):
+    self.importer = ovf.OVFImporter(self.empty_ovf, OPTS_COMPLETE)
+    results = self.importer._ParseTags()
+    self.assertEquals(results, CMDARGS_TAGS)
+
+  def testParseNicOptions(self):
+    self.importer = ovf.OVFImporter(self.empty_ovf, OPTS_COMPLETE)
+    results = self.importer._ParseNicOptions()
+    self.assertEquals(results, CMDARGS_NETWORKS)
+
+  def testParseDiskOptionsFromGanetiOVF(self):
+    self.importer = ovf.OVFImporter(self.ganeti_ovf, OPTS_EXPORT)
+    # _GetDiskInfo writes converted disks below OUTPUT_DIR
+    os.mkdir(OUTPUT_DIR)
+    results = self.importer._GetDiskInfo()
+    self.assertEquals(results, GANETI_DISKS)
+
+  def testParseTemplateOptions(self):
+    self.importer = ovf.OVFImporter(self.empty_ovf, OPTS_COMPLETE)
+    results = self.importer._ParseTemplateOptions()
+    self.assertEquals(results, GANETI_TEMPLATE)
+
+  def testParseDiskOptionsFromCmdLine(self):
+    self.importer = ovf.OVFImporter(self.empty_ovf, OPTS_COMPLETE)
+    os.mkdir(OUTPUT_DIR)
+    results = self.importer._ParseDiskOptions()
+    self.assertEquals(results, CMDARGS_DISKS)
+
+  def testGetDiskFormat(self):
+    # Queries qemu-img (via _GetDiskQemuInfo) for the on-disk format
+    self.importer = ovf.OVFImporter(self.ganeti_ovf, OPTS_EXPORT)
+    disks_list = self.importer.ovf_reader.GetDisksNames()
+    results = [self.importer._GetDiskQemuInfo("%s/%s" %
+      (self.importer.input_dir, path), "file format: (\S+)")
+      for (path, _) in disks_list]
+    self.assertEqual(results, ["vmdk"])
+
+  def testNoInstanceNameOVF(self):
+    self.importer = ovf.OVFImporter(self.empty_ovf, OPTS_EXPORT_NO_NAME)
+    self.assertRaisesRegexp(errors.OpPrereqError, "Name of instance",
+                            self.importer.Parse)
+
+  def testErrorNoOSNameOVF(self):
+    self.importer = ovf.OVFImporter(self.virtualbox_ovf, OPTS_EXPORT)
+    self.assertRaisesRegexp(errors.OpPrereqError, "OS name",
+                            self.importer.Parse)
+
+  def testErrorNoDiskAndNoNetwork(self):
+    self.importer = ovf.OVFImporter(self.empty_ovf, OPTS_NONIC_NODISK)
+    self.assertRaisesRegexp(errors.OpPrereqError,
+                            "Either disk specification or network"
+                            " description", self.importer.Parse)
+
+
+class TestOVFExporter(BetterUnitTest):
+  """Tests for ovf.OVFExporter against the .ini config fixtures in ovfdata."""
+
+  def setUp(self):
+    self.exporter = None
+    self.wrong_config_file = _GetFullFilename("wrong_config.ini")
+    self.unsafe_path_to_disk = _GetFullFilename("unsafe_path.ini")
+    self.disk_image_not_exist = _GetFullFilename("no_disk.ini")
+    self.empty_config = _GetFullFilename("empty.ini")
+    self.standard_export = _GetFullFilename("config.ini")
+    # no_disk.ini doubles as the fixture for several error conditions
+    self.wrong_network_mode = self.disk_image_not_exist
+    self.no_memory = self.disk_image_not_exist
+    self.no_vcpus = self.disk_image_not_exist
+    self.no_os = _GetFullFilename("no_os.ini")
+    self.no_hypervisor = self.disk_image_not_exist
+
+  def tearDown(self):
+    if self.exporter:
+      self.exporter.Cleanup()
+    del_dir = os.path.abspath(OUTPUT_DIR)
+    try:
+      shutil.rmtree(del_dir)
+    except OSError:
+      # Best effort: not every test creates OUTPUT_DIR
+      pass
+
+  def testErrorWrongConfigFile(self):
+    self.assertRaisesRegexp(errors.OpPrereqError,
+      "Error when trying to read", ovf.OVFExporter,
+      self.wrong_config_file, EXP_OPTS)
+
+  def testErrorPathToTheDiskIncorrect(self):
+    self.exporter = ovf.OVFExporter(self.unsafe_path_to_disk, EXP_OPTS)
+    self.assertRaisesRegexp(errors.OpPrereqError, "contains a directory name",
+      self.exporter._ParseDisks)
+
+  def testErrorDiskImageNotExist(self):
+    self.exporter = ovf.OVFExporter(self.disk_image_not_exist, EXP_OPTS)
+    self.assertRaisesRegexp(errors.OpPrereqError, "Disk image does not exist",
+      self.exporter._ParseDisks)
+
+  def testParseNetworks(self):
+    self.exporter = ovf.OVFExporter(self.standard_export, EXP_OPTS)
+    results = self.exporter._ParseNetworks()
+    self.assertEqual(results, EXP_NETWORKS_LIST)
+
+  def testErrorWrongNetworkMode(self):
+    self.exporter = ovf.OVFExporter(self.wrong_network_mode, EXP_OPTS)
+    self.assertRaisesRegexp(errors.OpPrereqError,
+      "Network mode nic not recognized", self.exporter._ParseNetworks)
+
+  def testParseVCPusMem(self):
+    self.exporter = ovf.OVFExporter(self.standard_export, EXP_OPTS)
+    vcpus = self.exporter._ParseVCPUs()
+    memory = self.exporter._ParseMemory()
+    self.assertEqual(vcpus, EXP_VCPUS)
+    self.assertEqual(memory, EXP_MEMORY)
+
+  def testErrorNoVCPUs(self):
+    self.exporter = ovf.OVFExporter(self.no_vcpus, EXP_OPTS)
+    self.assertRaisesRegexp(errors.OpPrereqError, "No CPU information found",
+      self.exporter._ParseVCPUs)
+
+  def testErrorNoMemory(self):
+    self.exporter = ovf.OVFExporter(self.no_memory, EXP_OPTS)
+    self.assertRaisesRegexp(errors.OpPrereqError, "No memory information found",
+      self.exporter._ParseMemory)
+
+  def testParseGaneti(self):
+    self.exporter = ovf.OVFExporter(self.standard_export, EXP_OPTS)
+    results = self.exporter._ParseGaneti()
+    self.assertEqual(results, EXP_GANETI_DICT)
+
+  def testErrorNoHypervisor(self):
+    self.exporter = ovf.OVFExporter(self.no_hypervisor, EXP_OPTS)
+    self.assertRaisesRegexp(errors.OpPrereqError,
+      "No hypervisor information found", self.exporter._ParseGaneti)
+
+  def testErrorNoOS(self):
+    self.exporter = ovf.OVFExporter(self.no_os, EXP_OPTS)
+    self.assertRaisesRegexp(errors.OpPrereqError,
+      "No operating system information found", self.exporter._ParseGaneti)
+
+  def testErrorParseNoInstanceName(self):
+    self.exporter = ovf.OVFExporter(self.empty_config, EXP_OPTS)
+    self.assertRaisesRegexp(errors.OpPrereqError, "No instance name found",
+      self.exporter.Parse)
+
+
+class TestOVFReader(BetterUnitTest):
+  """Tests for ovf.OVFReader parsing of the .ovf fixtures in ovfdata."""
+
+  def setUp(self):
+    self.wrong_xml_file = _GetFullFilename("wrong_xml.ovf")
+    self.ganeti_ovf = _GetFullFilename("ganeti.ovf")
+    self.virtualbox_ovf = _GetFullFilename("virtualbox.ovf")
+    self.corrupted_ovf = _GetFullFilename("corrupted_resources.ovf")
+    self.wrong_manifest_ovf = _GetFullFilename("wrong_manifest.ovf")
+    self.no_disk_in_ref_ovf = _GetFullFilename("no_disk_in_ref.ovf")
+    self.empty_ovf = _GetFullFilename("empty.ovf")
+    self.compressed_disk = _GetFullFilename("gzip_disk.ovf")
+
+  def tearDown(self):
+    # Readers hold no on-disk state, nothing to clean up
+    pass
+
+  def testXMLParsingError(self):
+    self.assertRaisesRegexp(errors.OpPrereqError,
+      "Error while reading .ovf", ovf.OVFReader, self.wrong_xml_file)
+
+  def testFileInResourcesDoesNotExistError(self):
+    self.assertRaisesRegexp(errors.OpPrereqError, "does not exist",
+      ovf.OVFReader, self.corrupted_ovf)
+
+  def testWrongManifestChecksumError(self):
+    reader = ovf.OVFReader(self.wrong_manifest_ovf)
+    self.assertRaisesRegexp(errors.OpPrereqError,
+      "does not match the value in manifest file", reader.VerifyManifest)
+
+  def testGoodManifestChecksum(self):
+    # A matching manifest verifies silently (returns None)
+    reader = ovf.OVFReader(self.ganeti_ovf)
+    self.assertEqual(reader.VerifyManifest(), None)
+
+  def testGetDisksNamesOVFCorruptedError(self):
+    reader = ovf.OVFReader(self.no_disk_in_ref_ovf)
+    self.assertRaisesRegexp(errors.OpPrereqError,
+      "not found in references", reader.GetDisksNames)
+
+  def testGetDisksNamesVirtualbox(self):
+    # Disk names are (path, compression) tuples; None = uncompressed
+    reader = ovf.OVFReader(self.virtualbox_ovf)
+    disk_names = reader.GetDisksNames()
+    expected_names = [
+      ("new_disk.vmdk", None) ,
+      ("second_disk.vmdk", None),
+    ]
+    self.assertEqual(sorted(disk_names), sorted(expected_names))
+
+  def testGetDisksNamesEmpty(self):
+    reader = ovf.OVFReader(self.empty_ovf)
+    disk_names = reader.GetDisksNames()
+    self.assertEqual(disk_names, [])
+
+  def testGetDisksNamesCompressed(self):
+    reader = ovf.OVFReader(self.compressed_disk)
+    disk_names = reader.GetDisksNames()
+    self.assertEqual(disk_names, [("compr_disk.vmdk.gz", "gzip")])
+
+  def testGetNetworkDataGaneti(self):
+    reader = ovf.OVFReader(self.ganeti_ovf)
+    networks = reader.GetNetworkData()
+    self.assertEqual(networks, GANETI_NETWORKS)
+
+  def testGetNetworkDataVirtualbox(self):
+    reader = ovf.OVFReader(self.virtualbox_ovf)
+    networks = reader.GetNetworkData()
+    self.assertEqual(networks, VIRTUALBOX_NETWORKS)
+
+  def testGetNetworkDataEmpty(self):
+    reader = ovf.OVFReader(self.empty_ovf)
+    networks = reader.GetNetworkData()
+    self.assertEqual(networks, EMPTY_NETWORKS)
+
+  def testGetHypervisorDataGaneti(self):
+    reader = ovf.OVFReader(self.ganeti_ovf)
+    hypervisor = reader.GetHypervisorData()
+    self.assertEqual(hypervisor, GANETI_HYPERVISOR)
+
+  def testGetHypervisorDataEmptyOvf(self):
+    reader = ovf.OVFReader(self.empty_ovf)
+    hypervisor = reader.GetHypervisorData()
+    self.assertEqual(hypervisor, EMPTY_HYPERVISOR)
+
+  def testGetOSDataGaneti(self):
+    reader = ovf.OVFReader(self.ganeti_ovf)
+    osys = reader.GetOSData()
+    self.assertEqual(osys, GANETI_OS)
+
+  def testGetOSDataEmptyOvf(self):
+    reader = ovf.OVFReader(self.empty_ovf)
+    osys = reader.GetOSData()
+    self.assertEqual(osys, EMPTY_OS)
+
+  def testGetBackendDataGaneti(self):
+    reader = ovf.OVFReader(self.ganeti_ovf)
+    backend = reader.GetBackendData()
+    self.assertEqual(backend, GANETI_BACKEND)
+
+  def testGetBackendDataVirtualbox(self):
+    reader = ovf.OVFReader(self.virtualbox_ovf)
+    backend = reader.GetBackendData()
+    self.assertEqual(backend, VIRTUALBOX_BACKEND)
+
+  def testGetBackendDataEmptyOvf(self):
+    reader = ovf.OVFReader(self.empty_ovf)
+    backend = reader.GetBackendData()
+    self.assertEqual(backend, EMPTY_BACKEND)
+
+  def testGetInstanceNameGaneti(self):
+    reader = ovf.OVFReader(self.ganeti_ovf)
+    name = reader.GetInstanceName()
+    self.assertEqual(name, GANETI_NAME)
+
+  def testGetInstanceNameDataEmptyOvf(self):
+    reader = ovf.OVFReader(self.empty_ovf)
+    name = reader.GetInstanceName()
+    self.assertEqual(name, EMPTY_NAME)
+
+  def testGetDiskTemplateGaneti(self):
+    reader = ovf.OVFReader(self.ganeti_ovf)
+    name = reader.GetDiskTemplate()
+    self.assertEqual(name, GANETI_TEMPLATE)
+
+  def testGetDiskTemplateEmpty(self):
+    reader = ovf.OVFReader(self.empty_ovf)
+    name = reader.GetDiskTemplate()
+    self.assertEqual(name, EMPTY_TEMPLATE)
+
+  def testGetTagsGaneti(self):
+    reader = ovf.OVFReader(self.ganeti_ovf)
+    tags = reader.GetTagsData()
+    self.assertEqual(tags, GANETI_TAGS)
+
+  def testGetTagsEmpty(self):
+    reader = ovf.OVFReader(self.empty_ovf)
+    tags = reader.GetTagsData()
+    self.assertEqual(tags, EMPTY_TAGS)
+
+  def testGetVersionGaneti(self):
+    reader = ovf.OVFReader(self.ganeti_ovf)
+    version = reader.GetVersionData()
+    self.assertEqual(version, GANETI_VERSION)
+
+  def testGetVersionEmpty(self):
+    reader = ovf.OVFReader(self.empty_ovf)
+    version = reader.GetVersionData()
+    self.assertEqual(version, EMPTY_VERSION)
+
+
+class TestOVFWriter(BetterUnitTest):
+  """Tests for ovf.OVFWriter output against the EXPORT_* XML fixtures."""
+
+  def setUp(self):
+    # NOTE(review): meaning of the boolean flag isn't visible here — verify
+    # against ovf.OVFWriter.__init__
+    self.writer = ovf.OVFWriter(True)
+
+  def tearDown(self):
+    # Writer keeps everything in memory, nothing to clean up
+    pass
+
+  def testOVFWriterInit(self):
+    # All checks below are substring checks on the serialized tree
+    result = ET.tostring(self.writer.tree)
+    self.assertTrue(EXPORT_EMPTY in result)
+
+  def testSaveDisksDataEmpty(self):
+    self.writer.SaveDisksData([])
+    result = ET.tostring(self.writer.tree)
+    self.assertTrue(EXPORT_DISKS_EMPTY in result)
+
+  def testSaveDisksData(self):
+    self.writer.SaveDisksData(EXP_DISKS_LIST)
+    result = ET.tostring(self.writer.tree)
+    self.assertTrue(EXPORT_DISKS in result)
+
+  def testSaveNetworkDataEmpty(self):
+    self.writer.SaveNetworksData([])
+    result = ET.tostring(self.writer.tree)
+    self.assertTrue(EXPORT_NETWORKS_EMPTY in result)
+
+  def testSaveNetworksData(self):
+    self.writer.SaveNetworksData(EXP_NETWORKS_LIST)
+    result = ET.tostring(self.writer.tree)
+    self.assertTrue(EXPORT_NETWORKS in result)
+
+  def testSaveGanetiDataIncomplete(self):
+    self.writer.SaveGanetiData(EXP_PARTIAL_GANETI_DICT, EXP_NETWORKS_LIST)
+    result = ET.tostring(self.writer.tree)
+    self.assertTrue(EXPORT_GANETI_INCOMPLETE in result)
+
+  def testSaveGanetiDataComplete(self):
+    self.writer.SaveGanetiData(EXP_GANETI_DICT, EXP_NETWORKS_LIST)
+    result = ET.tostring(self.writer.tree)
+    self.assertTrue(EXPORT_GANETI in result)
+
+  def testSaveVirtualSystem(self):
+    self.writer.SaveDisksData(EXP_DISKS_LIST)
+    self.writer.SaveNetworksData(EXP_NETWORKS_LIST)
+    self.writer.SaveVirtualSystemData(EXP_NAME, EXP_VCPUS, EXP_MEMORY)
+    result = ET.tostring(self.writer.tree)
+    self.assertTrue(EXPORT_SYSTEM in result)
+
+
+if __name__ == "__main__":
+  testutils.GanetiTestProgram()
#!/usr/bin/python
#
-# Copyright (C) 2010 Google Inc.
+# Copyright (C) 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
if parse_exp is None:
parse_exp = names
- filter_ = qlang.MakeSimpleFilter(field, names)
- self.assertEqual(filter_, expected)
+ qfilter = qlang.MakeSimpleFilter(field, names)
+ self.assertEqual(qfilter, expected)
def test(self):
self._Test("name", None, None, parse_exp=[])
def setUp(self):
self.parser = qlang.BuildFilterParser()
- def _Test(self, filter_, expected, expect_filter=True):
- self.assertEqual(qlang.MakeFilter([filter_], not expect_filter), expected)
- self.assertEqual(qlang.ParseFilter(filter_, parser=self.parser), expected)
+  def _Test(self, qfilter, expected, expect_filter=True):
+    # Exercise both code paths: building the filter programmatically and
+    # parsing the same filter from its textual form
+    self.assertEqual(qlang.MakeFilter([qfilter], not expect_filter), expected)
+    self.assertEqual(qlang.ParseFilter(qfilter, parser=self.parser), expected)
def test(self):
self._Test("name==\"foobar\"", [qlang.OP_EQUAL, "name", "foobar"])
# Non-matching regexp delimiters
tests.append("name =~ /foobarbaz#")
- for filter_ in tests:
+ for qfilter in tests:
try:
- qlang.ParseFilter(filter_, parser=self.parser)
+ qlang.ParseFilter(qfilter, parser=self.parser)
except errors.QueryFilterParseError, err:
self.assertEqual(len(err.GetDetails()), 3)
else:
- self.fail("Invalid filter '%s' did not raise exception" % filter_)
+ self.fail("Invalid filter '%s' did not raise exception" % qfilter)
class TestMakeFilter(unittest.TestCase):
None, 0, lambda *args: None),
], [])
+ # Duplicate field name
+ self.assertRaises(ValueError, query._PrepareFieldList, [
+ (query._MakeField("name", "Name", constants.QFT_TEXT, "Name"),
+ None, 0, lambda *args: None),
+ (query._MakeField("name", "Other", constants.QFT_OTHER, "Other"),
+ None, 0, lambda *args: None),
+ ], [])
+
def testUnknown(self):
fielddef = query._PrepareFieldList([
(query._MakeField("name", "Name", constants.QFT_TEXT, "Name"),
return query.Query(query.INSTANCE_FIELDS, selected)
def testSimple(self):
- q = self._Create(["name", "be/memory", "ip"])
+ q = self._Create(["name", "be/maxmem", "ip"])
self.assertEqual(q.RequestedData(), set([query.IQ_CONFIG]))
cluster = objects.Cluster(cluster_name="testcluster",
objects.Instance(name="inst2", hvparams={}, nics=[], osparams={},
os="foomoo",
beparams={
- constants.BE_MEMORY: 512,
+ constants.BE_MAXMEM: 512,
}),
objects.Instance(name="inst3", hvparams={}, beparams={}, osparams={},
os="dos", nics=[objects.NIC(ip="192.0.2.99", nicparams={})]),
objects.Instance(name="inst1", hvparams={}, beparams={}, nics=[],
uuid="f90eccb3-e227-4e3c-bf2a-94a21ca8f9cd",
ctime=1291244000, mtime=1291244400, serial_no=30,
- admin_up=True, hypervisor=constants.HT_XEN_PVM, os="linux1",
+ admin_state=constants.ADMINST_UP, hypervisor=constants.HT_XEN_PVM,
+ os="linux1",
primary_node="node1",
disk_template=constants.DT_PLAIN,
disks=[],
objects.Instance(name="inst2", hvparams={}, nics=[],
uuid="73a0f8a7-068c-4630-ada2-c3440015ab1a",
ctime=1291211000, mtime=1291211077, serial_no=1,
- admin_up=True, hypervisor=constants.HT_XEN_HVM, os="deb99",
+ admin_state=constants.ADMINST_UP, hypervisor=constants.HT_XEN_HVM,
+ os="deb99",
primary_node="node5",
disk_template=constants.DT_DISKLESS,
disks=[],
beparams={
- constants.BE_MEMORY: 512,
+ constants.BE_MAXMEM: 512,
+ constants.BE_MINMEM: 256,
},
osparams={}),
objects.Instance(name="inst3", hvparams={}, beparams={},
uuid="11ec8dff-fb61-4850-bfe0-baa1803ff280",
ctime=1291011000, mtime=1291013000, serial_no=1923,
- admin_up=False, hypervisor=constants.HT_KVM, os="busybox",
+ admin_state=constants.ADMINST_DOWN, hypervisor=constants.HT_KVM,
+ os="busybox",
primary_node="node6",
disk_template=constants.DT_DRBD8,
disks=[],
objects.Instance(name="inst4", hvparams={}, beparams={},
uuid="68dab168-3ef5-4c9d-b4d3-801e0672068c",
ctime=1291244390, mtime=1291244395, serial_no=25,
- admin_up=False, hypervisor=constants.HT_XEN_PVM, os="linux1",
+ admin_state=constants.ADMINST_DOWN, hypervisor=constants.HT_XEN_PVM,
+ os="linux1",
primary_node="nodeoff2",
disk_template=constants.DT_DRBD8,
disks=[],
objects.Instance(name="inst5", hvparams={}, nics=[],
uuid="0e3dca12-5b42-4e24-98a2-415267545bd0",
ctime=1231211000, mtime=1261200000, serial_no=3,
- admin_up=True, hypervisor=constants.HT_XEN_HVM, os="deb99",
+ admin_state=constants.ADMINST_UP, hypervisor=constants.HT_XEN_HVM,
+ os="deb99",
primary_node="nodebad2",
disk_template=constants.DT_DISKLESS,
disks=[],
beparams={
- constants.BE_MEMORY: 512,
+ constants.BE_MAXMEM: 512,
+ constants.BE_MINMEM: 512,
},
osparams={}),
objects.Instance(name="inst6", hvparams={}, nics=[],
uuid="72de6580-c8d5-4661-b902-38b5785bb8b3",
ctime=7513, mtime=11501, serial_no=13390,
- admin_up=False, hypervisor=constants.HT_XEN_HVM, os="deb99",
+ admin_state=constants.ADMINST_DOWN, hypervisor=constants.HT_XEN_HVM,
+ os="deb99",
primary_node="node7",
disk_template=constants.DT_DISKLESS,
disks=[],
beparams={
- constants.BE_MEMORY: 768,
+ constants.BE_MAXMEM: 768,
+ constants.BE_MINMEM: 256,
},
osparams={
"clean_install": "no",
objects.Instance(name="inst7", hvparams={}, nics=[],
uuid="ceec5dc4-b729-4f42-ae28-69b3cd24920e",
ctime=None, mtime=None, serial_no=1947,
- admin_up=False, hypervisor=constants.HT_XEN_HVM, os="deb99",
+ admin_state=constants.ADMINST_DOWN, hypervisor=constants.HT_XEN_HVM,
+ os="deb99",
+ primary_node="node6",
+ disk_template=constants.DT_DISKLESS,
+ disks=[],
+ beparams={},
+ osparams={}),
+ objects.Instance(name="inst8", hvparams={}, nics=[],
+ uuid="ceec5dc4-b729-4f42-ae28-69b3cd24920f",
+ ctime=None, mtime=None, serial_no=19478,
+ admin_state=constants.ADMINST_OFFLINE, hypervisor=constants.HT_XEN_HVM,
+ os="deb99",
primary_node="node6",
disk_template=constants.DT_DISKLESS,
disks=[],
elif inst.name in live_data:
if inst.name in wrongnode_inst:
exp_status = constants.INSTST_WRONGNODE
- elif inst.admin_up:
+ elif inst.admin_state == constants.ADMINST_UP:
exp_status = constants.INSTST_RUNNING
else:
exp_status = constants.INSTST_ERRORUP
- elif inst.admin_up:
+ elif inst.admin_state == constants.ADMINST_UP:
exp_status = constants.INSTST_ERRORDOWN
- else:
+ elif inst.admin_state == constants.ADMINST_DOWN:
exp_status = constants.INSTST_ADMINDOWN
+ else:
+ exp_status = constants.INSTST_ADMINOFFLINE
self.assertEqual(row[fieldidx["status"]],
(constants.RS_NORMAL, exp_status))
(_, status) = row[fieldidx["status"]]
tested_status.add(status)
- for (field, livefield) in [("oper_ram", "memory"),
- ("oper_vcpus", "vcpus")]:
+ #FIXME(dynmem): check oper_ram vs min/max mem
+ for (field, livefield) in [("oper_vcpus", "vcpus")]:
if inst.primary_node in bad_nodes:
exp = (constants.RS_NODATA, None)
elif inst.name in live_data:
self.groups = [
objects.NodeGroup(name="default",
uuid="c0e89160-18e7-11e0-a46e-001d0904baeb",
- alloc_policy=constants.ALLOC_POLICY_PREFERRED),
+ alloc_policy=constants.ALLOC_POLICY_PREFERRED,
+ ipolicy=objects.MakeEmptyIPolicy()),
objects.NodeGroup(name="restricted",
uuid="d2a40a74-18e7-11e0-9143-001d0904baeb",
- alloc_policy=constants.ALLOC_POLICY_LAST_RESORT),
+ alloc_policy=constants.ALLOC_POLICY_LAST_RESORT,
+ ipolicy=objects.MakeEmptyIPolicy()),
]
+ self.cluster = objects.Cluster(cluster_name="testcluster",
+ hvparams=constants.HVC_DEFAULTS,
+ beparams={
+ constants.PP_DEFAULT: constants.BEC_DEFAULTS,
+ },
+ nicparams={
+ constants.PP_DEFAULT: constants.NICC_DEFAULTS,
+ },
+ ndparams=constants.NDC_DEFAULTS,
+ ipolicy=constants.IPOLICY_DEFAULTS,
+ )
def _Create(self, selected):
return query.Query(query.GROUP_FIELDS, selected)
def testSimple(self):
q = self._Create(["name", "uuid", "alloc_policy"])
- gqd = query.GroupQueryData(self.groups, None, None)
+ gqd = query.GroupQueryData(self.cluster, self.groups, None, None)
self.assertEqual(q.RequestedData(), set([query.GQ_CONFIG]))
}
q = self._Create(["name", "node_cnt", "node_list"])
- gqd = query.GroupQueryData(self.groups, groups_to_nodes, None)
+ gqd = query.GroupQueryData(self.cluster, self.groups, groups_to_nodes, None)
self.assertEqual(q.RequestedData(), set([query.GQ_CONFIG, query.GQ_NODE]))
}
q = self._Create(["pinst_cnt", "pinst_list"])
- gqd = query.GroupQueryData(self.groups, None, groups_to_instances)
+ gqd = query.GroupQueryData(self.cluster, self.groups, None,
+ groups_to_instances)
self.assertEqual(q.RequestedData(), set([query.GQ_INST]))
assert "name" in fielddefs
# No name field
- q = query.Query(fielddefs, ["name"], filter_=["=", "name", "abc"],
+ q = query.Query(fielddefs, ["name"], qfilter=["=", "name", "abc"],
namefield=None)
self.assertEqual(q.RequestedNames(), None)
# No filter
- q = query.Query(fielddefs, ["name"], filter_=None, namefield="name")
+ q = query.Query(fielddefs, ["name"], qfilter=None, namefield="name")
self.assertEqual(q.RequestedNames(), None)
# Check empty query
- q = query.Query(fielddefs, ["name"], filter_=["|"], namefield="name")
+ q = query.Query(fielddefs, ["name"], qfilter=["|"], namefield="name")
self.assertEqual(q.RequestedNames(), None)
# Check order
- q = query.Query(fielddefs, ["name"], filter_=["|"] + innerfilter,
+ q = query.Query(fielddefs, ["name"], qfilter=["|"] + innerfilter,
namefield="name")
self.assertEqual(q.RequestedNames(), ["x0", "x1", "x2", "x3"])
# Check reverse order
q = query.Query(fielddefs, ["name"],
- filter_=["|"] + list(reversed(innerfilter)),
+ qfilter=["|"] + list(reversed(innerfilter)),
namefield="name")
self.assertEqual(q.RequestedNames(), ["x3", "x2", "x1", "x0"])
# Duplicates
q = query.Query(fielddefs, ["name"],
- filter_=["|"] + innerfilter + list(reversed(innerfilter)),
+ qfilter=["|"] + innerfilter + list(reversed(innerfilter)),
namefield="name")
self.assertEqual(q.RequestedNames(), ["x0", "x1", "x2", "x3"])
# Filter with AND
q = query.Query(fielddefs, ["name"],
- filter_=["|", ["=", "name", "foo"],
+ qfilter=["|", ["=", "name", "foo"],
["&", ["=", "name", ""]]],
namefield="name")
self.assertTrue(q.RequestedNames() is None)
# Filter with NOT
q = query.Query(fielddefs, ["name"],
- filter_=["|", ["=", "name", "foo"],
+ qfilter=["|", ["=", "name", "foo"],
["!", ["=", "name", ""]]],
namefield="name")
self.assertTrue(q.RequestedNames() is None)
# Filter with only OR (names must be in correct order)
q = query.Query(fielddefs, ["name"],
- filter_=["|", ["=", "name", "x17361"],
+ qfilter=["|", ["=", "name", "x17361"],
["|", ["=", "name", "x22015"]],
["|", ["|", ["=", "name", "x13193"]]],
["=", "name", "x15215"]],
]
for fielddefs in query.ALL_FIELD_LISTS:
- for filter_ in checks:
+ for qfilter in checks:
self.assertRaises(errors.ParameterError, query._CompileFilter,
- fielddefs, None, filter_)
+ fielddefs, None, qfilter)
for op in ["|", "!"]:
- filter_ = self._GenNestedFilter(op, levels_max - 1)
+ qfilter = self._GenNestedFilter(op, levels_max - 1)
self.assertTrue(callable(query._CompileFilter(fielddefs, None,
- filter_)))
+ qfilter)))
def testQueryInputOrder(self):
fielddefs = query._PrepareFieldList([
{ "pnode": "node20", "snode": "node1", },
]
- filter_ = ["|", ["=", "pnode", "node1"], ["=", "snode", "node1"]]
+ qfilter = ["|", ["=", "pnode", "node1"], ["=", "snode", "node1"]]
q = query.Query(fielddefs, ["pnode", "snode"], namefield="pnode",
- filter_=filter_)
+ qfilter=qfilter)
self.assertTrue(q.RequestedNames() is None)
self.assertFalse(q.RequestedData())
self.assertEqual(q.Query(data),
# No name field, result must be in incoming order
q = query.Query(fielddefs, ["pnode", "snode"], namefield=None,
- filter_=filter_)
+ qfilter=qfilter)
self.assertFalse(q.RequestedData())
self.assertEqual(q.Query(data),
[[(constants.RS_NORMAL, "node1"), (constants.RS_NORMAL, "node44")],
]
q = query.Query(fielddefs, ["pnode", "num"], namefield="pnode",
- filter_=["|", ["=", "pnode", "node1"],
+ qfilter=["|", ["=", "pnode", "node1"],
["=", "pnode", "node2"],
["=", "pnode", "node1"]])
self.assertEqual(q.RequestedNames(), ["node1", "node2"],
]
q = query.Query(fielddefs, ["pnode", "num"], namefield="pnode",
- filter_=["|", ["=", "pnode", "nodeX"],
+ qfilter=["|", ["=", "pnode", "nodeX"],
["=", "pnode", "nodeY"],
["=", "pnode", "nodeY"],
["=", "pnode", "nodeY"],
# Empty filter
q = query.Query(fielddefs, ["name", "other"], namefield="name",
- filter_=["|"])
+ qfilter=["|"])
self.assertTrue(q.RequestedNames() is None)
self.assertEqual(q.RequestedData(), set([DK_A, DK_B]))
self.assertEqual(q.Query(data), [])
# Normal filter
q = query.Query(fielddefs, ["name", "other"], namefield="name",
- filter_=["=", "name", "node1"])
+ qfilter=["=", "name", "node1"])
self.assertEqual(q.RequestedNames(), ["node1"])
self.assertEqual(q.Query(data),
[[(constants.RS_NORMAL, "node1"), (constants.RS_NORMAL, "foo")]])
q = query.Query(fielddefs, ["name", "other"], namefield="name",
- filter_=(["|", ["=", "name", "node1"],
+ qfilter=(["|", ["=", "name", "node1"],
["=", "name", "node3"]]))
self.assertEqual(q.RequestedNames(), ["node1", "node3"])
self.assertEqual(q.Query(data),
# Complex filter
q = query.Query(fielddefs, ["name", "other"], namefield="name",
- filter_=(["|", ["=", "name", "node1"],
+ qfilter=(["|", ["=", "name", "node1"],
["|", ["=", "name", "node3"],
["=", "name", "node2"]],
["=", "name", "node3"]]))
for i in [-1, 0, 1, 123, [], None, True, False]:
self.assertRaises(errors.ParameterError, query.Query,
fielddefs, ["name", "other"], namefield="name",
- filter_=["=", "name", i])
+ qfilter=["=", "name", i])
# Negative filter
q = query.Query(fielddefs, ["name", "other"], namefield="name",
- filter_=["!", ["|", ["=", "name", "node1"],
+ qfilter=["!", ["|", ["=", "name", "node1"],
["=", "name", "node3"]]])
self.assertTrue(q.RequestedNames() is None)
self.assertEqual(q.Query(data),
# Not equal
q = query.Query(fielddefs, ["name", "other"], namefield="name",
- filter_=["!=", "name", "node3"])
+ qfilter=["!=", "name", "node3"])
self.assertTrue(q.RequestedNames() is None)
self.assertEqual(q.Query(data),
[[(constants.RS_NORMAL, "node1"), (constants.RS_NORMAL, "foo")],
# Data type
q = query.Query(fielddefs, [], namefield="name",
- filter_=["|", ["=", "other", "bar"],
+ qfilter=["|", ["=", "other", "bar"],
["=", "name", "foo"]])
self.assertTrue(q.RequestedNames() is None)
self.assertEqual(q.RequestedData(), set([DK_A, DK_B]))
# Only one data type
q = query.Query(fielddefs, ["other"], namefield="name",
- filter_=["=", "other", "bar"])
+ qfilter=["=", "other", "bar"])
self.assertTrue(q.RequestedNames() is None)
self.assertEqual(q.RequestedData(), set([DK_B]))
self.assertEqual(q.Query(data), [[(constants.RS_NORMAL, "bar")]])
q = query.Query(fielddefs, [], namefield="name",
- filter_=["=", "other", "bar"])
+ qfilter=["=", "other", "bar"])
self.assertTrue(q.RequestedNames() is None)
self.assertEqual(q.RequestedData(), set([DK_B]))
self.assertEqual(q.Query(data), [[]])
]
q = query.Query(fielddefs, ["name", "other"], namefield="name",
- filter_=["=[]", "other", "bar"])
+ qfilter=["=[]", "other", "bar"])
self.assertTrue(q.RequestedNames() is None)
self.assertEqual(q.Query(data), [
[(constants.RS_NORMAL, "node2"),
])
q = query.Query(fielddefs, ["name", "other"], namefield="name",
- filter_=["|", ["=[]", "other", "bar"],
+ qfilter=["|", ["=[]", "other", "bar"],
["=[]", "other", "a"],
["=[]", "other", "b"]])
self.assertTrue(q.RequestedNames() is None)
# Boolean test
q = query.Query(fielddefs, ["name", "other"], namefield="name",
- filter_=["?", "other"])
+ qfilter=["?", "other"])
self.assertEqual(q.OldStyleQuery(data), [
["node1", ["a", "b", "foo"]],
["node2", ["x", "y", "bar"]],
])
q = query.Query(fielddefs, ["name", "other"], namefield="name",
- filter_=["!", ["?", "other"]])
+ qfilter=["!", ["?", "other"]])
self.assertEqual(q.OldStyleQuery(data), [
["empty", []],
])
]
q = query.Query(fielddefs, ["name"], namefield="name",
- filter_=["=", "name", "node2"])
+ qfilter=["=", "name", "node2"])
self.assertEqual(q.RequestedNames(), ["node2"])
self.assertEqual(q.Query(data), [
[(constants.RS_NORMAL, "node2.example.com")],
])
q = query.Query(fielddefs, ["name"], namefield="name",
- filter_=["=", "name", "node1"])
+ qfilter=["=", "name", "node1"])
self.assertEqual(q.RequestedNames(), ["node1"])
self.assertEqual(q.Query(data), [
[(constants.RS_NORMAL, "node1.example.com")],
])
q = query.Query(fielddefs, ["name"], namefield="name",
- filter_=["=", "name", "othername"])
+ qfilter=["=", "name", "othername"])
self.assertEqual(q.RequestedNames(), ["othername"])
self.assertEqual(q.Query(data), [])
q = query.Query(fielddefs, ["name"], namefield="name",
- filter_=["|", ["=", "name", "node1.example.com"],
+ qfilter=["|", ["=", "name", "node1.example.com"],
["=", "name", "node2"]])
self.assertEqual(q.RequestedNames(), ["node1.example.com", "node2"])
self.assertEqual(q.Query(data), [
])
q = query.Query(fielddefs, ["name"], namefield="name",
- filter_=["!=", "name", "node1"])
+ qfilter=["!=", "name", "node1"])
self.assertTrue(q.RequestedNames() is None)
self.assertEqual(q.Query(data), [
[(constants.RS_NORMAL, "node2.example.com")],
]
q = query.Query(fielddefs, ["name", "value"],
- filter_=["|", ["=", "value", False],
+ qfilter=["|", ["=", "value", False],
["=", "value", True]])
self.assertTrue(q.RequestedNames() is None)
self.assertEqual(q.Query(data), [
])
q = query.Query(fielddefs, ["name", "value"],
- filter_=["|", ["=", "value", False],
+ qfilter=["|", ["=", "value", False],
["!", ["=", "value", False]]])
self.assertTrue(q.RequestedNames() is None)
self.assertEqual(q.Query(data), [
for i in ["False", "True", "0", "1", "no", "yes", "N", "Y"]:
self.assertRaises(errors.ParameterError, query.Query,
fielddefs, ["name", "value"],
- filter_=["=", "value", i])
+ qfilter=["=", "value", i])
# Truth filter
- q = query.Query(fielddefs, ["name", "value"], filter_=["?", "value"])
+ q = query.Query(fielddefs, ["name", "value"], qfilter=["?", "value"])
self.assertTrue(q.RequestedNames() is None)
self.assertEqual(q.Query(data), [
[(constants.RS_NORMAL, "node2"), (constants.RS_NORMAL, True)],
])
# Negative bool filter
- q = query.Query(fielddefs, ["name", "value"], filter_=["!", ["?", "value"]])
+ q = query.Query(fielddefs, ["name", "value"], qfilter=["!", ["?", "value"]])
self.assertTrue(q.RequestedNames() is None)
self.assertEqual(q.Query(data), [
[(constants.RS_NORMAL, "node1"), (constants.RS_NORMAL, False)],
# Complex truth filter
q = query.Query(fielddefs, ["name", "value"],
- filter_=["|", ["&", ["=", "name", "node1"],
+ qfilter=["|", ["&", ["=", "name", "node1"],
["!", ["?", "value"]]],
["?", "value"]])
self.assertTrue(q.RequestedNames() is None)
]
q = query.Query(fielddefs, ["name"], namefield="name",
- filter_=["=~", "name", "site"])
+ qfilter=["=~", "name", "site"])
self.assertTrue(q.RequestedNames() is None)
self.assertEqual(q.Query(data), [
[(constants.RS_NORMAL, "node2.site.example.com")],
])
q = query.Query(fielddefs, ["name"], namefield="name",
- filter_=["=~", "name", "^node2"])
+ qfilter=["=~", "name", "^node2"])
self.assertTrue(q.RequestedNames() is None)
self.assertEqual(q.Query(data), [
[(constants.RS_NORMAL, "node2.example.net")],
])
q = query.Query(fielddefs, ["name"], namefield="name",
- filter_=["=~", "name", r"(?i)\.COM$"])
+ qfilter=["=~", "name", r"(?i)\.COM$"])
self.assertTrue(q.RequestedNames() is None)
self.assertEqual(q.Query(data), [
[(constants.RS_NORMAL, "node1.example.com")],
])
q = query.Query(fielddefs, ["name"], namefield="name",
- filter_=["=~", "name", r"."])
+ qfilter=["=~", "name", r"."])
self.assertTrue(q.RequestedNames() is None)
self.assertEqual(q.Query(data), [
[(constants.RS_NORMAL, "node1.example.com")],
])
q = query.Query(fielddefs, ["name"], namefield="name",
- filter_=["=~", "name", r"^$"])
+ qfilter=["=~", "name", r"^$"])
self.assertTrue(q.RequestedNames() is None)
self.assertEqual(q.Query(data), [
[(constants.RS_NORMAL, "")],
# Invalid regular expression
self.assertRaises(errors.ParameterError, query.Query, fielddefs, ["name"],
- filter_=["=~", "name", r"["])
+ qfilter=["=~", "name", r"["])
if __name__ == "__main__":
"""Script for testing ganeti.rapi.baserlib"""
import unittest
+import itertools
from ganeti import errors
from ganeti import opcodes
from ganeti import ht
from ganeti import http
+from ganeti import compat
from ganeti.rapi import baserlib
import testutils
rename={ "data": "test", })
+class TestOpcodeResource(unittest.TestCase):
+ @staticmethod
+ def _MakeClass(method, attrs):
+ return type("Test%s" % method, (baserlib.OpcodeResource, ), attrs)
+
+ @staticmethod
+ def _GetMethodAttributes(method):
+ attrs = ["%s_OPCODE" % method, "%s_RENAME" % method,
+ "Get%sOpInput" % method.capitalize()]
+ assert attrs == dict((opattrs[0], list(opattrs[1:]))
+ for opattrs in baserlib._OPCODE_ATTRS)[method]
+ return attrs
+
+ def test(self):
+ for method in baserlib._SUPPORTED_METHODS:
+ # Empty handler
+ obj = self._MakeClass(method, {})(None, None, None)
+ for attr in itertools.chain(*baserlib._OPCODE_ATTRS):
+ self.assertFalse(hasattr(obj, attr))
+
+ # Direct handler function
+ obj = self._MakeClass(method, {
+ method: lambda _: None,
+ })(None, None, None)
+ self.assertFalse(compat.all(hasattr(obj, attr)
+ for i in baserlib._SUPPORTED_METHODS
+ for attr in self._GetMethodAttributes(i)))
+
+ # Let metaclass define handler function
+ for opcls in [None, object()]:
+ obj = self._MakeClass(method, {
+ "%s_OPCODE" % method: opcls,
+ })(None, None, None)
+ self.assertTrue(callable(getattr(obj, method)))
+ self.assertEqual(getattr(obj, "%s_OPCODE" % method), opcls)
+ self.assertFalse(hasattr(obj, "%s_RENAME" % method))
+ self.assertFalse(compat.any(hasattr(obj, attr)
+ for i in baserlib._SUPPORTED_METHODS
+ if i != method
+ for attr in self._GetMethodAttributes(i)))
+
+ def testIllegalRename(self):
+ class _TClass(baserlib.OpcodeResource):
+ PUT_RENAME = None
+ def PUT(self): pass
+
+ self.assertRaises(AssertionError, _TClass, None, None, None)
+
+ def testEmpty(self):
+ class _Empty(baserlib.OpcodeResource):
+ pass
+
+ obj = _Empty(None, None, None)
+
+ for attr in itertools.chain(*baserlib._OPCODE_ATTRS):
+ self.assertFalse(hasattr(obj, attr))
+
+
if __name__ == "__main__":
testutils.GanetiTestProgram()
#!/usr/bin/python
#
-# Copyright (C) 2010 Google Inc.
+# Copyright (C) 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# List of resource handlers which aren't used by the RAPI client
_KNOWN_UNUSED = set([
- connector.R_root,
- connector.R_2,
+ rlib2.R_root,
+ rlib2.R_2,
])
# Global variable for collecting used handlers
self.assertEqual(client.GANETI_RAPI_VERSION, constants.RAPI_VERSION)
self.assertEqual(client.HTTP_APP_JSON, http.HTTP_APP_JSON)
self.assertEqual(client._REQ_DATA_VERSION_FIELD, rlib2._REQ_DATA_VERSION)
- self.assertEqual(client._INST_CREATE_REQV1, rlib2._INST_CREATE_REQV1)
- self.assertEqual(client._INST_REINSTALL_REQV1, rlib2._INST_REINSTALL_REQV1)
- self.assertEqual(client._NODE_MIGRATE_REQV1, rlib2._NODE_MIGRATE_REQV1)
- self.assertEqual(client._NODE_EVAC_RES1, rlib2._NODE_EVAC_RES1)
self.assertEqual(client._INST_NIC_PARAMS, constants.INIC_PARAMS)
self.assertEqual(client.JOB_STATUS_QUEUED, constants.JOB_STATUS_QUEUED)
self.assertEqual(client.JOB_STATUS_WAITING, constants.JOB_STATUS_WAITING)
# Legacy name
self.assertEqual(client.JOB_STATUS_WAITLOCK, constants.JOB_STATUS_WAITING)
+ # RAPI feature strings
+ self.assertEqual(client._INST_CREATE_REQV1, rlib2._INST_CREATE_REQV1)
+ self.assertEqual(client.INST_CREATE_REQV1, rlib2._INST_CREATE_REQV1)
+ self.assertEqual(client._INST_REINSTALL_REQV1, rlib2._INST_REINSTALL_REQV1)
+ self.assertEqual(client.INST_REINSTALL_REQV1, rlib2._INST_REINSTALL_REQV1)
+ self.assertEqual(client._NODE_MIGRATE_REQV1, rlib2._NODE_MIGRATE_REQV1)
+ self.assertEqual(client.NODE_MIGRATE_REQV1, rlib2._NODE_MIGRATE_REQV1)
+ self.assertEqual(client._NODE_EVAC_RES1, rlib2._NODE_EVAC_RES1)
+ self.assertEqual(client.NODE_EVAC_RES1, rlib2._NODE_EVAC_RES1)
+
class RapiMockTest(unittest.TestCase):
def test(self):
self.assertQuery("force", ["1"])
self.assertEqual("\"master-candidate\"", self.rapi.GetLastRequestData())
+ def testPowercycleNode(self):
+ self.rapi.AddResponse("23051")
+ self.assertEqual(23051,
+ self.client.PowercycleNode("node5468", force=True))
+ self.assertHandler(rlib2.R_2_nodes_name_powercycle)
+ self.assertItems(["node5468"])
+ self.assertQuery("force", ["1"])
+ self.assertFalse(self.rapi.GetLastRequestData())
+ self.assertEqual(self.rapi.CountPending(), 0)
+
def testModifyNode(self):
self.rapi.AddResponse("3783")
job_id = self.client.ModifyNode("node16979.example.com", drained=True)
self.assertHandler(rlib2.R_2_instances_name_deactivate_disks)
self.assertFalse(self.rapi.GetLastHandler().queryargs)
+ def testRecreateInstanceDisks(self):
+ self.rapi.AddResponse("13553")
+ job_id = self.client.RecreateInstanceDisks("inst23153")
+ self.assertEqual(job_id, 13553)
+ self.assertItems(["inst23153"])
+ self.assertHandler(rlib2.R_2_instances_name_recreate_disks)
+ self.assertFalse(self.rapi.GetLastHandler().queryargs)
+
def testGetInstanceConsole(self):
self.rapi.AddResponse("26876")
job_id = self.client.GetInstanceConsole("inst21491")
def testQuery(self):
for idx, what in enumerate(constants.QR_VIA_RAPI):
- for idx2, filter_ in enumerate([None, ["?", "name"]]):
+ for idx2, qfilter in enumerate([None, ["?", "name"]]):
job_id = 11010 + (idx << 4) + (idx2 << 16)
fields = sorted(query.ALL_FIELDS[what].keys())[:10]
self.rapi.AddResponse(str(job_id))
- self.assertEqual(self.client.Query(what, fields, filter_=filter_),
+ self.assertEqual(self.client.Query(what, fields, qfilter=qfilter),
job_id)
self.assertItems([what])
self.assertHandler(rlib2.R_2_query)
self.assertFalse(self.rapi.GetLastHandler().queryargs)
data = serializer.LoadJson(self.rapi.GetLastRequestData())
self.assertEqual(data["fields"], fields)
- if filter_ is None:
- self.assertTrue("filter" not in data)
+ if qfilter is None:
+ self.assertTrue("qfilter" not in data)
else:
- self.assertEqual(data["filter"], filter_)
+ self.assertEqual(data["qfilter"], qfilter)
self.assertEqual(self.rapi.CountPending(), 0)
def testQueryFields(self):
self._TestFailingUri("/instances/does/not/exist")
-class R_RootTests(unittest.TestCase):
- """Testing for R_root class."""
-
- def setUp(self):
- self.root = connector.R_root(None, None, None)
-
- def testGet(self):
- expected = [
- {'name': '2', 'uri': '/2'},
- {'name': 'version', 'uri': '/version'},
- ]
- self.assertEquals(self.root.GET(), expected)
-
-
if __name__ == '__main__':
testutils.GanetiTestProgram()
import unittest
-import tempfile
+import itertools
+import random
from ganeti import constants
from ganeti import opcodes
from ganeti import compat
from ganeti import http
from ganeti import query
+from ganeti import luxi
+from ganeti import errors
from ganeti.rapi import rlib2
import testutils
+class _FakeRequestPrivateData:
+ def __init__(self, body_data):
+ self.body_data = body_data
+
+
+class _FakeRequest:
+ def __init__(self, body_data):
+ self.private = _FakeRequestPrivateData(body_data)
+
+
+def _CreateHandler(cls, items, queryargs, body_data, client_cls):
+ return cls(items, queryargs, _FakeRequest(body_data),
+ _client_cls=client_cls)
+
+
+class _FakeClient:
+ def __init__(self):
+ self._jobs = []
+
+ def GetNextSubmittedJob(self):
+ return self._jobs.pop(0)
+
+ def SubmitJob(self, ops):
+ job_id = str(1 + int(random.random() * 1000000))
+ self._jobs.append((job_id, ops))
+ return job_id
+
+
+class _FakeClientFactory:
+ def __init__(self, cls):
+ self._client_cls = cls
+ self._clients = []
+
+ def GetNextClient(self):
+ return self._clients.pop(0)
+
+ def __call__(self):
+ cl = self._client_cls()
+ self._clients.append(cl)
+ return cl
+
+
class TestConstants(unittest.TestCase):
def testConsole(self):
# Exporting the console field without authentication might expose
self.assertFalse(set(fields) - set(query.ALL_FIELDS[qr].keys()))
-class TestParseInstanceCreateRequestVersion1(testutils.GanetiTestCase):
- def setUp(self):
- testutils.GanetiTestCase.setUp(self)
+class TestClientConnectError(unittest.TestCase):
+ @staticmethod
+ def _FailingClient():
+ raise luxi.NoMasterError("test")
+
+ def test(self):
+ resources = [
+ rlib2.R_2_groups,
+ rlib2.R_2_instances,
+ rlib2.R_2_nodes,
+ ]
+ for cls in resources:
+ handler = _CreateHandler(cls, ["name"], [], None, self._FailingClient)
+ self.assertRaises(http.HttpBadGateway, handler.GET)
+
+
+class TestJobSubmitError(unittest.TestCase):
+ class _SubmitErrorClient:
+ @staticmethod
+ def SubmitJob(ops):
+ raise errors.JobQueueFull("test")
+
+ def test(self):
+ handler = _CreateHandler(rlib2.R_2_redist_config, [], [], None,
+ self._SubmitErrorClient)
+ self.assertRaises(http.HttpServiceUnavailable, handler.PUT)
+
+
+class TestClusterModify(unittest.TestCase):
+ def test(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+ handler = _CreateHandler(rlib2.R_2_cluster_modify, [], [], {
+ "vg_name": "testvg",
+ "candidate_pool_size": 100,
+ }, clfactory)
+ job_id = handler.PUT()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpClusterSetParams))
+ self.assertEqual(op.vg_name, "testvg")
+ self.assertEqual(op.candidate_pool_size, 100)
+
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
+
+ def testInvalidValue(self):
+ for attr in ["vg_name", "candidate_pool_size", "beparams", "_-Unknown#"]:
+ clfactory = _FakeClientFactory(_FakeClient)
+ handler = _CreateHandler(rlib2.R_2_cluster_modify, [], [], {
+ attr: True,
+ }, clfactory)
+ self.assertRaises(http.HttpBadRequest, handler.PUT)
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+
+class TestRedistConfig(unittest.TestCase):
+ def test(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+ handler = _CreateHandler(rlib2.R_2_redist_config, [], [], None, clfactory)
+ job_id = handler.PUT()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpClusterRedistConf))
+
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
+
+
+class TestNodeMigrate(unittest.TestCase):
+ def test(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+ handler = _CreateHandler(rlib2.R_2_nodes_name_migrate, ["node1"], {}, {
+ "iallocator": "fooalloc",
+ }, clfactory)
+ job_id = handler.POST()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpNodeMigrate))
+ self.assertEqual(op.node_name, "node1")
+ self.assertEqual(op.iallocator, "fooalloc")
+
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
+
+ def testQueryArgsConflict(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+ handler = _CreateHandler(rlib2.R_2_nodes_name_migrate, ["node2"], {
+ "live": True,
+ "mode": constants.HT_MIGRATION_NONLIVE,
+ }, None, clfactory)
+ self.assertRaises(http.HttpBadRequest, handler.POST)
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ def testQueryArgsMode(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+ queryargs = {
+ "mode": [constants.HT_MIGRATION_LIVE],
+ }
+ handler = _CreateHandler(rlib2.R_2_nodes_name_migrate, ["node17292"],
+ queryargs, None, clfactory)
+ job_id = handler.POST()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpNodeMigrate))
+ self.assertEqual(op.node_name, "node17292")
+ self.assertEqual(op.mode, constants.HT_MIGRATION_LIVE)
+
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
+
+ def testQueryArgsLive(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+
+ for live in [False, True]:
+ queryargs = {
+ "live": [str(int(live))],
+ }
+ handler = _CreateHandler(rlib2.R_2_nodes_name_migrate, ["node6940"],
+ queryargs, None, clfactory)
+ job_id = handler.POST()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpNodeMigrate))
+ self.assertEqual(op.node_name, "node6940")
+ if live:
+ self.assertEqual(op.mode, constants.HT_MIGRATION_LIVE)
+ else:
+ self.assertEqual(op.mode, constants.HT_MIGRATION_NONLIVE)
+
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
+
+
+class TestNodeEvacuate(unittest.TestCase):
+ def test(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+ handler = _CreateHandler(rlib2.R_2_nodes_name_evacuate, ["node92"], {
+ "dry-run": ["1"],
+ }, {
+ "mode": constants.IALLOCATOR_NEVAC_SEC,
+ }, clfactory)
+ job_id = handler.POST()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpNodeEvacuate))
+ self.assertEqual(op.node_name, "node92")
+ self.assertEqual(op.mode, constants.IALLOCATOR_NEVAC_SEC)
+ self.assertTrue(op.dry_run)
+
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
+
+
+class TestNodePowercycle(unittest.TestCase):
+ def test(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+ handler = _CreateHandler(rlib2.R_2_nodes_name_powercycle, ["node20744"], {
+ "force": ["1"],
+ }, None, clfactory)
+ job_id = handler.POST()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpNodePowercycle))
+ self.assertEqual(op.node_name, "node20744")
+ self.assertTrue(op.force)
+
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
+
+
+class TestGroupAssignNodes(unittest.TestCase):
+ def test(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+ handler = _CreateHandler(rlib2.R_2_groups_name_assign_nodes, ["grp-a"], {
+ "dry-run": ["1"],
+ "force": ["1"],
+ }, {
+ "nodes": ["n2", "n3"],
+ }, clfactory)
+ job_id = handler.PUT()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpGroupAssignNodes))
+ self.assertEqual(op.group_name, "grp-a")
+ self.assertEqual(op.nodes, ["n2", "n3"])
+ self.assertTrue(op.dry_run)
+ self.assertTrue(op.force)
+
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
+
+
+class TestInstanceDelete(unittest.TestCase):
+ def test(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+ handler = _CreateHandler(rlib2.R_2_instances_name, ["inst30965"], {
+ "dry-run": ["1"],
+ }, {}, clfactory)
+ job_id = handler.DELETE()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpInstanceRemove))
+ self.assertEqual(op.instance_name, "inst30965")
+ self.assertTrue(op.dry_run)
+ self.assertFalse(op.ignore_failures)
+
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
+
+
+class TestInstanceInfo(unittest.TestCase):
+ def test(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+ handler = _CreateHandler(rlib2.R_2_instances_name_info, ["inst31217"], {
+ "static": ["1"],
+ }, {}, clfactory)
+ job_id = handler.GET()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpInstanceQueryData))
+ self.assertEqual(op.instances, ["inst31217"])
+ self.assertTrue(op.static)
+
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
+
+
+class TestInstanceReboot(unittest.TestCase):
+ def test(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+ handler = _CreateHandler(rlib2.R_2_instances_name_reboot, ["inst847"], {
+ "dry-run": ["1"],
+ "ignore_secondaries": ["1"],
+ }, {}, clfactory)
+ job_id = handler.POST()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpInstanceReboot))
+ self.assertEqual(op.instance_name, "inst847")
+ self.assertEqual(op.reboot_type, constants.INSTANCE_REBOOT_HARD)
+ self.assertTrue(op.ignore_secondaries)
+ self.assertTrue(op.dry_run)
+
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
+
+
+class TestInstanceStartup(unittest.TestCase):
+ def test(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+ handler = _CreateHandler(rlib2.R_2_instances_name_startup, ["inst31083"], {
+ "force": ["1"],
+ "no_remember": ["1"],
+ }, {}, clfactory)
+ job_id = handler.PUT()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpInstanceStartup))
+ self.assertEqual(op.instance_name, "inst31083")
+ self.assertTrue(op.no_remember)
+ self.assertTrue(op.force)
+ self.assertFalse(op.dry_run)
+
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
+
+
+class TestInstanceShutdown(unittest.TestCase):
+ def test(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+ handler = _CreateHandler(rlib2.R_2_instances_name_shutdown, ["inst26791"], {
+ "no_remember": ["0"],
+ }, {}, clfactory)
+ job_id = handler.PUT()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpInstanceShutdown))
+ self.assertEqual(op.instance_name, "inst26791")
+ self.assertFalse(op.no_remember)
+ self.assertFalse(op.dry_run)
+
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
+
+
+class TestInstanceActivateDisks(unittest.TestCase):
+ def test(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+ handler = _CreateHandler(rlib2.R_2_instances_name_activate_disks, ["xyz"], {
+ "ignore_size": ["1"],
+ }, {}, clfactory)
+ job_id = handler.PUT()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpInstanceActivateDisks))
+ self.assertEqual(op.instance_name, "xyz")
+ self.assertTrue(op.ignore_size)
+ self.assertFalse(hasattr(op, "dry_run"))
+
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
+
+
+class TestInstanceDeactivateDisks(unittest.TestCase):
+ def test(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+ handler = _CreateHandler(rlib2.R_2_instances_name_deactivate_disks,
+ ["inst22357"], {}, {}, clfactory)
+ job_id = handler.PUT()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpInstanceDeactivateDisks))
+ self.assertEqual(op.instance_name, "inst22357")
+ self.assertFalse(hasattr(op, "dry_run"))
+ self.assertFalse(hasattr(op, "force"))
+
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
+
+
+class TestInstanceRecreateDisks(unittest.TestCase):
+ def test(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+ handler = _CreateHandler(rlib2.R_2_instances_name_recreate_disks,
+ ["inst22357"], {}, {}, clfactory)
+ job_id = handler.POST()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpInstanceRecreateDisks))
+ self.assertEqual(op.instance_name, "inst22357")
+ self.assertFalse(hasattr(op, "dry_run"))
+ self.assertFalse(hasattr(op, "force"))
+
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
+
+
+class TestInstanceFailover(unittest.TestCase):
+ def test(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+ handler = _CreateHandler(rlib2.R_2_instances_name_failover,
+ ["inst12794"], {}, {}, clfactory)
+ job_id = handler.PUT()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpInstanceFailover))
+ self.assertEqual(op.instance_name, "inst12794")
+ self.assertFalse(hasattr(op, "dry_run"))
+ self.assertFalse(hasattr(op, "force"))
+
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
+
+
+class TestInstanceDiskGrow(unittest.TestCase):
+ def test(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+ data = {
+ "amount": 1024,
+ }
+ handler = _CreateHandler(rlib2.R_2_instances_name_disk_grow,
+ ["inst10742", "3"], {}, data, clfactory)
+ job_id = handler.POST()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpInstanceGrowDisk))
+ self.assertEqual(op.instance_name, "inst10742")
+ self.assertEqual(op.disk, 3)
+ self.assertEqual(op.amount, 1024)
+ self.assertFalse(hasattr(op, "dry_run"))
+ self.assertFalse(hasattr(op, "force"))
+
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
+
+
+class TestBackupPrepare(unittest.TestCase):
+ def test(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+ queryargs = {
+ "mode": constants.EXPORT_MODE_REMOTE,
+ }
+ handler = _CreateHandler(rlib2.R_2_instances_name_prepare_export,
+ ["inst17925"], queryargs, {}, clfactory)
+ job_id = handler.PUT()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpBackupPrepare))
+ self.assertEqual(op.instance_name, "inst17925")
+ self.assertEqual(op.mode, constants.EXPORT_MODE_REMOTE)
+ self.assertFalse(hasattr(op, "dry_run"))
+ self.assertFalse(hasattr(op, "force"))
+
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
+
+
+class TestGroupRemove(unittest.TestCase):
+ def test(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+ handler = _CreateHandler(rlib2.R_2_groups_name,
+ ["grp28575"], {}, {}, clfactory)
+ job_id = handler.DELETE()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpGroupRemove))
+ self.assertEqual(op.group_name, "grp28575")
+ self.assertFalse(op.dry_run)
+ self.assertFalse(hasattr(op, "force"))
+
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
+
+
+class TestStorageQuery(unittest.TestCase):
+ def test(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+ queryargs = {
+ "storage_type": constants.ST_LVM_PV,
+ "output_fields": "name,other",
+ }
+ handler = _CreateHandler(rlib2.R_2_nodes_name_storage,
+ ["node21075"], queryargs, {}, clfactory)
+ job_id = handler.GET()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpNodeQueryStorage))
+ self.assertEqual(op.nodes, ["node21075"])
+ self.assertEqual(op.storage_type, constants.ST_LVM_PV)
+ self.assertEqual(op.output_fields, ["name", "other"])
+ self.assertFalse(hasattr(op, "dry_run"))
+ self.assertFalse(hasattr(op, "force"))
+
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
+
+ def testErrors(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+
+ queryargs = {
+ "output_fields": "name,other",
+ }
+ handler = _CreateHandler(rlib2.R_2_nodes_name_storage,
+ ["node10538"], queryargs, {}, clfactory)
+ self.assertRaises(http.HttpBadRequest, handler.GET)
+
+ queryargs = {
+ "storage_type": constants.ST_LVM_VG,
+ }
+ handler = _CreateHandler(rlib2.R_2_nodes_name_storage,
+ ["node21273"], queryargs, {}, clfactory)
+ self.assertRaises(http.HttpBadRequest, handler.GET)
+
+ queryargs = {
+ "storage_type": "##unknown_storage##",
+ "output_fields": "name,other",
+ }
+ handler = _CreateHandler(rlib2.R_2_nodes_name_storage,
+ ["node10315"], queryargs, {}, clfactory)
+ self.assertRaises(http.HttpBadRequest, handler.GET)
+
+
+class TestStorageModify(unittest.TestCase):
+ def test(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+
+ for allocatable in [None, "1", "0"]:
+ queryargs = {
+ "storage_type": constants.ST_LVM_VG,
+ "name": "pv-a",
+ }
+
+ if allocatable is not None:
+ queryargs["allocatable"] = allocatable
+
+ handler = _CreateHandler(rlib2.R_2_nodes_name_storage_modify,
+ ["node9292"], queryargs, {}, clfactory)
+ job_id = handler.PUT()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpNodeModifyStorage))
+ self.assertEqual(op.node_name, "node9292")
+ self.assertEqual(op.storage_type, constants.ST_LVM_VG)
+ self.assertEqual(op.name, "pv-a")
+ if allocatable is None:
+ self.assertFalse(op.changes)
+ else:
+ assert allocatable in ("0", "1")
+ self.assertEqual(op.changes, {
+ constants.SF_ALLOCATABLE: (allocatable == "1"),
+ })
+ self.assertFalse(hasattr(op, "dry_run"))
+ self.assertFalse(hasattr(op, "force"))
+
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
+
+ def testErrors(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+
+ # No storage type
+ queryargs = {
+ "name": "xyz",
+ }
+ handler = _CreateHandler(rlib2.R_2_nodes_name_storage_modify,
+ ["node26016"], queryargs, {}, clfactory)
+ self.assertRaises(http.HttpBadRequest, handler.PUT)
+
+ # No name
+ queryargs = {
+ "storage_type": constants.ST_LVM_VG,
+ }
+ handler = _CreateHandler(rlib2.R_2_nodes_name_storage_modify,
+ ["node21218"], queryargs, {}, clfactory)
+ self.assertRaises(http.HttpBadRequest, handler.PUT)
+
+ # Invalid value
+ queryargs = {
+ "storage_type": constants.ST_LVM_VG,
+ "name": "pv-b",
+ "allocatable": "noint",
+ }
+ handler = _CreateHandler(rlib2.R_2_nodes_name_storage_modify,
+ ["node30685"], queryargs, {}, clfactory)
+ self.assertRaises(http.HttpBadRequest, handler.PUT)
+
+
+class TestStorageRepair(unittest.TestCase):
+ def test(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+ queryargs = {
+ "storage_type": constants.ST_LVM_PV,
+ "name": "pv16611",
+ }
+ handler = _CreateHandler(rlib2.R_2_nodes_name_storage_repair,
+ ["node19265"], queryargs, {}, clfactory)
+ job_id = handler.PUT()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpRepairNodeStorage))
+ self.assertEqual(op.node_name, "node19265")
+ self.assertEqual(op.storage_type, constants.ST_LVM_PV)
+ self.assertEqual(op.name, "pv16611")
+ self.assertFalse(hasattr(op, "dry_run"))
+ self.assertFalse(hasattr(op, "force"))
+
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
+
+ def testErrors(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+
+ # No storage type
+ queryargs = {
+ "name": "xyz",
+ }
+ handler = _CreateHandler(rlib2.R_2_nodes_name_storage_repair,
+ ["node11275"], queryargs, {}, clfactory)
+ self.assertRaises(http.HttpBadRequest, handler.PUT)
+
+ # No name
+ queryargs = {
+ "storage_type": constants.ST_LVM_VG,
+ }
+ handler = _CreateHandler(rlib2.R_2_nodes_name_storage_repair,
+ ["node21218"], queryargs, {}, clfactory)
+ self.assertRaises(http.HttpBadRequest, handler.PUT)
+
+
+class TestTags(unittest.TestCase):
+ TAG_HANDLERS = [
+ rlib2.R_2_instances_name_tags,
+ rlib2.R_2_nodes_name_tags,
+ rlib2.R_2_groups_name_tags,
+ rlib2.R_2_tags,
+ ]
+
+ def testSetAndDelete(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+
+ for method, opcls in [("PUT", opcodes.OpTagsSet),
+ ("DELETE", opcodes.OpTagsDel)]:
+ for idx, handler in enumerate(self.TAG_HANDLERS):
+ dry_run = bool(idx % 2)
+ name = "test%s" % idx
+ queryargs = {
+ "tag": ["foo", "bar", "baz"],
+ "dry-run": str(int(dry_run)),
+ }
+
+ handler = _CreateHandler(handler, [name], queryargs, {}, clfactory)
+ job_id = getattr(handler, method)()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcls))
+ self.assertEqual(op.kind, handler.TAG_LEVEL)
+ if handler.TAG_LEVEL == constants.TAG_CLUSTER:
+ self.assertTrue(op.name is None)
+ else:
+ self.assertEqual(op.name, name)
+ self.assertEqual(op.tags, ["foo", "bar", "baz"])
+ self.assertEqual(op.dry_run, dry_run)
+ self.assertFalse(hasattr(op, "force"))
+
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
- self.Parse = rlib2._ParseInstanceCreateRequestVersion1
+class TestInstanceCreation(testutils.GanetiTestCase):
def test(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+
+ name = "inst863.example.com"
+
disk_variants = [
# No disks
[],
None,
{},
{ constants.BE_VCPUS: 2, },
- { constants.BE_MEMORY: 123, },
+ { constants.BE_MAXMEM: 200, },
+ { constants.BE_MEMORY: 256, },
{ constants.BE_VCPUS: 2,
- constants.BE_MEMORY: 1024,
- constants.BE_AUTO_BALANCE: True, }
+ constants.BE_MAXMEM: 1024,
+ constants.BE_MINMEM: 1024,
+ constants.BE_AUTO_BALANCE: True,
+ constants.BE_ALWAYS_FAILOVER: True, }
]
hvparam_variants = [
for disks in disk_variants:
for beparams in beparam_variants:
for hvparams in hvparam_variants:
- data = {
- "name": "inst1.example.com",
- "hypervisor": constants.HT_FAKE,
- "disks": disks,
- "nics": nics,
- "mode": mode,
- "disk_template": disk_template,
- "os": "debootstrap",
- }
-
- if beparams is not None:
- data["beparams"] = beparams
-
- if hvparams is not None:
- data["hvparams"] = hvparams
-
for dry_run in [False, True]:
- op = self.Parse(data, dry_run)
- self.assert_(isinstance(op, opcodes.OpInstanceCreate))
+ queryargs = {
+ "dry-run": str(int(dry_run)),
+ }
+
+ data = {
+ rlib2._REQ_DATA_VERSION: 1,
+ "name": name,
+ "hypervisor": constants.HT_FAKE,
+ "disks": disks,
+ "nics": nics,
+ "mode": mode,
+ "disk_template": disk_template,
+ "os": "debootstrap",
+ }
+
+ if beparams is not None:
+ data["beparams"] = beparams
+
+ if hvparams is not None:
+ data["hvparams"] = hvparams
+
+ handler = _CreateHandler(rlib2.R_2_instances, [],
+ queryargs, data, clfactory)
+ job_id = handler.POST()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
+
+ self.assertTrue(isinstance(op, opcodes.OpInstanceCreate))
+ self.assertEqual(op.instance_name, name)
self.assertEqual(op.mode, mode)
self.assertEqual(op.disk_template, disk_template)
self.assertEqual(op.dry_run, dry_run)
self.assertEqualValues(op.hvparams, hvparams)
def testLegacyName(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+
name = "inst29128.example.com"
data = {
+ rlib2._REQ_DATA_VERSION: 1,
"name": name,
"disks": [],
"nics": [],
"mode": constants.INSTANCE_CREATE,
"disk_template": constants.DT_PLAIN,
}
- op = self.Parse(data, False)
- self.assert_(isinstance(op, opcodes.OpInstanceCreate))
+
+ handler = _CreateHandler(rlib2.R_2_instances, [], {}, data, clfactory)
+ job_id = handler.POST()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpInstanceCreate))
self.assertEqual(op.instance_name, name)
self.assertFalse(hasattr(op, "name"))
+ self.assertFalse(op.dry_run)
+
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
# Define both
- data = {
- "name": name,
- "instance_name": "other.example.com",
- "disks": [],
- "nics": [],
- "mode": constants.INSTANCE_CREATE,
- "disk_template": constants.DT_PLAIN,
- }
- self.assertRaises(http.HttpBadRequest, self.Parse, data, False)
+ data["instance_name"] = "other.example.com"
+ assert "name" in data and "instance_name" in data
+ handler = _CreateHandler(rlib2.R_2_instances, [], {}, data, clfactory)
+ self.assertRaises(http.HttpBadRequest, handler.POST)
+ self.assertRaises(IndexError, clfactory.GetNextClient)
def testLegacyOs(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+
name = "inst4673.example.com"
os = "linux29206"
data = {
+ rlib2._REQ_DATA_VERSION: 1,
"name": name,
"os_type": os,
"disks": [],
"mode": constants.INSTANCE_CREATE,
"disk_template": constants.DT_PLAIN,
}
- op = self.Parse(data, False)
- self.assert_(isinstance(op, opcodes.OpInstanceCreate))
+
+ handler = _CreateHandler(rlib2.R_2_instances, [], {}, data, clfactory)
+ job_id = handler.POST()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpInstanceCreate))
self.assertEqual(op.instance_name, name)
self.assertEqual(op.os_type, os)
self.assertFalse(hasattr(op, "os"))
+ self.assertFalse(op.dry_run)
+
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
# Define both
- data = {
- "instance_name": name,
- "os": os,
- "os_type": "linux9584",
- "disks": [],
- "nics": [],
- "mode": constants.INSTANCE_CREATE,
- "disk_template": constants.DT_PLAIN,
- }
- self.assertRaises(http.HttpBadRequest, self.Parse, data, False)
+ data["os"] = "linux9584"
+ assert "os" in data and "os_type" in data
+ handler = _CreateHandler(rlib2.R_2_instances, [], {}, data, clfactory)
+ self.assertRaises(http.HttpBadRequest, handler.POST)
def testErrors(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+
# Test all required fields
reqfields = {
+ rlib2._REQ_DATA_VERSION: 1,
"name": "inst1.example.com",
"disks": [],
"nics": [],
}
for name in reqfields.keys():
- self.assertRaises(http.HttpBadRequest, self.Parse,
- dict(i for i in reqfields.iteritems() if i[0] != name),
- False)
+ data = dict(i for i in reqfields.iteritems() if i[0] != name)
+
+ handler = _CreateHandler(rlib2.R_2_instances, [], {}, data, clfactory)
+ self.assertRaises(http.HttpBadRequest, handler.POST)
+ self.assertRaises(IndexError, clfactory.GetNextClient)
# Invalid disks and nics
for field in ["disks", "nics"]:
for invvalue in invalid_values:
data = reqfields.copy()
data[field] = invvalue
- self.assertRaises(http.HttpBadRequest, self.Parse, data, False)
+ handler = _CreateHandler(rlib2.R_2_instances, [], {}, data, clfactory)
+ self.assertRaises(http.HttpBadRequest, handler.POST)
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+ def testVersion(self):
+ clfactory = _FakeClientFactory(_FakeClient)
-class TestParseExportInstanceRequest(testutils.GanetiTestCase):
- def setUp(self):
- testutils.GanetiTestCase.setUp(self)
+ # No version field
+ data = {
+ "name": "inst1.example.com",
+ "disks": [],
+ "nics": [],
+ "mode": constants.INSTANCE_CREATE,
+ "disk_template": constants.DT_PLAIN,
+ }
+
+ handler = _CreateHandler(rlib2.R_2_instances, [], {}, data, clfactory)
+ self.assertRaises(http.HttpBadRequest, handler.POST)
+
+ # Old and incorrect versions
+ for version in [0, -1, 10483, "Hello World"]:
+ data[rlib2._REQ_DATA_VERSION] = version
+
+ handler = _CreateHandler(rlib2.R_2_instances, [], {}, data, clfactory)
+ self.assertRaises(http.HttpBadRequest, handler.POST)
- self.Parse = rlib2._ParseExportInstanceRequest
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+ # Correct version
+ data[rlib2._REQ_DATA_VERSION] = 1
+ handler = _CreateHandler(rlib2.R_2_instances, [], {}, data, clfactory)
+ job_id = handler.POST()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpInstanceCreate))
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
+
+
+class TestBackupExport(unittest.TestCase):
def test(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+
name = "instmoo"
data = {
"mode": constants.EXPORT_MODE_REMOTE,
"destination": [(1, 2, 3), (99, 99, 99)],
"shutdown": True,
"remove_instance": True,
"x509_key_name": ["name", "hash"],
"destination_x509_ca": "---cert---",
}
- op = self.Parse(name, data)
- self.assert_(isinstance(op, opcodes.OpBackupExport))
+
+ handler = _CreateHandler(rlib2.R_2_instances_name_export, [name], {},
+ data, clfactory)
+ job_id = handler.PUT()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpBackupExport))
self.assertEqual(op.instance_name, name)
self.assertEqual(op.mode, constants.EXPORT_MODE_REMOTE)
+ self.assertEqual(op.target_node, [(1, 2, 3), (99, 99, 99)])
self.assertEqual(op.shutdown, True)
self.assertEqual(op.remove_instance, True)
- self.assertEqualValues(op.x509_key_name, ("name", "hash"))
+ self.assertEqual(op.x509_key_name, ["name", "hash"])
self.assertEqual(op.destination_x509_ca, "---cert---")
+ self.assertFalse(hasattr(op, "dry_run"))
+ self.assertFalse(hasattr(op, "force"))
+
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
def testDefaults(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+
name = "inst1"
data = {
"destination": "node2",
"shutdown": False,
}
- op = self.Parse(name, data)
- self.assert_(isinstance(op, opcodes.OpBackupExport))
+
+ handler = _CreateHandler(rlib2.R_2_instances_name_export, [name], {},
+ data, clfactory)
+ job_id = handler.PUT()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpBackupExport))
self.assertEqual(op.instance_name, name)
self.assertEqual(op.target_node, "node2")
self.assertFalse(hasattr(op, "mode"))
self.assertFalse(hasattr(op, "remove_instance"))
self.assertFalse(hasattr(op, "destination"))
+ self.assertFalse(hasattr(op, "dry_run"))
+ self.assertFalse(hasattr(op, "force"))
- def testErrors(self):
- self.assertRaises(http.HttpBadRequest, self.Parse, "err1",
- { "remove_instance": "True", })
- self.assertRaises(http.HttpBadRequest, self.Parse, "err1",
- { "remove_instance": "False", })
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
+ def testErrors(self):
+ clfactory = _FakeClientFactory(_FakeClient)
-class TestParseMigrateInstanceRequest(testutils.GanetiTestCase):
- def setUp(self):
- testutils.GanetiTestCase.setUp(self)
+ for value in ["True", "False"]:
+ handler = _CreateHandler(rlib2.R_2_instances_name_export, ["err1"], {}, {
+ "remove_instance": value,
+ }, clfactory)
+ self.assertRaises(http.HttpBadRequest, handler.PUT)
- self.Parse = rlib2._ParseMigrateInstanceRequest
+class TestInstanceMigrate(testutils.GanetiTestCase):
def test(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+
name = "instYooho6ek"
for cleanup in [False, True]:
"cleanup": cleanup,
"mode": mode,
}
- op = self.Parse(name, data)
- self.assert_(isinstance(op, opcodes.OpInstanceMigrate))
+
+ handler = _CreateHandler(rlib2.R_2_instances_name_migrate, [name], {},
+ data, clfactory)
+ job_id = handler.PUT()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpInstanceMigrate))
self.assertEqual(op.instance_name, name)
self.assertEqual(op.mode, mode)
self.assertEqual(op.cleanup, cleanup)
+ self.assertFalse(hasattr(op, "dry_run"))
+ self.assertFalse(hasattr(op, "force"))
+
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
def testDefaults(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+
name = "instnohZeex0"
- op = self.Parse(name, {})
- self.assert_(isinstance(op, opcodes.OpInstanceMigrate))
+ handler = _CreateHandler(rlib2.R_2_instances_name_migrate, [name], {}, {},
+ clfactory)
+ job_id = handler.PUT()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpInstanceMigrate))
self.assertEqual(op.instance_name, name)
self.assertFalse(hasattr(op, "mode"))
self.assertFalse(hasattr(op, "cleanup"))
+ self.assertFalse(hasattr(op, "dry_run"))
+ self.assertFalse(hasattr(op, "force"))
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
-class TestParseRenameInstanceRequest(testutils.GanetiTestCase):
- def setUp(self):
- testutils.GanetiTestCase.setUp(self)
-
- self.Parse = rlib2._ParseRenameInstanceRequest
+class TestInstanceRename(testutils.GanetiTestCase):
def test(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+
name = "instij0eeph7"
for new_name in ["ua0aiyoo", "fai3ongi"]:
"name_check": name_check,
}
- op = self.Parse(name, data)
- self.assert_(isinstance(op, opcodes.OpInstanceRename))
+ handler = _CreateHandler(rlib2.R_2_instances_name_rename, [name],
+ {}, data, clfactory)
+ job_id = handler.PUT()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpInstanceRename))
self.assertEqual(op.instance_name, name)
self.assertEqual(op.new_name, new_name)
self.assertEqual(op.ip_check, ip_check)
self.assertEqual(op.name_check, name_check)
+ self.assertFalse(hasattr(op, "dry_run"))
+ self.assertFalse(hasattr(op, "force"))
+
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
def testDefaults(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+
name = "instahchie3t"
for new_name in ["thag9mek", "quees7oh"]:
"new_name": new_name,
}
- op = self.Parse(name, data)
- self.assert_(isinstance(op, opcodes.OpInstanceRename))
+ handler = _CreateHandler(rlib2.R_2_instances_name_rename, [name],
+ {}, data, clfactory)
+ job_id = handler.PUT()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpInstanceRename))
self.assertEqual(op.instance_name, name)
self.assertEqual(op.new_name, new_name)
self.assertFalse(hasattr(op, "ip_check"))
self.assertFalse(hasattr(op, "name_check"))
+ self.assertFalse(hasattr(op, "dry_run"))
+ self.assertFalse(hasattr(op, "force"))
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
-class TestParseModifyInstanceRequest(testutils.GanetiTestCase):
- def setUp(self):
- testutils.GanetiTestCase.setUp(self)
-
- self.Parse = rlib2._ParseModifyInstanceRequest
+class TestInstanceModify(unittest.TestCase):
def test(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+
name = "instush8gah"
test_disks = [
for osparams in [{}, { "some": "value", "other": "Hello World", }]:
for hvparams in [{}, { constants.HV_KERNEL_PATH: "/some/kernel", }]:
- for beparams in [{}, { constants.BE_MEMORY: 128, }]:
+ for beparams in [{}, { constants.BE_MAXMEM: 128, }]:
for force in [False, True]:
for nics in [[], [(0, { constants.INIC_IP: "192.0.2.1", })]]:
for disks in test_disks:
"disk_template": disk_template,
}
- op = self.Parse(name, data)
- self.assert_(isinstance(op, opcodes.OpInstanceSetParams))
+ handler = _CreateHandler(rlib2.R_2_instances_name_modify,
+ [name], {}, data, clfactory)
+ job_id = handler.PUT()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpInstanceSetParams))
self.assertEqual(op.instance_name, name)
self.assertEqual(op.hvparams, hvparams)
self.assertEqual(op.beparams, beparams)
self.assertFalse(hasattr(op, "remote_node"))
self.assertFalse(hasattr(op, "os_name"))
self.assertFalse(hasattr(op, "force_variant"))
+ self.assertFalse(hasattr(op, "dry_run"))
+
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
def testDefaults(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+
name = "instir8aish31"
- op = self.Parse(name, {})
- self.assert_(isinstance(op, opcodes.OpInstanceSetParams))
+ handler = _CreateHandler(rlib2.R_2_instances_name_modify,
+ [name], {}, {}, clfactory)
+ job_id = handler.PUT()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpInstanceSetParams))
self.assertEqual(op.instance_name, name)
+
for i in ["hvparams", "beparams", "osparams", "force", "nics", "disks",
"disk_template", "remote_node", "os_name", "force_variant"]:
self.assertFalse(hasattr(op, i))
self.assertEqual(ops[1].os_type, "linux1")
self.assertFalse(ops[1].osparams)
+ def testErrors(self):
+ self.assertRaises(http.HttpBadRequest, self.Parse,
+ "foo", "not a dictionary")
-class TestParseRenameGroupRequest(testutils.GanetiTestCase):
- def setUp(self):
- testutils.GanetiTestCase.setUp(self)
-
- self.Parse = rlib2._ParseRenameGroupRequest
+class TestGroupRename(unittest.TestCase):
def test(self):
- name = "instij0eeph7"
+ clfactory = _FakeClientFactory(_FakeClient)
+
+ name = "group608242564"
data = {
- "new_name": "ua0aiyoo",
+ "new_name": "ua0aiyoo15112",
}
- op = self.Parse(name, data, False)
+ handler = _CreateHandler(rlib2.R_2_groups_name_rename, [name], {}, data,
+ clfactory)
+ job_id = handler.PUT()
- self.assert_(isinstance(op, opcodes.OpGroupRename))
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+
+ self.assertTrue(isinstance(op, opcodes.OpGroupRename))
self.assertEqual(op.group_name, name)
- self.assertEqual(op.new_name, "ua0aiyoo")
+ self.assertEqual(op.new_name, "ua0aiyoo15112")
self.assertFalse(op.dry_run)
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
def testDryRun(self):
- name = "instij0eeph7"
+ clfactory = _FakeClientFactory(_FakeClient)
+
+ name = "group28548"
data = {
"new_name": "ua0aiyoo",
}
- op = self.Parse(name, data, True)
+ handler = _CreateHandler(rlib2.R_2_groups_name_rename, [name], {
+ "dry-run": ["1"],
+ }, data, clfactory)
+ job_id = handler.PUT()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
- self.assert_(isinstance(op, opcodes.OpGroupRename))
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+
+ self.assertTrue(isinstance(op, opcodes.OpGroupRename))
self.assertEqual(op.group_name, name)
self.assertEqual(op.new_name, "ua0aiyoo")
- self.assert_(op.dry_run)
+ self.assertTrue(op.dry_run)
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
-class TestParseInstanceReplaceDisksRequest(unittest.TestCase):
- def setUp(self):
- self.Parse = rlib2._ParseInstanceReplaceDisksRequest
-
+class TestInstanceReplaceDisks(unittest.TestCase):
def test(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+
name = "inst22568"
for disks in [range(1, 4), "1,2,3", "1, 2, 3"]:
"iallocator": "myalloc",
}
- op = self.Parse(name, data)
- self.assert_(isinstance(op, opcodes.OpInstanceReplaceDisks))
+ handler = _CreateHandler(rlib2.R_2_instances_name_replace_disks,
+ [name], {}, data, clfactory)
+ job_id = handler.POST()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+
+ self.assertTrue(isinstance(op, opcodes.OpInstanceReplaceDisks))
+ self.assertEqual(op.instance_name, name)
self.assertEqual(op.mode, constants.REPLACE_DISK_SEC)
self.assertEqual(op.disks, [1, 2, 3])
self.assertEqual(op.iallocator, "myalloc")
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
def testDefaults(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+
name = "inst11413"
data = {
"mode": constants.REPLACE_DISK_AUTO,
}
- op = self.Parse(name, data)
- self.assert_(isinstance(op, opcodes.OpInstanceReplaceDisks))
+ handler = _CreateHandler(rlib2.R_2_instances_name_replace_disks,
+ [name], {}, data, clfactory)
+ job_id = handler.POST()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+
+ self.assertTrue(isinstance(op, opcodes.OpInstanceReplaceDisks))
+ self.assertEqual(op.instance_name, name)
self.assertEqual(op.mode, constants.REPLACE_DISK_AUTO)
self.assertFalse(hasattr(op, "iallocator"))
self.assertFalse(hasattr(op, "disks"))
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
def testNoDisks(self):
- self.assertRaises(http.HttpBadRequest, self.Parse, "inst20661", {})
+ clfactory = _FakeClientFactory(_FakeClient)
+
+ handler = _CreateHandler(rlib2.R_2_instances_name_replace_disks,
+ ["inst20661"], {}, {}, clfactory)
+ self.assertRaises(http.HttpBadRequest, handler.POST)
for disks in [None, "", {}]:
- self.assertRaises(http.HttpBadRequest, self.Parse, "inst20661", {
+ handler = _CreateHandler(rlib2.R_2_instances_name_replace_disks,
+ ["inst20661"], {}, {
"disks": disks,
- })
+ }, clfactory)
+ self.assertRaises(http.HttpBadRequest, handler.POST)
def testWrong(self):
- self.assertRaises(http.HttpBadRequest, self.Parse, "inst",
- { "mode": constants.REPLACE_DISK_AUTO,
- "disks": "hello world",
- })
+ clfactory = _FakeClientFactory(_FakeClient)
+
+ data = {
+ "mode": constants.REPLACE_DISK_AUTO,
+ "disks": "hello world",
+ }
+ handler = _CreateHandler(rlib2.R_2_instances_name_replace_disks,
+ ["foo"], {}, data, clfactory)
+ self.assertRaises(http.HttpBadRequest, handler.POST)
-class TestParseModifyGroupRequest(unittest.TestCase):
- def setUp(self):
- self.Parse = rlib2._ParseModifyGroupRequest
+class TestGroupModify(unittest.TestCase):
def test(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+
name = "group6002"
for policy in constants.VALID_ALLOC_POLICIES:
"alloc_policy": policy,
}
- op = self.Parse(name, data)
- self.assert_(isinstance(op, opcodes.OpGroupSetParams))
+ handler = _CreateHandler(rlib2.R_2_groups_name_modify, [name], {}, data,
+ clfactory)
+ job_id = handler.PUT()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+
+ self.assertTrue(isinstance(op, opcodes.OpGroupSetParams))
self.assertEqual(op.group_name, name)
self.assertEqual(op.alloc_policy, policy)
+ self.assertFalse(hasattr(op, "dry_run"))
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
def testUnknownPolicy(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+
data = {
"alloc_policy": "_unknown_policy_",
}
- self.assertRaises(http.HttpBadRequest, self.Parse, "name", data)
+ handler = _CreateHandler(rlib2.R_2_groups_name_modify, ["xyz"], {}, data,
+ clfactory)
+ self.assertRaises(http.HttpBadRequest, handler.PUT)
+ self.assertRaises(IndexError, clfactory.GetNextClient)
def testDefaults(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+
name = "group6679"
- data = {}
- op = self.Parse(name, data)
- self.assert_(isinstance(op, opcodes.OpGroupSetParams))
+ handler = _CreateHandler(rlib2.R_2_groups_name_modify, [name], {}, {},
+ clfactory)
+ job_id = handler.PUT()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+
+ self.assertTrue(isinstance(op, opcodes.OpGroupSetParams))
self.assertEqual(op.group_name, name)
self.assertFalse(hasattr(op, "alloc_policy"))
+ self.assertFalse(hasattr(op, "dry_run"))
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
-class TestParseCreateGroupRequest(unittest.TestCase):
- def setUp(self):
- self.Parse = rlib2._ParseCreateGroupRequest
-
+class TestGroupAdd(unittest.TestCase):
def test(self):
name = "group3618"
+ clfactory = _FakeClientFactory(_FakeClient)
for policy in constants.VALID_ALLOC_POLICIES:
data = {
"alloc_policy": policy,
}
- op = self.Parse(data, False)
- self.assert_(isinstance(op, opcodes.OpGroupAdd))
+ handler = _CreateHandler(rlib2.R_2_groups, [], {}, data,
+ clfactory)
+ job_id = handler.POST()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+
+ self.assertTrue(isinstance(op, opcodes.OpGroupAdd))
self.assertEqual(op.group_name, name)
self.assertEqual(op.alloc_policy, policy)
self.assertFalse(op.dry_run)
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
def testUnknownPolicy(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+
data = {
"alloc_policy": "_unknown_policy_",
}
- self.assertRaises(http.HttpBadRequest, self.Parse, "name", data)
+ handler = _CreateHandler(rlib2.R_2_groups, [], {}, data, clfactory)
+ self.assertRaises(http.HttpBadRequest, handler.POST)
+ self.assertRaises(IndexError, clfactory.GetNextClient)
def testDefaults(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+
name = "group15395"
data = {
"group_name": name,
}
- op = self.Parse(data, True)
- self.assert_(isinstance(op, opcodes.OpGroupAdd))
+ handler = _CreateHandler(rlib2.R_2_groups, [], {}, data, clfactory)
+ job_id = handler.POST()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+
+ self.assertTrue(isinstance(op, opcodes.OpGroupAdd))
self.assertEqual(op.group_name, name)
self.assertFalse(hasattr(op, "alloc_policy"))
- self.assertTrue(op.dry_run)
+ self.assertFalse(op.dry_run)
def testLegacyName(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+
name = "group29852"
data = {
"name": name,
}
- op = self.Parse(data, True)
- self.assert_(isinstance(op, opcodes.OpGroupAdd))
+ handler = _CreateHandler(rlib2.R_2_groups, [], {
+ "dry-run": ["1"],
+ }, data, clfactory)
+ job_id = handler.POST()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+
+ self.assertTrue(isinstance(op, opcodes.OpGroupAdd))
self.assertEqual(op.group_name, name)
+ self.assertFalse(hasattr(op, "alloc_policy"))
+ self.assertTrue(op.dry_run)
+
+
+class TestNodeRole(unittest.TestCase):
+ def test(self):
+ clfactory = _FakeClientFactory(_FakeClient)
+
+ for role in rlib2._NR_MAP.values():
+ handler = _CreateHandler(rlib2.R_2_nodes_name_role,
+ ["node-z"], {}, role, clfactory)
+ if role == rlib2._NR_MASTER:
+ self.assertRaises(http.HttpBadRequest, handler.PUT)
+ else:
+ job_id = handler.PUT()
+
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+
+ (exp_job_id, (op, )) = cl.GetNextSubmittedJob()
+ self.assertEqual(job_id, exp_job_id)
+ self.assertTrue(isinstance(op, opcodes.OpNodeSetParams))
+ self.assertEqual(op.node_name, "node-z")
+ self.assertFalse(op.force)
+ self.assertFalse(hasattr(op, "dry_run"))
+
+ if role == rlib2._NR_REGULAR:
+ self.assertFalse(op.drained)
+ self.assertFalse(op.offline)
+ self.assertFalse(op.master_candidate)
+ elif role == rlib2._NR_MASTER_CANDIDATE:
+ self.assertFalse(op.drained)
+ self.assertFalse(op.offline)
+ self.assertTrue(op.master_candidate)
+ elif role == rlib2._NR_DRAINED:
+ self.assertTrue(op.drained)
+ self.assertFalse(op.offline)
+ self.assertFalse(op.master_candidate)
+ elif role == rlib2._NR_OFFLINE:
+ self.assertFalse(op.drained)
+ self.assertTrue(op.offline)
+ self.assertFalse(op.master_candidate)
+ else:
+ self.fail("Unknown role '%s'" % role)
+
+ self.assertRaises(IndexError, cl.GetNextSubmittedJob)
+
+
+class TestSimpleResources(unittest.TestCase):
+ def setUp(self):
+ self.clfactory = _FakeClientFactory(_FakeClient)
+
+ def tearDown(self):
+ self.assertRaises(IndexError, self.clfactory.GetNextClient)
+
+ def testFeatures(self):
+ handler = _CreateHandler(rlib2.R_2_features, [], {}, None, self.clfactory)
+ self.assertEqual(set(handler.GET()), rlib2.ALL_FEATURES)
+
+ def testEmpty(self):
+ for cls in [rlib2.R_root, rlib2.R_2]:
+ handler = _CreateHandler(cls, [], {}, None, self.clfactory)
+ self.assertTrue(handler.GET() is None)
+
+ def testVersion(self):
+ handler = _CreateHandler(rlib2.R_version, [], {}, None, self.clfactory)
+ self.assertEqual(handler.GET(), constants.RAPI_VERSION)
+
+
+class TestClusterInfo(unittest.TestCase):
+ class _ClusterInfoClient:
+ def __init__(self):
+ self.cluster_info = None
+
+ def QueryClusterInfo(self):
+ assert self.cluster_info is None
+ self.cluster_info = object()
+ return self.cluster_info
+
+ def test(self):
+ clfactory = _FakeClientFactory(self._ClusterInfoClient)
+ handler = _CreateHandler(rlib2.R_2_info, [], {}, None, clfactory)
+ result = handler.GET()
+ cl = clfactory.GetNextClient()
+ self.assertRaises(IndexError, clfactory.GetNextClient)
+ self.assertEqual(result, cl.cluster_info)
if __name__ == '__main__':
#!/usr/bin/python
#
-# Copyright (C) 2010 Google Inc.
+# Copyright (C) 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
import os
import sys
import unittest
+import random
+import tempfile
from ganeti import constants
from ganeti import compat
from ganeti import rpc
+from ganeti import rpc_defs
from ganeti import http
from ganeti import errors
from ganeti import serializer
+from ganeti import objects
+from ganeti import backend
import testutils
+import mocks
-class TestTimeouts(unittest.TestCase):
- def test(self):
- names = [name[len("call_"):] for name in dir(rpc.RpcRunner)
- if name.startswith("call_")]
- self.assertEqual(len(names), len(rpc._TIMEOUTS))
- self.assertFalse([name for name in names
- if not (rpc._TIMEOUTS[name] is None or
- rpc._TIMEOUTS[name] > 0)])
-
-
-class FakeHttpPool:
+class _FakeRequestProcessor:
def __init__(self, response_fn):
self._response_fn = response_fn
self.reqcount = 0
- def ProcessRequests(self, reqs):
+ def __call__(self, reqs, lock_monitor_cb=None):
+ assert lock_monitor_cb is None or callable(lock_monitor_cb)
for req in reqs:
self.reqcount += 1
self._response_fn(req)
return FakeSimpleStore
-class TestClient(unittest.TestCase):
+class TestRpcProcessor(unittest.TestCase):
def _FakeAddressLookup(self, map):
return lambda node_list: [map.get(node) for node in node_list]
def _GetVersionResponse(self, req):
- self.assertEqual(req.host, "localhost")
+ self.assertEqual(req.host, "127.0.0.1")
self.assertEqual(req.port, 24094)
self.assertEqual(req.path, "/version")
+ self.assertEqual(req.read_timeout, rpc._TMO_URGENT)
req.success = True
req.resp_status_code = http.HTTP_OK
req.resp_body = serializer.DumpJson((True, 123))
def testVersionSuccess(self):
- fn = self._FakeAddressLookup({"localhost": "localhost"})
- client = rpc.Client("version", None, 24094, address_lookup_fn=fn)
- client.ConnectNode("localhost")
- pool = FakeHttpPool(self._GetVersionResponse)
- result = client.GetResults(http_pool=pool)
+ resolver = rpc._StaticResolver(["127.0.0.1"])
+ http_proc = _FakeRequestProcessor(self._GetVersionResponse)
+ proc = rpc._RpcProcessor(resolver, 24094)
+ result = proc(["localhost"], "version", {"localhost": ""}, 60,
+ NotImplemented, _req_process_fn=http_proc)
self.assertEqual(result.keys(), ["localhost"])
lhresp = result["localhost"]
self.assertFalse(lhresp.offline)
self.assertEqual(lhresp.payload, 123)
self.assertEqual(lhresp.call, "version")
lhresp.Raise("should not raise")
- self.assertEqual(pool.reqcount, 1)
+ self.assertEqual(http_proc.reqcount, 1)
+
+ def _ReadTimeoutResponse(self, req):
+ self.assertEqual(req.host, "192.0.2.13")
+ self.assertEqual(req.port, 19176)
+ self.assertEqual(req.path, "/version")
+ self.assertEqual(req.read_timeout, 12356)
+ req.success = True
+ req.resp_status_code = http.HTTP_OK
+ req.resp_body = serializer.DumpJson((True, -1))
+
+ def testReadTimeout(self):
+ resolver = rpc._StaticResolver(["192.0.2.13"])
+ http_proc = _FakeRequestProcessor(self._ReadTimeoutResponse)
+ proc = rpc._RpcProcessor(resolver, 19176)
+ host = "node31856"
+ body = {host: ""}
+ result = proc([host], "version", body, 12356, NotImplemented,
+ _req_process_fn=http_proc)
+ self.assertEqual(result.keys(), [host])
+ lhresp = result[host]
+ self.assertFalse(lhresp.offline)
+ self.assertEqual(lhresp.node, host)
+ self.assertFalse(lhresp.fail_msg)
+ self.assertEqual(lhresp.payload, -1)
+ self.assertEqual(lhresp.call, "version")
+ lhresp.Raise("should not raise")
+ self.assertEqual(http_proc.reqcount, 1)
+
+ def testOfflineNode(self):
+ resolver = rpc._StaticResolver([rpc._OFFLINE])
+ http_proc = _FakeRequestProcessor(NotImplemented)
+ proc = rpc._RpcProcessor(resolver, 30668)
+ host = "n17296"
+ body = {host: ""}
+ result = proc([host], "version", body, 60, NotImplemented,
+ _req_process_fn=http_proc)
+ self.assertEqual(result.keys(), [host])
+ lhresp = result[host]
+ self.assertTrue(lhresp.offline)
+ self.assertEqual(lhresp.node, host)
+ self.assertTrue(lhresp.fail_msg)
+ self.assertFalse(lhresp.payload)
+ self.assertEqual(lhresp.call, "version")
+
+ # With a message
+ self.assertRaises(errors.OpExecError, lhresp.Raise, "should raise")
+
+ # No message
+ self.assertRaises(errors.OpExecError, lhresp.Raise, None)
+
+ self.assertEqual(http_proc.reqcount, 0)
def _GetMultiVersionResponse(self, req):
self.assert_(req.host.startswith("node"))
def testMultiVersionSuccess(self):
nodes = ["node%s" % i for i in range(50)]
- fn = self._FakeAddressLookup(dict(zip(nodes, nodes)))
- client = rpc.Client("version", None, 23245, address_lookup_fn=fn)
- client.ConnectList(nodes)
-
- pool = FakeHttpPool(self._GetMultiVersionResponse)
- result = client.GetResults(http_pool=pool)
+ body = dict((n, "") for n in nodes)
+ resolver = rpc._StaticResolver(nodes)
+ http_proc = _FakeRequestProcessor(self._GetMultiVersionResponse)
+ proc = rpc._RpcProcessor(resolver, 23245)
+ result = proc(nodes, "version", body, 60, NotImplemented,
+ _req_process_fn=http_proc)
self.assertEqual(sorted(result.keys()), sorted(nodes))
for name in nodes:
self.assertEqual(lhresp.call, "version")
lhresp.Raise("should not raise")
- self.assertEqual(pool.reqcount, len(nodes))
+ self.assertEqual(http_proc.reqcount, len(nodes))
- def _GetVersionResponseFail(self, req):
+ def _GetVersionResponseFail(self, errinfo, req):
self.assertEqual(req.path, "/version")
req.success = True
req.resp_status_code = http.HTTP_OK
- req.resp_body = serializer.DumpJson((False, "Unknown error"))
+ req.resp_body = serializer.DumpJson((False, errinfo))
def testVersionFailure(self):
- lookup_map = {"aef9ur4i.example.com": "aef9ur4i.example.com"}
- fn = self._FakeAddressLookup(lookup_map)
- client = rpc.Client("version", None, 5903, address_lookup_fn=fn)
- client.ConnectNode("aef9ur4i.example.com")
- pool = FakeHttpPool(self._GetVersionResponseFail)
- result = client.GetResults(http_pool=pool)
- self.assertEqual(result.keys(), ["aef9ur4i.example.com"])
- lhresp = result["aef9ur4i.example.com"]
- self.assertFalse(lhresp.offline)
- self.assertEqual(lhresp.node, "aef9ur4i.example.com")
- self.assert_(lhresp.fail_msg)
- self.assertFalse(lhresp.payload)
- self.assertEqual(lhresp.call, "version")
- self.assertRaises(errors.OpExecError, lhresp.Raise, "failed")
- self.assertEqual(pool.reqcount, 1)
+ resolver = rpc._StaticResolver(["aef9ur4i.example.com"])
+ proc = rpc._RpcProcessor(resolver, 5903)
+ for errinfo in [None, "Unknown error"]:
+ http_proc = \
+ _FakeRequestProcessor(compat.partial(self._GetVersionResponseFail,
+ errinfo))
+ host = "aef9ur4i.example.com"
+ body = {host: ""}
+ result = proc(body.keys(), "version", body, 60, NotImplemented,
+ _req_process_fn=http_proc)
+ self.assertEqual(result.keys(), [host])
+ lhresp = result[host]
+ self.assertFalse(lhresp.offline)
+ self.assertEqual(lhresp.node, host)
+      self.assertTrue(lhresp.fail_msg)
+ self.assertFalse(lhresp.payload)
+ self.assertEqual(lhresp.call, "version")
+ self.assertRaises(errors.OpExecError, lhresp.Raise, "failed")
+ self.assertEqual(http_proc.reqcount, 1)
def _GetHttpErrorResponse(self, httperrnodes, failnodes, req):
self.assertEqual(req.path, "/vg_list")
def testHttpError(self):
nodes = ["uaf6pbbv%s" % i for i in range(50)]
- fn = self._FakeAddressLookup(dict(zip(nodes, nodes)))
+ body = dict((n, "") for n in nodes)
+ resolver = rpc._StaticResolver(nodes)
httperrnodes = set(nodes[1::7])
self.assertEqual(len(httperrnodes), 7)
self.assertEqual(len(set(nodes) - failnodes - httperrnodes), 29)
- client = rpc.Client("vg_list", None, 15165, address_lookup_fn=fn)
- client.ConnectList(nodes)
-
- pool = FakeHttpPool(compat.partial(self._GetHttpErrorResponse,
- httperrnodes, failnodes))
- result = client.GetResults(http_pool=pool)
+ proc = rpc._RpcProcessor(resolver, 15165)
+ http_proc = \
+ _FakeRequestProcessor(compat.partial(self._GetHttpErrorResponse,
+ httperrnodes, failnodes))
+ result = proc(nodes, "vg_list", body, rpc._TMO_URGENT, NotImplemented,
+ _req_process_fn=http_proc)
self.assertEqual(sorted(result.keys()), sorted(nodes))
for name in nodes:
self.assertEqual(lhresp.payload, hash(name))
lhresp.Raise("should not raise")
- self.assertEqual(pool.reqcount, len(nodes))
+ self.assertEqual(http_proc.reqcount, len(nodes))
def _GetInvalidResponseA(self, req):
self.assertEqual(req.path, "/version")
req.resp_body = serializer.DumpJson("invalid response")
def testInvalidResponse(self):
- lookup_map = {"oqo7lanhly.example.com": "oqo7lanhly.example.com"}
- fn = self._FakeAddressLookup(lookup_map)
- client = rpc.Client("version", None, 19978, address_lookup_fn=fn)
+ resolver = rpc._StaticResolver(["oqo7lanhly.example.com"])
+ proc = rpc._RpcProcessor(resolver, 19978)
+
for fn in [self._GetInvalidResponseA, self._GetInvalidResponseB]:
- client.ConnectNode("oqo7lanhly.example.com")
- pool = FakeHttpPool(fn)
- result = client.GetResults(http_pool=pool)
- self.assertEqual(result.keys(), ["oqo7lanhly.example.com"])
- lhresp = result["oqo7lanhly.example.com"]
+ http_proc = _FakeRequestProcessor(fn)
+ host = "oqo7lanhly.example.com"
+ body = {host: ""}
+ result = proc([host], "version", body, 60, NotImplemented,
+ _req_process_fn=http_proc)
+ self.assertEqual(result.keys(), [host])
+ lhresp = result[host]
self.assertFalse(lhresp.offline)
- self.assertEqual(lhresp.node, "oqo7lanhly.example.com")
+ self.assertEqual(lhresp.node, host)
self.assert_(lhresp.fail_msg)
self.assertFalse(lhresp.payload)
self.assertEqual(lhresp.call, "version")
self.assertRaises(errors.OpExecError, lhresp.Raise, "failed")
- self.assertEqual(pool.reqcount, 1)
+ self.assertEqual(http_proc.reqcount, 1)
+
+ def _GetBodyTestResponse(self, test_data, req):
+ self.assertEqual(req.host, "192.0.2.84")
+ self.assertEqual(req.port, 18700)
+ self.assertEqual(req.path, "/upload_file")
+ self.assertEqual(serializer.LoadJson(req.post_data), test_data)
+ req.success = True
+ req.resp_status_code = http.HTTP_OK
+ req.resp_body = serializer.DumpJson((True, None))
+
+ def testResponseBody(self):
+ test_data = {
+ "Hello": "World",
+ "xyz": range(10),
+ }
+ resolver = rpc._StaticResolver(["192.0.2.84"])
+ http_proc = _FakeRequestProcessor(compat.partial(self._GetBodyTestResponse,
+ test_data))
+ proc = rpc._RpcProcessor(resolver, 18700)
+ host = "node19759"
+ body = {host: serializer.DumpJson(test_data)}
+ result = proc([host], "upload_file", body, 30, NotImplemented,
+ _req_process_fn=http_proc)
+ self.assertEqual(result.keys(), [host])
+ lhresp = result[host]
+ self.assertFalse(lhresp.offline)
+ self.assertEqual(lhresp.node, host)
+ self.assertFalse(lhresp.fail_msg)
+ self.assertEqual(lhresp.payload, None)
+ self.assertEqual(lhresp.call, "upload_file")
+ lhresp.Raise("should not raise")
+ self.assertEqual(http_proc.reqcount, 1)
+
- def testAddressLookupSimpleStore(self):
+class TestSsconfResolver(unittest.TestCase):
+ def testSsconfLookup(self):
addr_list = ["192.0.2.%d" % n for n in range(0, 255, 13)]
node_list = ["node%d.example.com" % n for n in range(0, 255, 13)]
- node_addr_list = [ " ".join(t) for t in zip(node_list, addr_list)]
+ node_addr_list = [" ".join(t) for t in zip(node_list, addr_list)]
ssc = GetFakeSimpleStoreClass(lambda _: node_addr_list)
- result = rpc._AddressLookup(node_list, ssc=ssc)
- self.assertEqual(result, addr_list)
+ result = rpc._SsconfResolver(node_list, NotImplemented,
+ ssc=ssc, nslookup_fn=NotImplemented)
+ self.assertEqual(result, zip(node_list, addr_list))
- def testAddressLookupNSLookup(self):
+ def testNsLookup(self):
addr_list = ["192.0.2.%d" % n for n in range(0, 255, 13)]
node_list = ["node%d.example.com" % n for n in range(0, 255, 13)]
ssc = GetFakeSimpleStoreClass(lambda _: [])
node_addr_map = dict(zip(node_list, addr_list))
nslookup_fn = lambda name, family=None: node_addr_map.get(name)
- result = rpc._AddressLookup(node_list, ssc=ssc, nslookup_fn=nslookup_fn)
- self.assertEqual(result, addr_list)
+ result = rpc._SsconfResolver(node_list, NotImplemented,
+ ssc=ssc, nslookup_fn=nslookup_fn)
+ self.assertEqual(result, zip(node_list, addr_list))
- def testAddressLookupBoth(self):
+ def testBothLookups(self):
addr_list = ["192.0.2.%d" % n for n in range(0, 255, 13)]
node_list = ["node%d.example.com" % n for n in range(0, 255, 13)]
n = len(addr_list) / 2
- node_addr_list = [ " ".join(t) for t in zip(node_list[n:], addr_list[n:])]
+ node_addr_list = [" ".join(t) for t in zip(node_list[n:], addr_list[n:])]
ssc = GetFakeSimpleStoreClass(lambda _: node_addr_list)
node_addr_map = dict(zip(node_list[:n], addr_list[:n]))
nslookup_fn = lambda name, family=None: node_addr_map.get(name)
- result = rpc._AddressLookup(node_list, ssc=ssc, nslookup_fn=nslookup_fn)
- self.assertEqual(result, addr_list)
+ result = rpc._SsconfResolver(node_list, NotImplemented,
+ ssc=ssc, nslookup_fn=nslookup_fn)
+ self.assertEqual(result, zip(node_list, addr_list))
def testAddressLookupIPv6(self):
- addr_list = ["2001:db8::%d" % n for n in range(0, 255, 13)]
- node_list = ["node%d.example.com" % n for n in range(0, 255, 13)]
- node_addr_list = [ " ".join(t) for t in zip(node_list, addr_list)]
+ addr_list = ["2001:db8::%d" % n for n in range(0, 255, 11)]
+ node_list = ["node%d.example.com" % n for n in range(0, 255, 11)]
+ node_addr_list = [" ".join(t) for t in zip(node_list, addr_list)]
ssc = GetFakeSimpleStoreClass(lambda _: node_addr_list)
- result = rpc._AddressLookup(node_list, ssc=ssc)
- self.assertEqual(result, addr_list)
+ result = rpc._SsconfResolver(node_list, NotImplemented,
+ ssc=ssc, nslookup_fn=NotImplemented)
+ self.assertEqual(result, zip(node_list, addr_list))
+
+
+class TestStaticResolver(unittest.TestCase):
+ def test(self):
+ addresses = ["192.0.2.%d" % n for n in range(0, 123, 7)]
+ nodes = ["node%s.example.com" % n for n in range(0, 123, 7)]
+ res = rpc._StaticResolver(addresses)
+ self.assertEqual(res(nodes, NotImplemented), zip(nodes, addresses))
+
+ def testWrongLength(self):
+ res = rpc._StaticResolver([])
+ self.assertRaises(AssertionError, res, ["abc"], NotImplemented)
+
+
+class TestNodeConfigResolver(unittest.TestCase):
+ @staticmethod
+ def _GetSingleOnlineNode(name):
+ assert name == "node90.example.com"
+ return objects.Node(name=name, offline=False, primary_ip="192.0.2.90")
+
+ @staticmethod
+ def _GetSingleOfflineNode(name):
+ assert name == "node100.example.com"
+ return objects.Node(name=name, offline=True, primary_ip="192.0.2.100")
+
+ def testSingleOnline(self):
+ self.assertEqual(rpc._NodeConfigResolver(self._GetSingleOnlineNode,
+ NotImplemented,
+ ["node90.example.com"], None),
+ [("node90.example.com", "192.0.2.90")])
+
+ def testSingleOffline(self):
+ self.assertEqual(rpc._NodeConfigResolver(self._GetSingleOfflineNode,
+ NotImplemented,
+ ["node100.example.com"], None),
+ [("node100.example.com", rpc._OFFLINE)])
+
+ def testSingleOfflineWithAcceptOffline(self):
+ fn = self._GetSingleOfflineNode
+ assert fn("node100.example.com").offline
+ self.assertEqual(rpc._NodeConfigResolver(fn, NotImplemented,
+ ["node100.example.com"],
+ rpc_defs.ACCEPT_OFFLINE_NODE),
+ [("node100.example.com", "192.0.2.100")])
+ for i in [False, True, "", "Hello", 0, 1]:
+ self.assertRaises(AssertionError, rpc._NodeConfigResolver,
+ fn, NotImplemented, ["node100.example.com"], i)
+
+ def testUnknownSingleNode(self):
+ self.assertEqual(rpc._NodeConfigResolver(lambda _: None, NotImplemented,
+ ["node110.example.com"], None),
+ [("node110.example.com", "node110.example.com")])
+
+ def testMultiEmpty(self):
+ self.assertEqual(rpc._NodeConfigResolver(NotImplemented,
+ lambda: {},
+ [], None),
+ [])
+
+ def testMultiSomeOffline(self):
+ nodes = dict(("node%s.example.com" % i,
+ objects.Node(name="node%s.example.com" % i,
+ offline=((i % 3) == 0),
+ primary_ip="192.0.2.%s" % i))
+ for i in range(1, 255))
+
+ # Resolve no names
+ self.assertEqual(rpc._NodeConfigResolver(NotImplemented,
+ lambda: nodes,
+ [], None),
+ [])
+
+ # Offline, online and unknown hosts
+ self.assertEqual(rpc._NodeConfigResolver(NotImplemented,
+ lambda: nodes,
+ ["node3.example.com",
+ "node92.example.com",
+ "node54.example.com",
+ "unknown.example.com",],
+ None), [
+ ("node3.example.com", rpc._OFFLINE),
+ ("node92.example.com", "192.0.2.92"),
+ ("node54.example.com", rpc._OFFLINE),
+ ("unknown.example.com", "unknown.example.com"),
+ ])
+
+
+class TestCompress(unittest.TestCase):
+ def test(self):
+ for data in ["", "Hello", "Hello World!\nnew\nlines"]:
+ self.assertEqual(rpc._Compress(data),
+ (constants.RPC_ENCODING_NONE, data))
+
+ for data in [512 * " ", 5242 * "Hello World!\n"]:
+ compressed = rpc._Compress(data)
+ self.assertEqual(len(compressed), 2)
+ self.assertEqual(backend._Decompress(compressed), data)
+
+ def testDecompression(self):
+ self.assertRaises(AssertionError, backend._Decompress, "")
+ self.assertRaises(AssertionError, backend._Decompress, [""])
+ self.assertRaises(AssertionError, backend._Decompress,
+ ("unknown compression", "data"))
+ self.assertRaises(Exception, backend._Decompress,
+ (constants.RPC_ENCODING_ZLIB_BASE64, "invalid zlib data"))
+
+
+class TestRpcClientBase(unittest.TestCase):
+ def testNoHosts(self):
+ cdef = ("test_call", NotImplemented, None, rpc_defs.TMO_SLOW, [],
+ None, None, NotImplemented)
+ http_proc = _FakeRequestProcessor(NotImplemented)
+ client = rpc._RpcClientBase(rpc._StaticResolver([]), NotImplemented,
+ _req_process_fn=http_proc)
+ self.assertEqual(client._Call(cdef, [], []), {})
+
+ # Test wrong number of arguments
+ self.assertRaises(errors.ProgrammerError, client._Call,
+ cdef, [], [0, 1, 2])
+
+ def testTimeout(self):
+ def _CalcTimeout((arg1, arg2)):
+ return arg1 + arg2
+
+ def _VerifyRequest(exp_timeout, req):
+ self.assertEqual(req.read_timeout, exp_timeout)
+
+ req.success = True
+ req.resp_status_code = http.HTTP_OK
+ req.resp_body = serializer.DumpJson((True, hex(req.read_timeout)))
+
+ resolver = rpc._StaticResolver([
+ "192.0.2.1",
+ "192.0.2.2",
+ ])
+
+ nodes = [
+ "node1.example.com",
+ "node2.example.com",
+ ]
+
+ tests = [(100, None, 100), (30, None, 30)]
+ tests.extend((_CalcTimeout, i, i + 300)
+ for i in [0, 5, 16485, 30516])
+
+ for timeout, arg1, exp_timeout in tests:
+ cdef = ("test_call", NotImplemented, None, timeout, [
+ ("arg1", None, NotImplemented),
+ ("arg2", None, NotImplemented),
+ ], None, None, NotImplemented)
+
+ http_proc = _FakeRequestProcessor(compat.partial(_VerifyRequest,
+ exp_timeout))
+ client = rpc._RpcClientBase(resolver, NotImplemented,
+ _req_process_fn=http_proc)
+ result = client._Call(cdef, nodes, [arg1, 300])
+ self.assertEqual(len(result), len(nodes))
+ self.assertTrue(compat.all(not res.fail_msg and
+ res.payload == hex(exp_timeout)
+ for res in result.values()))
+
+ def testArgumentEncoder(self):
+ (AT1, AT2) = range(1, 3)
+
+ resolver = rpc._StaticResolver([
+ "192.0.2.5",
+ "192.0.2.6",
+ ])
+
+ nodes = [
+ "node5.example.com",
+ "node6.example.com",
+ ]
+
+ encoders = {
+ AT1: hex,
+ AT2: hash,
+ }
+
+ cdef = ("test_call", NotImplemented, None, rpc_defs.TMO_NORMAL, [
+ ("arg0", None, NotImplemented),
+ ("arg1", AT1, NotImplemented),
+ ("arg1", AT2, NotImplemented),
+ ], None, None, NotImplemented)
+
+ def _VerifyRequest(req):
+ req.success = True
+ req.resp_status_code = http.HTTP_OK
+ req.resp_body = serializer.DumpJson((True, req.post_data))
+
+ http_proc = _FakeRequestProcessor(_VerifyRequest)
+
+ for num in [0, 3796, 9032119]:
+ client = rpc._RpcClientBase(resolver, encoders.get,
+ _req_process_fn=http_proc)
+ result = client._Call(cdef, nodes, ["foo", num, "Hello%s" % num])
+ self.assertEqual(len(result), len(nodes))
+ for res in result.values():
+ self.assertFalse(res.fail_msg)
+ self.assertEqual(serializer.LoadJson(res.payload),
+ ["foo", hex(num), hash("Hello%s" % num)])
+
+ def testPostProc(self):
+ def _VerifyRequest(nums, req):
+ req.success = True
+ req.resp_status_code = http.HTTP_OK
+ req.resp_body = serializer.DumpJson((True, nums))
+
+ resolver = rpc._StaticResolver([
+ "192.0.2.90",
+ "192.0.2.95",
+ ])
+
+ nodes = [
+ "node90.example.com",
+ "node95.example.com",
+ ]
+
+ def _PostProc(res):
+ self.assertFalse(res.fail_msg)
+ res.payload = sum(res.payload)
+ return res
+
+ cdef = ("test_call", NotImplemented, None, rpc_defs.TMO_NORMAL, [],
+ None, _PostProc, NotImplemented)
+
+ # Seeded random generator
+ rnd = random.Random(20299)
+
+ for i in [0, 4, 74, 1391]:
+ nums = [rnd.randint(0, 1000) for _ in range(i)]
+ http_proc = _FakeRequestProcessor(compat.partial(_VerifyRequest, nums))
+ client = rpc._RpcClientBase(resolver, NotImplemented,
+ _req_process_fn=http_proc)
+ result = client._Call(cdef, nodes, [])
+ self.assertEqual(len(result), len(nodes))
+ for res in result.values():
+ self.assertFalse(res.fail_msg)
+ self.assertEqual(res.payload, sum(nums))
+
+ def testPreProc(self):
+ def _VerifyRequest(req):
+ req.success = True
+ req.resp_status_code = http.HTTP_OK
+ req.resp_body = serializer.DumpJson((True, req.post_data))
+
+ resolver = rpc._StaticResolver([
+ "192.0.2.30",
+ "192.0.2.35",
+ ])
+
+ nodes = [
+ "node30.example.com",
+ "node35.example.com",
+ ]
+
+ def _PreProc(node, data):
+ self.assertEqual(len(data), 1)
+ return data[0] + node
+
+ cdef = ("test_call", NotImplemented, None, rpc_defs.TMO_NORMAL, [
+ ("arg0", None, NotImplemented),
+ ], _PreProc, None, NotImplemented)
+
+ http_proc = _FakeRequestProcessor(_VerifyRequest)
+ client = rpc._RpcClientBase(resolver, NotImplemented,
+ _req_process_fn=http_proc)
+
+ for prefix in ["foo", "bar", "baz"]:
+ result = client._Call(cdef, nodes, [prefix])
+ self.assertEqual(len(result), len(nodes))
+ for (idx, (node, res)) in enumerate(result.items()):
+ self.assertFalse(res.fail_msg)
+ self.assertEqual(serializer.LoadJson(res.payload), prefix + node)
+
+ def testResolverOptions(self):
+ def _VerifyRequest(req):
+ req.success = True
+ req.resp_status_code = http.HTTP_OK
+ req.resp_body = serializer.DumpJson((True, req.post_data))
+
+ nodes = [
+ "node30.example.com",
+ "node35.example.com",
+ ]
+
+ def _Resolver(expected, hosts, options):
+ self.assertEqual(hosts, nodes)
+ self.assertEqual(options, expected)
+ return zip(hosts, nodes)
+
+ def _DynamicResolverOptions((arg0, )):
+ return sum(arg0)
+
+ tests = [
+ (None, None, None),
+ (rpc_defs.ACCEPT_OFFLINE_NODE, None, rpc_defs.ACCEPT_OFFLINE_NODE),
+ (False, None, False),
+ (True, None, True),
+ (0, None, 0),
+ (_DynamicResolverOptions, [1, 2, 3], 6),
+ (_DynamicResolverOptions, range(4, 19), 165),
+ ]
+
+ for (resolver_opts, arg0, expected) in tests:
+ cdef = ("test_call", NotImplemented, resolver_opts, rpc_defs.TMO_NORMAL, [
+ ("arg0", None, NotImplemented),
+ ], None, None, NotImplemented)
+
+ http_proc = _FakeRequestProcessor(_VerifyRequest)
+
+ client = rpc._RpcClientBase(compat.partial(_Resolver, expected),
+ NotImplemented, _req_process_fn=http_proc)
+ result = client._Call(cdef, nodes, [arg0])
+ self.assertEqual(len(result), len(nodes))
+ for (idx, (node, res)) in enumerate(result.items()):
+ self.assertFalse(res.fail_msg)
+
+
+class _FakeConfigForRpcRunner:
+ GetAllNodesInfo = NotImplemented
+
+ def GetNodeInfo(self, name):
+ return objects.Node(name=name)
+
+
+class TestRpcRunner(unittest.TestCase):
+ def testUploadFile(self):
+ data = 1779 * "Hello World\n"
+
+ tmpfile = tempfile.NamedTemporaryFile()
+ tmpfile.write(data)
+ tmpfile.flush()
+ st = os.stat(tmpfile.name)
+
+ def _VerifyRequest(req):
+ (uldata, ) = serializer.LoadJson(req.post_data)
+ self.assertEqual(len(uldata), 7)
+ self.assertEqual(uldata[0], tmpfile.name)
+ self.assertEqual(list(uldata[1]), list(rpc._Compress(data)))
+ self.assertEqual(uldata[2], st.st_mode)
+ self.assertEqual(uldata[3], "user%s" % os.getuid())
+ self.assertEqual(uldata[4], "group%s" % os.getgid())
+ self.assertEqual(uldata[5], st.st_atime)
+ self.assertEqual(uldata[6], st.st_mtime)
+
+ req.success = True
+ req.resp_status_code = http.HTTP_OK
+ req.resp_body = serializer.DumpJson((True, None))
+
+ http_proc = _FakeRequestProcessor(_VerifyRequest)
+ cfg = _FakeConfigForRpcRunner()
+ runner = rpc.RpcRunner(cfg, None, _req_process_fn=http_proc,
+ _getents=mocks.FakeGetentResolver)
+
+ nodes = [
+ "node1.example.com",
+ ]
+
+ result = runner.call_upload_file(nodes, tmpfile.name)
+ self.assertEqual(len(result), len(nodes))
+ for (idx, (node, res)) in enumerate(result.items()):
+ self.assertFalse(res.fail_msg)
if __name__ == "__main__":
]
def _TestSerializer(self, dump_fn, load_fn):
- for indent in [True, False]:
- for data in self._TESTDATA:
- self.failUnless(dump_fn(data, indent=indent).endswith("\n"))
- self.assertEqualValues(load_fn(dump_fn(data, indent=indent)), data)
+ for data in self._TESTDATA:
+ self.failUnless(dump_fn(data).endswith("\n"))
+ self.assertEqualValues(load_fn(dump_fn(data)), data)
def testGeneric(self):
self._TestSerializer(serializer.Dump, serializer.Load)
import operator
from ganeti import constants
+from ganeti import compat
from ganeti.utils import algo
import testutils
{ 1: "foo", 2: "bar", 5: "baz"})
+class TestInsertAtPos(unittest.TestCase):
+ def test(self):
+ a = [1, 5, 6]
+ b = [2, 3, 4]
+ self.assertEqual(algo.InsertAtPos(a, 1, b), [1, 2, 3, 4, 5, 6])
+ self.assertEqual(algo.InsertAtPos(a, 0, b), b + a)
+ self.assertEqual(algo.InsertAtPos(a, len(a), b), a + b)
+ self.assertEqual(algo.InsertAtPos(a, 2, b), [1, 5, 2, 3, 4, 6])
+
+
class TimeMock:
def __init__(self, values):
self.values = values
self.assertEqual(result, algo.JoinDisjointDicts(dict_b, dict_a))
+class TestSequenceToDict(unittest.TestCase):
+ def testEmpty(self):
+ self.assertEqual(algo.SequenceToDict([]), {})
+ self.assertEqual(algo.SequenceToDict({}), {})
+
+ def testSimple(self):
+ data = [(i, str(i), "test%s" % i) for i in range(391)]
+ self.assertEqual(algo.SequenceToDict(data),
+ dict((i, (i, str(i), "test%s" % i))
+ for i in range(391)))
+
+ def testCustomKey(self):
+ data = [(i, hex(i), "test%s" % i) for i in range(100)]
+ self.assertEqual(algo.SequenceToDict(data, key=compat.snd),
+ dict((hex(i), (i, hex(i), "test%s" % i))
+ for i in range(100)))
+ self.assertEqual(algo.SequenceToDict(data,
+ key=lambda (a, b, val): hash(val)),
+ dict((hash("test%s" % i), (i, hex(i), "test%s" % i))
+ for i in range(100)))
+
+ def testDuplicate(self):
+ self.assertRaises(ValueError, algo.SequenceToDict,
+ [(0, 0), (0, 0)])
+ self.assertRaises(ValueError, algo.SequenceToDict,
+ [(i, ) for i in range(200)] + [(10, )])
+
+
+class TestFlatToDict(unittest.TestCase):
+ def testNormal(self):
+ data = [
+ ("lv/xenvg", {"foo": "bar", "bar": "baz"}),
+ ("lv/xenfoo", {"foo": "bar", "baz": "blubb"}),
+ ("san/foo", {"ip": "127.0.0.1", "port": 1337}),
+ ("san/blubb/blibb", 54),
+ ]
+ reference = {
+ "lv": {
+ "xenvg": {"foo": "bar", "bar": "baz"},
+ "xenfoo": {"foo": "bar", "baz": "blubb"},
+ },
+ "san": {
+ "foo": {"ip": "127.0.0.1", "port": 1337},
+ "blubb": {"blibb": 54},
+ },
+ }
+ self.assertEqual(algo.FlatToDict(data), reference)
+
+ def testUnlikeDepth(self):
+ data = [
+ ("san/foo", {"ip": "127.0.0.1", "port": 1337}),
+ ("san/foo/blubb", 23), # Another foo entry under san
+ ("san/blubb/blibb", 54),
+ ]
+ self.assertRaises(AssertionError, algo.FlatToDict, data)
+
+
if __name__ == "__main__":
testutils.GanetiTestProgram()
--- /dev/null
+#!/usr/bin/python
+#
+
+# Copyright (C) 2006, 2007, 2010, 2011 Google Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA.
+
+
+"""Script for testing ganeti.utils.io (tests that require root access)"""
+
+import os
+import tempfile
+import shutil
+import errno
+
+from ganeti import constants
+from ganeti import utils
+from ganeti import compat
+from ganeti import errors
+
+import testutils
+
+
+class TestWriteFile(testutils.GanetiTestCase):
+ def setUp(self):
+ testutils.GanetiTestCase.setUp(self)
+ self.tmpdir = None
+ self.tfile = tempfile.NamedTemporaryFile()
+ self.did_pre = False
+ self.did_post = False
+ self.did_write = False
+
+ def tearDown(self):
+ testutils.GanetiTestCase.tearDown(self)
+ if self.tmpdir:
+ shutil.rmtree(self.tmpdir)
+
+ def testFileUid(self):
+ self.tmpdir = tempfile.mkdtemp()
+ target = utils.PathJoin(self.tmpdir, "target")
+ tuid = os.geteuid() + 1
+ utils.WriteFile(target, data="data", uid=tuid + 1)
+ self.assertFileUid(target, tuid + 1)
+ utils.WriteFile(target, data="data", uid=tuid)
+ self.assertFileUid(target, tuid)
+ utils.WriteFile(target, data="data", uid=tuid + 1,
+ keep_perms=utils.KP_IF_EXISTS)
+ self.assertFileUid(target, tuid)
+ utils.WriteFile(target, data="data", keep_perms=utils.KP_ALWAYS)
+ self.assertFileUid(target, tuid)
+
+ def testNewFileUid(self):
+ self.tmpdir = tempfile.mkdtemp()
+ target = utils.PathJoin(self.tmpdir, "target")
+ tuid = os.geteuid() + 1
+ utils.WriteFile(target, data="data", uid=tuid,
+ keep_perms=utils.KP_IF_EXISTS)
+ self.assertFileUid(target, tuid)
+
+ def testFileGid(self):
+ self.tmpdir = tempfile.mkdtemp()
+ target = utils.PathJoin(self.tmpdir, "target")
+ tgid = os.getegid() + 1
+ utils.WriteFile(target, data="data", gid=tgid + 1)
+ self.assertFileGid(target, tgid + 1)
+ utils.WriteFile(target, data="data", gid=tgid)
+ self.assertFileGid(target, tgid)
+ utils.WriteFile(target, data="data", gid=tgid + 1,
+ keep_perms=utils.KP_IF_EXISTS)
+ self.assertFileGid(target, tgid)
+ utils.WriteFile(target, data="data", keep_perms=utils.KP_ALWAYS)
+ self.assertFileGid(target, tgid)
+
+ def testNewFileGid(self):
+ self.tmpdir = tempfile.mkdtemp()
+ target = utils.PathJoin(self.tmpdir, "target")
+ tgid = os.getegid() + 1
+ utils.WriteFile(target, data="data", gid=tgid,
+ keep_perms=utils.KP_IF_EXISTS)
+ self.assertFileGid(target, tgid)
+
+
+if __name__ == "__main__":
+ testutils.GanetiTestProgram()
self._test(files, expected)
-class TestWriteFile(unittest.TestCase):
+class TestWriteFile(testutils.GanetiTestCase):
def setUp(self):
+ testutils.GanetiTestCase.setUp(self)
self.tmpdir = None
self.tfile = tempfile.NamedTemporaryFile()
self.did_pre = False
self.did_write = False
def tearDown(self):
+ testutils.GanetiTestCase.tearDown(self)
if self.tmpdir:
shutil.rmtree(self.tmpdir)
self.assertRaises(errors.ProgrammerError, utils.WriteFile, self.tfile.name)
self.assertRaises(errors.ProgrammerError, utils.WriteFile,
self.tfile.name, data="test", atime=0)
+ self.assertRaises(errors.ProgrammerError, utils.WriteFile, self.tfile.name,
+ mode=0400, keep_perms=utils.KP_ALWAYS)
+ self.assertRaises(errors.ProgrammerError, utils.WriteFile, self.tfile.name,
+ uid=0, keep_perms=utils.KP_ALWAYS)
+ self.assertRaises(errors.ProgrammerError, utils.WriteFile, self.tfile.name,
+ gid=0, keep_perms=utils.KP_ALWAYS)
+ self.assertRaises(errors.ProgrammerError, utils.WriteFile, self.tfile.name,
+ mode=0400, uid=0, keep_perms=utils.KP_ALWAYS)
def testPreWrite(self):
utils.WriteFile(self.tfile.name, data="", prewrite=self.markPre)
self.assertTrue("test" in os.listdir(self.tmpdir))
self.assertEqual(len(os.listdir(self.tmpdir)), 2)
+ def testFileMode(self):
+ self.tmpdir = tempfile.mkdtemp()
+ target = utils.PathJoin(self.tmpdir, "target")
+ self.assertRaises(OSError, utils.WriteFile, target, data="data",
+ keep_perms=utils.KP_ALWAYS)
+ # All masks have only user bits set, to avoid interactions with umask
+ utils.WriteFile(target, data="data", mode=0200)
+ self.assertFileMode(target, 0200)
+ utils.WriteFile(target, data="data", mode=0400,
+ keep_perms=utils.KP_IF_EXISTS)
+ self.assertFileMode(target, 0200)
+ utils.WriteFile(target, data="data", keep_perms=utils.KP_ALWAYS)
+ self.assertFileMode(target, 0200)
+ utils.WriteFile(target, data="data", mode=0700)
+ self.assertFileMode(target, 0700)
+
+ def testNewFileMode(self):
+ self.tmpdir = tempfile.mkdtemp()
+ target = utils.PathJoin(self.tmpdir, "target")
+ utils.WriteFile(target, data="data", mode=0400,
+ keep_perms=utils.KP_IF_EXISTS)
+ self.assertFileMode(target, 0400)
class TestFileID(testutils.GanetiTestCase):
def testEquality(self):
handle.write("192.0.2.1 router gw\n")
finally:
handle.close()
+ os.chmod(self.tmpname, 0644)
def testSettingNewIp(self):
utils.SetEtcHostsEntry(self.tmpname, "198.51.100.4", "myhost.example.com",
sw = None
self.assertEqual(buf.getvalue(), "")
+ def testEmptyLines(self):
+ buf = StringIO()
+ sw = utils.ShellWriter(buf)
+
+ def _AddLevel(level):
+ if level == 6:
+ return
+ sw.IncIndent()
+ try:
+ # Add empty line, it should not be indented
+ sw.Write("")
+ sw.Write(str(level))
+ _AddLevel(level + 1)
+ finally:
+ sw.DecIndent()
+
+ _AddLevel(1)
+
+ self.assertEqual(buf.getvalue(),
+ "".join("\n%s%s\n" % (i * " ", i) for i in range(1, 6)))
+
class TestNormalizeAndValidateMac(unittest.TestCase):
def testInvalid(self):
self.assertRaises(errors.ParseError, utils.ParseCpuMask, data)
+class TestParseMultiCpuMask(unittest.TestCase):
+ """Test case for the ParseMultiCpuMask function."""
+
+ def testWellFormed(self):
+ self.assertEqual(utils.ParseMultiCpuMask(""), [])
+ self.assertEqual(utils.ParseMultiCpuMask("1"), [[1]])
+ self.assertEqual(utils.ParseMultiCpuMask("0-2,4,5-5"), [[0, 1, 2, 4, 5]])
+ self.assertEqual(utils.ParseMultiCpuMask("all"), [[-1]])
+ self.assertEqual(utils.ParseMultiCpuMask("0-2:all:4,6-8"),
+ [[0, 1, 2], [-1], [4, 6, 7, 8]])
+
+ def testInvalidInput(self):
+ for data in ["garbage", "0,", "0-1-2", "2-1", "1-a", "all-all"]:
+      self.assertRaises(errors.ParseError, utils.ParseMultiCpuMask, data)
+
+
class TestGetMounts(unittest.TestCase):
"""Test case for GetMounts()."""
wp.TerminateWorkers()
self._CheckWorkerCount(wp, 0)
+ def testActive(self):
+ ctx = CountingContext()
+ wp = workerpool.WorkerPool("TestActive", 5, CountingBaseWorker)
+ try:
+ self._CheckWorkerCount(wp, 5)
+ self.assertTrue(wp._active)
+
+ # Process some tasks
+ for _ in range(10):
+ wp.AddTask((ctx, None))
+
+ wp.Quiesce()
+ self._CheckNoTasks(wp)
+ self.assertEquals(ctx.GetDoneTasks(), 10)
+
+ # Repeat a few times
+ for count in range(10):
+ # Deactivate pool
+ wp.SetActive(False)
+ self._CheckNoTasks(wp)
+
+ # Queue some more tasks
+ for _ in range(10):
+ wp.AddTask((ctx, None))
+
+ for _ in range(5):
+ # Short delays to give other threads a chance to cause breakage
+ time.sleep(.01)
+ wp.AddTask((ctx, "Hello world %s" % 999))
+ self.assertFalse(wp._active)
+
+ self.assertEquals(ctx.GetDoneTasks(), 10 + (count * 15))
+
+ # Start processing again
+ wp.SetActive(True)
+ self.assertTrue(wp._active)
+
+ # Wait for tasks to finish
+ wp.Quiesce()
+ self._CheckNoTasks(wp)
+ self.assertEquals(ctx.GetDoneTasks(), 10 + (count * 15) + 15)
+
+ self._CheckWorkerCount(wp, 5)
+ finally:
+ wp.TerminateWorkers()
+ self._CheckWorkerCount(wp, 0)
+
def testChecksum(self):
# Tests whether all tasks are run and, since we're only using a single
# thread, whether everything is started in order.
--- /dev/null
+#!/usr/bin/python
+#
+
+# Copyright (C) 2011 Google Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA.
+
+
+"""Script for testing lock performance"""
+
+import os
+import sys
+import time
+import optparse
+import threading
+import resource
+
+from ganeti import locking
+
+
+def ParseOptions():
+ """Parses the command line options.
+
+ In case of command line errors, it will show the usage and exit the
+ program.
+
+ @return: the options in a tuple
+
+ """
+ parser = optparse.OptionParser()
+ parser.add_option("-t", dest="thread_count", default=1, type="int",
+ help="Number of threads", metavar="NUM")
+ parser.add_option("-d", dest="duration", default=5, type="float",
+ help="Duration", metavar="SECS")
+
+ (opts, args) = parser.parse_args()
+
+ if opts.thread_count < 1:
+ parser.error("Number of threads must be at least 1")
+
+ return (opts, args)
+
+
+class State:
+ def __init__(self, thread_count):
+ """Initializes this class.
+
+ """
+ self.verify = [0 for _ in range(thread_count)]
+ self.counts = [0 for _ in range(thread_count)]
+ self.total_count = 0
+
+
+def _Counter(lock, state, me):
+ """Thread function for acquiring locks.
+
+ """
+ counts = state.counts
+ verify = state.verify
+
+ while True:
+ lock.acquire()
+ try:
+ verify[me] = 1
+
+ counts[me] += 1
+
+ state.total_count += 1
+
+ if state.total_count % 1000 == 0:
+ sys.stdout.write(" %8d\r" % state.total_count)
+ sys.stdout.flush()
+
+ if sum(verify) != 1:
+ print "Inconsistent state!"
+ os._exit(1) # pylint: disable=W0212
+
+ verify[me] = 0
+ finally:
+ lock.release()
+
+
+def main():
+ (opts, _) = ParseOptions()
+
+ lock = locking.SharedLock("TestLock")
+
+ state = State(opts.thread_count)
+
+ lock.acquire(shared=0)
+ try:
+ for i in range(opts.thread_count):
+ t = threading.Thread(target=_Counter, args=(lock, state, i))
+ t.setDaemon(True)
+ t.start()
+
+ start = time.clock()
+ finally:
+ lock.release()
+
+ while True:
+ if (time.clock() - start) > opts.duration:
+ break
+ time.sleep(0.1)
+
+ # Make sure we get a consistent view
+ lock.acquire(shared=0)
+
+ lock_cputime = time.clock() - start
+
+ res = resource.getrusage(resource.RUSAGE_SELF)
+
+ print "Total number of acquisitions: %s" % state.total_count
+ print "Per-thread acquisitions:"
+ for (i, count) in enumerate(state.counts):
+ print (" Thread %s: %d (%0.1f%%)" %
+ (i, count, (100.0 * count / state.total_count)))
+
+ print "Benchmark CPU time: %0.3fs" % lock_cputime
+ print ("Average time per lock acquisition: %0.5fms" %
+ (1000.0 * lock_cputime / state.total_count))
+ print "Process:"
+ print " User time: %0.3fs" % res.ru_utime
+ print " System time: %0.3fs" % res.ru_stime
+ print " Total time: %0.3fs" % (res.ru_utime + res.ru_stime)
+
+ # Exit directly without attempting to clean up threads
+ os._exit(0) # pylint: disable=W0212
+
+
+if __name__ == "__main__":
+ main()
self.daemons_gid = gid
self.admin_gid = gid
+
+ def LookupUid(self, uid):
+ return "user%s" % uid
+
+ def LookupGid(self, gid):
+ return "group%s" % gid
--- /dev/null
+#!/usr/bin/python
+#
+
+# Copyright (C) 2011 Google Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA.
+
+
+"""Script for testing for an issue in PycURL"""
+
+import sys
+import warnings
+import unittest
+import textwrap
+import pycurl
+
+import testutils
+
+
+DETAILS = [
+ ("PycURL 7.19.0 added a new function named \"reset\" on \"pycurl.Curl\""
+ " objects to release all references to other resources. Unfortunately that"
+ " version contains a bug with reference counting on the \"None\" singleton,"
+ " leading to a crash of the Python interpreter after a certain amount of"
+ " performed requests. Your system uses a version of PycURL affected by this"
+ " issue. A patch is available at [1]. A detailed description can be found"
+ " at [2].\n"),
+ "\n",
+ ("[1] http://sf.net/tracker/?"
+ "func=detail&aid=2893665&group_id=28236&atid=392777\n"),
+ "[2] https://bugzilla.redhat.com/show_bug.cgi?id=624559",
+ ]
+
+
+class TestPyCurlReset(unittest.TestCase):
+ def test(self):
+ start_refcount = sys.getrefcount(None)
+ abort_refcount = int(start_refcount * 0.8)
+
+ assert start_refcount > 100
+
+ curl = pycurl.Curl()
+ try:
+ reset_fn = curl.reset
+ except AttributeError:
+ pass
+ else:
+ for i in range(start_refcount * 2):
+ reset_fn()
+        # The bug can be detected if calling "reset" several times
+        # continuously reduces the number of references
+ if sys.getrefcount(None) < abort_refcount:
+ print >>sys.stderr, "#" * 78
+ for line in DETAILS:
+ print >>sys.stderr, textwrap.fill(line, width=78)
+ print >>sys.stderr, "#" * 78
+ break
+
+
+if __name__ == "__main__":
+ testutils.GanetiTestProgram()
actual_mode = stat.S_IMODE(st.st_mode)
self.assertEqual(actual_mode, expected_mode)
+ def assertFileUid(self, file_name, expected_uid):
+ """Checks that the user id of a file is what we expect.
+
+ @type file_name: str
+ @param file_name: the file whose contents we should check
+ @type expected_uid: int
+ @param expected_uid: the user id we expect
+
+ """
+ st = os.stat(file_name)
+ actual_uid = st.st_uid
+ self.assertEqual(actual_uid, expected_uid)
+
+ def assertFileGid(self, file_name, expected_gid):
+ """Checks that the group id of a file is what we expect.
+
+ @type file_name: str
+ @param file_name: the file whose contents we should check
+ @type expected_gid: int
+ @param expected_gid: the group id we expect
+
+ """
+ st = os.stat(file_name)
+ actual_gid = st.st_gid
+ self.assertEqual(actual_gid, expected_gid)
+
def assertEqualValues(self, first, second, msg=None):
"""Compares two values whether they're equal.
const=[], default=[{}]),
cli.cli_option("--no-confd", dest="do_confd_tests",
help="Skip confd queries",
- action="store_false", default=True),
+ action="store_false", default=constants.ENABLE_CONFD),
cli.cli_option("--rename", dest="rename", default=None,
help=("Give one unused instance name which is taken"
" to start the renaming sequence"),
self.opts = options
self.instances = args
self.bep = {
- constants.BE_MEMORY: options.mem_size,
+ constants.BE_MINMEM: options.mem_size,
+ constants.BE_MAXMEM: options.mem_size,
constants.BE_VCPUS: options.vcpu_count,
}
Err("When one node is available/selected the disk template must"
" be 'diskless', 'file' or 'plain'")
+ if opts.do_confd_tests and not constants.ENABLE_CONFD:
+ Err("You selected confd tests but confd was disabled at configure time")
+
has_err = True
try:
self.BurnCreateInstances()
options.SERVER_PEM_PATH = options.data_dir + "/server.pem"
options.KNOWN_HOSTS_PATH = options.data_dir + "/known_hosts"
options.RAPI_CERT_FILE = options.data_dir + "/rapi.pem"
+ options.SPICE_CERT_FILE = options.data_dir + "/spice.pem"
+ options.SPICE_CACERT_FILE = options.data_dir + "/spice-ca.pem"
options.RAPI_USERS_FILE = options.data_dir + "/rapi/users"
options.RAPI_USERS_FILE_PRE24 = options.data_dir + "/rapi_users"
options.CONFD_HMAC_KEY = options.data_dir + "/hmac.key"
backup=True)
if not options.dry_run:
- bootstrap.GenerateClusterCrypto(False, False, False, False,
- nodecert_file=options.SERVER_PEM_PATH,
- rapicert_file=options.RAPI_CERT_FILE,
- hmackey_file=options.CONFD_HMAC_KEY,
- cds_file=options.CDS_FILE)
+ bootstrap.GenerateClusterCrypto(False, False, False, False, False,
+ nodecert_file=options.SERVER_PEM_PATH,
+ rapicert_file=options.RAPI_CERT_FILE,
+ spicecert_file=options.SPICE_CERT_FILE,
+ spicecacert_file=options.SPICE_CACERT_FILE,
+ hmackey_file=options.CONFD_HMAC_KEY,
+ cds_file=options.CDS_FILE)
except Exception:
logging.critical("Writing configuration failed. It is probably in an"
--- /dev/null
+#!/usr/bin/python
+#
+
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA.
+
+# pylint: disable=C0103
+
+"""confd client program
+
+This can be used to test and debug confd daemon functionality.
+
+"""
+
+import sys
+import optparse
+import time
+
+from ganeti import constants
+from ganeti import cli
+from ganeti import utils
+
+from ganeti.confd import client as confd_client
+
+USAGE = ("\tconfd-client [--addr=host] [--hmac=key]")
+
+LOG_HEADERS = {
+ 0: "- ",
+ 1: "* ",
+ 2: ""
+ }
+
+OPTIONS = [
+ cli.cli_option("--hmac", dest="hmac", default=None,
+ help="Specify HMAC key instead of reading"
+ " it from the filesystem",
+ metavar="<KEY>"),
+ cli.cli_option("-a", "--address", dest="mc", default="localhost",
+ help="Server IP to query (default: 127.0.0.1)",
+ metavar="<ADDRESS>"),
+ cli.cli_option("-r", "--requests", dest="requests", default=100,
+ help="Number of requests for the timing tests",
+ type="int", metavar="<REQUESTS>"),
+ ]
+
+
+def Log(msg, *args, **kwargs):
+ """Simple function that prints out its argument.
+
+ """
+ if args:
+ msg = msg % args
+ indent = kwargs.get("indent", 0)
+ sys.stdout.write("%*s%s%s\n" % (2 * indent, "",
+ LOG_HEADERS.get(indent, " "), msg))
+ sys.stdout.flush()
+
+
+def LogAtMost(msgs, count, **kwargs):
+ """Log at most count of given messages.
+
+ """
+ for m in msgs[:count]:
+ Log(m, **kwargs)
+ if len(msgs) > count:
+ Log("...", **kwargs)
+
+
+def Err(msg, exit_code=1):
+ """Simple error logging that prints to stderr.
+
+ """
+ sys.stderr.write(msg + "\n")
+ sys.stderr.flush()
+ sys.exit(exit_code)
+
+
+def Usage():
+ """Shows program usage information and exits the program."""
+
+ print >> sys.stderr, "Usage:"
+ print >> sys.stderr, USAGE
+ sys.exit(2)
+
+
+class TestClient(object):
+ """Confd test client."""
+
+ def __init__(self):
+ """Constructor."""
+ self.opts = None
+ self.cluster_master = None
+ self.instance_ips = None
+ self.is_timing = False
+ self.ParseOptions()
+
+ def ParseOptions(self):
+ """Parses the command line options.
+
+ In case of command line errors, it will show the usage and exit the
+ program.
+
+ """
+ parser = optparse.OptionParser(usage="\n%s" % USAGE,
+ version=("%%prog (ganeti) %s" %
+ constants.RELEASE_VERSION),
+ option_list=OPTIONS)
+
+ options, args = parser.parse_args()
+ if args:
+ Usage()
+
+ if options.hmac is None:
+ options.hmac = utils.ReadFile(constants.CONFD_HMAC_KEY)
+ self.hmac_key = options.hmac
+
+ self.mc_list = [options.mc]
+
+ self.opts = options
+
+ def ConfdCallback(self, reply):
+ """Callback for confd queries"""
+ if reply.type == confd_client.UPCALL_REPLY:
+ answer = reply.server_reply.answer
+ reqtype = reply.orig_request.type
+ if reply.server_reply.status != constants.CONFD_REPL_STATUS_OK:
+ Log("Query %s gave non-ok status %s: %s" % (reply.orig_request,
+ reply.server_reply.status,
+ reply.server_reply))
+ if self.is_timing:
+ Err("Aborting timing tests")
+ if reqtype == constants.CONFD_REQ_CLUSTER_MASTER:
+ Err("Cannot continue after master query failure")
+ if reqtype == constants.CONFD_REQ_INSTANCES_IPS_LIST:
+ Err("Cannot continue after instance IP list query failure")
+ return
+ if self.is_timing:
+ return
+ if reqtype == constants.CONFD_REQ_PING:
+ Log("Ping: OK")
+ elif reqtype == constants.CONFD_REQ_CLUSTER_MASTER:
+ Log("Master: OK (%s)", answer)
+ if self.cluster_master is None:
+ # only assign the first time, in the plain query
+ self.cluster_master = answer
+ elif reqtype == constants.CONFD_REQ_NODE_ROLE_BYNAME:
+ if answer == constants.CONFD_NODE_ROLE_MASTER:
+ Log("Node role for master: OK",)
+ else:
+ Err("Node role for master: wrong: %s" % answer)
+ elif reqtype == constants.CONFD_REQ_NODE_PIP_LIST:
+ Log("Node primary ip query: OK")
+ LogAtMost(answer, 5, indent=1)
+ elif reqtype == constants.CONFD_REQ_MC_PIP_LIST:
+ Log("Master candidates primary IP query: OK")
+ LogAtMost(answer, 5, indent=1)
+ elif reqtype == constants.CONFD_REQ_INSTANCES_IPS_LIST:
+ Log("Instance primary IP query: OK")
+ if not answer:
+ Log("no IPs received", indent=1)
+ else:
+ LogAtMost(answer, 5, indent=1)
+ self.instance_ips = answer
+ elif reqtype == constants.CONFD_REQ_NODE_PIP_BY_INSTANCE_IP:
+ Log("Instance IP to node IP query: OK")
+ if not answer:
+ Log("no mapping received", indent=1)
+ else:
+ LogAtMost(answer, 5, indent=1)
+ else:
+ Log("Unhandled reply %s, please fix the client", reqtype)
+ print answer
+
+ def DoConfdRequestReply(self, req):
+ self.confd_counting_callback.RegisterQuery(req.rsalt)
+ self.confd_client.SendRequest(req, async=False)
+ while not self.confd_counting_callback.AllAnswered():
+ if not self.confd_client.ReceiveReply():
+ Err("Did not receive all expected confd replies")
+ break
+
+ def TestConfd(self):
+ """Run confd queries for the cluster.
+
+ """
+ Log("Checking confd results")
+
+ filter_callback = confd_client.ConfdFilterCallback(self.ConfdCallback)
+ counting_callback = confd_client.ConfdCountingCallback(filter_callback)
+ self.confd_counting_callback = counting_callback
+
+ self.confd_client = confd_client.ConfdClient(self.hmac_key,
+ self.mc_list,
+ counting_callback)
+
+ tests = [
+ {"type": constants.CONFD_REQ_PING},
+ {"type": constants.CONFD_REQ_CLUSTER_MASTER},
+ {"type": constants.CONFD_REQ_CLUSTER_MASTER,
+ "query": {constants.CONFD_REQQ_FIELDS:
+ [constants.CONFD_REQFIELD_NAME,
+ constants.CONFD_REQFIELD_IP,
+ constants.CONFD_REQFIELD_MNODE_PIP,
+ ]}},
+ {"type": constants.CONFD_REQ_NODE_ROLE_BYNAME},
+ {"type": constants.CONFD_REQ_NODE_PIP_LIST},
+ {"type": constants.CONFD_REQ_MC_PIP_LIST},
+ {"type": constants.CONFD_REQ_INSTANCES_IPS_LIST,
+ "query": None},
+ {"type": constants.CONFD_REQ_NODE_PIP_BY_INSTANCE_IP},
+ ]
+
+ for kwargs in tests:
+ if kwargs["type"] == constants.CONFD_REQ_NODE_ROLE_BYNAME:
+ assert self.cluster_master is not None
+ kwargs["query"] = self.cluster_master
+ elif kwargs["type"] == constants.CONFD_REQ_NODE_PIP_BY_INSTANCE_IP:
+ kwargs["query"] = {constants.CONFD_REQQ_IPLIST: self.instance_ips}
+
+ # pylint: disable=W0142
+ # used ** magic
+ req = confd_client.ConfdClientRequest(**kwargs)
+ self.DoConfdRequestReply(req)
+
+ def TestTiming(self):
+ """Run timing tests.
+
+ """
+ # timing tests
+ if self.opts.requests <= 0:
+ return
+ Log("Timing tests")
+ self.is_timing = True
+ self.TimingOp("ping", {"type": constants.CONFD_REQ_PING})
+ self.TimingOp("instance ips",
+ {"type": constants.CONFD_REQ_INSTANCES_IPS_LIST})
+
+ def TimingOp(self, name, kwargs):
+ """Run a single timing test.
+
+ """
+ start = time.time()
+ for _ in range(self.opts.requests):
+ # pylint: disable=W0142
+ req = confd_client.ConfdClientRequest(**kwargs)
+ self.DoConfdRequestReply(req)
+ stop = time.time()
+ per_req = 1000 * (stop - start) / self.opts.requests
+ Log("%.3fms per %s request", per_req, name, indent=1)
+
+ def Run(self):
+ """Run all the tests.
+
+ """
+ self.TestConfd()
+ self.TestTiming()
+
+
+def main():
+ """Main function.
+
+ """
+ return TestClient().Run()
+
+
+if __name__ == "__main__":
+ main()
--- /dev/null
+#!/bin/bash
+#
+
+# Copyright (C) 2011 Google Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA.
+
+set -e -u
+
+USAGE_MSG="Usage: $0 {start|stop}"
+PATH=$PATH:/sbin:/usr/sbin:/usr/local/sbin
+
+# Start the master IP
+start() {
+ case $CLUSTER_IP_VERSION in
+ 4)
+ ARP_COMMAND="arping -q -U -c 3 -I $MASTER_NETDEV -s $MASTER_IP $MASTER_IP"
+ ;;
+ 6)
+ ARP_COMMAND="ndisc6 -q r 3 $MASTER_IP $MASTER_NETDEV"
+ ;;
+ *)
+ echo "Invalid cluster IP version specified: $CLUSTER_IP_VERSION" >&2
+ exit 1
+ ;;
+ esac
+
+ # Check if the master IP address is already configured on this machine
+ if fping -S 127.0.0.1 $MASTER_IP >/dev/null 2>&1; then
+ echo "Master IP address already configured on this machine. Doing nothing."
+ exit 0
+ fi
+
+ # Check if the master IP address is already configured on another machine
+ if fping $MASTER_IP >/dev/null 2>&1; then
+ echo "Error: master IP address configured on another machine." >&2
+ exit 1
+ fi
+
+ if ! ip addr add $MASTER_IP/$MASTER_NETMASK \
+ dev $MASTER_NETDEV label $MASTER_NETDEV:0; then
+ echo "Error during the activation of the master IP address" >&2
+ exit 1
+ fi
+
+  # Send gratuitous ARP to update neighbours' ARP cache
+ $ARP_COMMAND || :
+}
+
+# Stop the master IP
+stop() {
+ if ! ip addr del $MASTER_IP/$MASTER_NETMASK dev $MASTER_NETDEV; then
+ echo "Error during the deactivation of the master IP address" >&2
+ exit 1
+ fi
+}
+
+if (( $# < 1 )); then
+ echo $USAGE_MSG >&2
+ exit 1
+fi
+
+case "$1" in
+ start)
+ start
+ ;;
+ stop)
+ stop
+ ;;
+ *)
+ echo $USAGE_MSG >&2
+ exit 1
+ ;;
+esac
+
+exit 0
--- /dev/null
+#!/usr/bin/python
+#
+
+# Copyright (C) 2011 Google Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA.
+
+
+"""Tool to translate between ovf and ganeti backup format.
+
+"""
+
+import logging
+import optparse
+import os
+
+from ganeti import cli
+from ganeti import constants
+from ganeti import errors
+from ganeti import ovf
+
+
+IMPORT_MODE = "import"
+EXPORT_MODE = "export"
+
+
+def CheckOptions(parser, options_dict, required, forbidden, excluding, mode):
+  """Performs checks on the command line options.
+
+  Checks whether the required arguments are present, and that no arguments
+  unsupported in the current mode are given.
+
+ @type options_dict: list
+ @param options_dict: dictionary containing all the options from the command
+ line
+ @type required: list
+ @param required: list of pairs (option, argument) where 'option' is required
+ in mode 'mode'
+ @type forbidden: list
+ @param forbidden: list of pairs (option, argument) which are not allowed in
+ mode 'mode'
+ @type excluding: list
+ @param excluding: list of pairs (argument1, argument2); each pair contains
+ mutually exclusive arguments
+ @type mode: string
+ @param mode: current mode of the converter
+
+ """
+ for (option, argument) in required:
+ if not options_dict[option]:
+ parser.error("Argument %s is required for %s" % (argument, mode))
+ for (option, argument) in forbidden:
+ if options_dict[option]:
+ parser.error("Argument %s is not allowed in %s mode" % (argument, mode))
+ for (arg1, arg2) in excluding:
+ if options_dict[arg1] and options_dict[arg2]:
+ parser.error("Arguments %s and %s exclude each other" % (arg1, arg2))
+
+
+def ParseOptions():
+ """Parses the command line options and arguments.
+
+ In case of mismatching parameters, it will show the correct usage and exit.
+
+ @rtype: tuple
+ @return: (mode, sourcefile to read from, additional options)
+
+ """
+ usage = ("%%prog {%s|%s} <source-cfg-file> [options...]" %
+ (IMPORT_MODE, EXPORT_MODE))
+ parser = optparse.OptionParser(usage=usage)
+
+ #global options
+ parser.add_option(cli.DEBUG_OPT)
+ parser.add_option(cli.VERBOSE_OPT)
+ parser.add_option("-n", "--name", dest="name", action="store",
+ help="Name of the instance")
+ parser.add_option("--output-dir", dest="output_dir",
+ help="Path to the output directory")
+
+ #import options
+ import_group = optparse.OptionGroup(parser, "Import options")
+ import_group.add_option(cli.BACKEND_OPT)
+ import_group.add_option(cli.DISK_OPT)
+ import_group.add_option(cli.DISK_TEMPLATE_OPT)
+ import_group.add_option(cli.HYPERVISOR_OPT)
+ import_group.add_option(cli.NET_OPT)
+ import_group.add_option(cli.NONICS_OPT)
+ import_group.add_option(cli.OS_OPT)
+ import_group.add_option(cli.OSPARAMS_OPT)
+ import_group.add_option(cli.TAG_ADD_OPT)
+ parser.add_option_group(import_group)
+
+ #export options
+ export_group = optparse.OptionGroup(parser, "Export options")
+ export_group.add_option("--compress", dest="compression",
+ action="store_true", default=False,
+ help="The exported disk will be compressed to tar.gz")
+ export_group.add_option("--external", dest="ext_usage",
+ action="store_true", default=False,
+ help="The package will be used externally (ommits the"
+ " Ganeti-specific parts of configuration)")
+ export_group.add_option("-f", "--format", dest="disk_format",
+ action="store",
+ choices=("raw", "cow", "vmdk"),
+ help="Disk format for export (one of raw/cow/vmdk)")
+ export_group.add_option("--ova", dest="ova_package",
+ action="store_true", default=False,
+ help="Export everything into OVA package")
+ parser.add_option_group(export_group)
+
+ options, args = parser.parse_args()
+ if len(args) != 2:
+ parser.error("Wrong number of arguments")
+ mode = args.pop(0)
+ input_path = os.path.abspath(args.pop(0))
+
+ if mode == IMPORT_MODE:
+ required = []
+ forbidden = [
+ ("compression", "--compress"),
+ ("disk_format", "--format"),
+ ("ext_usage", "--external"),
+ ("ova_package", "--ova"),
+ ]
+ excluding = [("nics", "no_nics")]
+ elif mode == EXPORT_MODE:
+ required = [("disk_format", "--format")]
+ forbidden = [
+ ("beparams", "--backend-parameters"),
+ ("disk_template", "--disk-template"),
+ ("disks", "--disk"),
+ ("hypervisor", "--hypervisor-parameters"),
+ ("nics", "--net"),
+ ("no_nics", "--no-nics"),
+ ("os", "--os-type"),
+ ("osparams", "--os-parameters"),
+ ("tags", "--tags"),
+ ]
+ excluding = []
+ else:
+ parser.error("First argument should be either '%s' or '%s'" %
+ (IMPORT_MODE, EXPORT_MODE))
+
+ options_dict = vars(options)
+ CheckOptions(parser, options_dict, required, forbidden, excluding, mode)
+
+ return (mode, input_path, options)
+
+
+def SetupLogging(options):
+ """Setting up logging infrastructure.
+
+ @type options: optparse.Values
+ @param options: parsed command line options
+
+ """
+ formatter = logging.Formatter("%(asctime)s: %(levelname)s %(message)s")
+
+ stderr_handler = logging.StreamHandler()
+ stderr_handler.setFormatter(formatter)
+ if options.debug:
+ stderr_handler.setLevel(logging.NOTSET)
+ elif options.verbose:
+ stderr_handler.setLevel(logging.INFO)
+ else:
+ stderr_handler.setLevel(logging.WARNING)
+
+ root_logger = logging.getLogger("")
+ root_logger.setLevel(logging.NOTSET)
+ root_logger.addHandler(stderr_handler)
+
+
+def main():
+ """Main routine.
+
+ """
+ (mode, input_path, options) = ParseOptions()
+ SetupLogging(options)
+ logging.info("Chosen %s mode, reading the %s file", mode, input_path)
+ assert mode in (IMPORT_MODE, EXPORT_MODE)
+ converter = None
+ try:
+ if mode == IMPORT_MODE:
+ converter = ovf.OVFImporter(input_path, options)
+ elif mode == EXPORT_MODE:
+ converter = ovf.OVFExporter(input_path, options)
+ converter.Parse()
+ converter.Save()
+ except errors.OpPrereqError, err:
+ if converter:
+ converter.Cleanup()
+ logging.exception(err)
+ return constants.EXIT_FAILURE
+
+
+if __name__ == "__main__":
+ main()