echo ''; \
echo '"""'; \
echo ''; \
+ echo '# pylint: disable-msg=C0301,C0324'; \
+ echo '# because this is autogenerated, we do not want'; \
+ echo '# style warnings'; \
+ echo ''; \
echo "PACKAGE_VERSION = '$(PACKAGE_VERSION)'"; \
echo "VERSION_MAJOR = '$(VERSION_MAJOR)'"; \
echo "VERSION_MINOR = '$(VERSION_MINOR)'"; \
check-local:
$(CHECK_PYTHON_CODE) $(check_python_code)
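+# convenience target for manual checks: run as "make lint" (needs pylint
+# installed)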
+.PHONY: lint
+lint: ganeti
+ pylint $(LINT_OPTS) ganeti $(dist_sbin_SCRIPTS) $(dist_tools_SCRIPTS)
+
# a dist hook rule for catching revision control directories
distcheck-hook:
if find $(top_distdir) -name .svn -or -name .git | grep .; then \
"""
+# pylint: disable-msg=C0103
+# C0103: Invalid name ganeti-confd
+
import os
import sys
import logging
-import pyinotify
import time
+try:
+ # pylint: disable-msg=E0611
+ from pyinotify import pyinotify
+except ImportError:
+ import pyinotify
+
from optparse import OptionParser
from ganeti import asyncnotifier
from ganeti import constants
from ganeti import errors
from ganeti import daemon
-from ganeti import ssconf
class ConfdAsyncUDPServer(daemon.AsyncUDPSocket):
self.port = port
self.processor = processor
self.bind((bind_address, port))
- logging.debug("listening on ('%s':%d)" % (bind_address, port))
+ logging.debug("listening on ('%s':%d)", bind_address, port)
# this method is overriding a daemon.AsyncUDPSocket method
def handle_datagram(self, payload_in, ip, port):
class ConfdInotifyEventHandler(pyinotify.ProcessEvent):
def __init__(self, watch_manager, callback,
- file=constants.CLUSTER_CONF_FILE):
+ filename=constants.CLUSTER_CONF_FILE):
"""Constructor for ConfdInotifyEventHandler
@type watch_manager: L{pyinotify.WatchManager}
@param watch_manager: ganeti-confd inotify watch manager
@type callback: function accepting a boolean
@param callback: function to call when an inotify event happens
- @type file: string
- @param file: config file to watch
+ @type filename: string
+ @param filename: config file to watch
"""
# no need to call the parent's constructor
self.watch_manager = watch_manager
self.callback = callback
+ # pylint: disable-msg=E1103
+ # pylint for some reason doesn't see the below constants
self.mask = pyinotify.EventsCodes.IN_IGNORED | \
pyinotify.EventsCodes.IN_MODIFY
- self.file = file
+ self.file = filename
self.watch_handle = None
def enable(self):
# IN_IGNORED event from inotify, because of the file removal (which is
# contextual with the replacement). In such a case we need to create
# another watcher for the "new" file.
- logging.debug("Received 'ignored' inotify event for %s" % event.path)
+ logging.debug("Received 'ignored' inotify event for %s", event.path)
self.watch_handle = None
try:
# going to reload the file after setting up the new watch.
self.callback(False)
except errors.ConfdFatalError, err:
- logging.critical("Critical error, shutting down: %s" % err)
+ logging.critical("Critical error, shutting down: %s", err)
sys.exit(constants.EXIT_FAILURE)
except:
# we need to catch any exception here, log it, but proceed, because even
# usually happen in Ganeti, as the config file is normally replaced by a
# new one, at filesystem level, rather than actually modified (see
# utils.WriteFile)
- logging.debug("Received 'modify' inotify event for %s" % event.path)
+ logging.debug("Received 'modify' inotify event for %s", event.path)
try:
self.callback(True)
except errors.ConfdFatalError, err:
- logging.critical("Critical error, shutting down: %s" % err)
+ logging.critical("Critical error, shutting down: %s", err)
sys.exit(constants.EXIT_FAILURE)
except:
# we need to catch any exception here, log it, but proceed, because even
logging.error("Unexpected exception", exc_info=True)
def process_default(self, event):
- logging.error("Received unhandled inotify event: %s" % event)
+ logging.error("Received unhandled inotify event: %s", event)
class ConfdConfigurationReloader(object):
self._ResetTimer()
-def CheckConfd(options, args):
+def CheckConfd(_, args):
"""Initial checks whether to run exit with a failure.
"""
+ if args: # confd doesn't take any arguments
+ print >> sys.stderr, ("Usage: %s [-f] [-d] [-b ADDRESS]" % sys.argv[0])
+ sys.exit(constants.EXIT_FAILURE)
+
# TODO: collapse HMAC daemons handling in daemons GenericMain, when we'll
# have more than one.
if not os.path.isfile(constants.HMAC_CLUSTER_KEY):
sys.exit(constants.EXIT_FAILURE)
-def ExecConfd(options, args):
+def ExecConfd(options, _):
"""Main confd function, executed with PID file held
"""
+ # TODO: clarify how the server and reloader variables work (they are
+ # not used)
+ # pylint: disable-msg=W0612
mainloop = daemon.Mainloop()
# Asynchronous confd UDP server
# If enabling the processor has failed, we can still go on, but confd will
# be disabled
logging.warning("Confd is starting in disabled mode")
- pass
+
server = ConfdAsyncUDPServer(options.bind_address, options.port, processor)
# Configuration reloader
"""
+# pylint: disable-msg=C0103
+# C0103: Invalid name ganeti-masterd
import os
import sys
class ClientRequestWorker(workerpool.BaseWorker):
+ # pylint: disable-msg=W0221
def RunTask(self, server, request, client_address):
"""Process the request.
try:
server.finish_request(request, client_address)
server.close_request(request)
- except:
+ except: # pylint: disable-msg=W0702
server.handle_error(request, client_address)
server.close_request(request)
self.request_workers.AddTask(self, request, client_address)
@utils.SignalHandled([signal.SIGINT, signal.SIGTERM])
- def serve_forever(self, signal_handlers=None):
+ def serve_forever(self, signal_handlers=None): # pylint: disable-msg=W0221
"""Handle one request at a time until told to quit."""
assert isinstance(signal_handlers, dict) and \
len(signal_handlers) > 0, \
READ_SIZE = 4096
def setup(self):
+ # pylint: disable-msg=W0201
+ # setup() is the API for initialising this class
self._buffer = ""
self._msgs = collections.deque()
self._ops = ClientOps(self.server)
def __init__(self, server):
self.server = server
- def handle_request(self, method, args):
+ def handle_request(self, method, args): # pylint: disable-msg=R0911
queue = self.server.context.jobqueue
# TODO: Parameter validation
+ # TODO: Rewrite to not exit in each 'if/elif' branch
+
if method == luxi.REQ_SUBMIT_JOB:
logging.info("Received new job")
ops = [opcodes.OpCode.LoadOpCode(state) for state in args]
op = opcodes.OpQueryClusterInfo()
return self._Query(op)
+ elif method == luxi.REQ_QUERY_TAGS:
+ kind, name = args
+ logging.info("Received tags query request")
+ op = opcodes.OpGetTags(kind=kind, name=name)
+ return self._Query(op)
+
elif method == luxi.REQ_QUEUE_SET_DRAIN_FLAG:
drain_flag = args
logging.info("Received queue drain flag change request to %s",
This class creates and holds common objects shared by all threads.
"""
+ # pylint: disable-msg=W0212
+ # we do want to ensure a singleton here
_instance = None
def __init__(self):
# Call function
result = int(bool(fn()))
assert result in (0, 1)
- except:
+ except: # pylint: disable-msg=W0702
logging.exception("Error while calling function in separate process")
# 0 and 1 are reserved for the return value
result = 33
- os._exit(result)
+ os._exit(result) # pylint: disable-msg=W0212
# Parent process
"""Initial checks whether to run or exit with a failure.
"""
+ if args: # masterd doesn't take any arguments
+ print >> sys.stderr, ("Usage: %s [-f] [-d]" % sys.argv[0])
+ sys.exit(constants.EXIT_FAILURE)
+
ssconf.CheckMaster(options.debug)
# If CheckMaster didn't fail we believe we are the master, but we have to
confirmation = sys.stdin.readline().strip()
if confirmation != "YES":
- print >>sys.stderr, "Aborting."
+ print >> sys.stderr, "Aborting."
sys.exit(constants.EXIT_FAILURE)
return
sys.exit(constants.EXIT_FAILURE)
-def ExecMasterd (options, args):
+def ExecMasterd(options, args): # pylint: disable-msg=W0613
"""Main master daemon function, executed with the PID file held.
"""
"""Ganeti node daemon"""
-# functions in this module need to have a given name structure, so:
-# pylint: disable-msg=C0103
+# pylint: disable-msg=C0103,W0142
+
+# C0103: Functions in this module need to have a given name structure,
+# and the name of the daemon doesn't match
+
+# W0142: Used * or ** magic, since we do use it extensively in this
+# module
import os
import sys
-import SocketServer
import logging
import signal
from ganeti import utils
from ganeti import storage
-import ganeti.http.server
+import ganeti.http.server # pylint: disable-msg=W0611
queue_lock = None
This class holds all methods exposed over the RPC interface.
"""
+ # too many public methods, and unused args - all methods get params
+ # due to the API
+ # pylint: disable-msg=R0904,W0613
def __init__(self, *args, **kwargs):
http.server.HttpServer.__init__(self, *args, **kwargs)
self.noded_pid = os.getpid()
return backend.ValidateHVParams(hvname, hvparams)
-def ExecNoded(options, args):
+def CheckNoded(_, args):
+ """Initial checks whether to run or exit with a failure.
+
+ """
+ if args: # noded doesn't take any arguments
+ print >> sys.stderr, ("Usage: %s [-f] [-d] [-p port] [-b ADDRESS]" %
+ sys.argv[0])
+ sys.exit(constants.EXIT_FAILURE)
+
+
+def ExecNoded(options, _):
"""Main node daemon function, executed with the PID file held.
"""
- global queue_lock
+ global queue_lock # pylint: disable-msg=W0603
# Read SSL certificate
if options.ssl:
dirs = [(val, constants.RUN_DIRS_MODE) for val in constants.SUB_RUN_DIRS]
dirs.append((constants.LOG_OS_DIR, 0750))
dirs.append((constants.LOCK_DIR, 1777))
- daemon.GenericMain(constants.NODED, parser, dirs, None, ExecNoded)
+ daemon.GenericMain(constants.NODED, parser, dirs, CheckNoded, ExecNoded)
if __name__ == '__main__':
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
-""" Ganeti Remote API master script.
+"""Ganeti Remote API master script.
+
"""
-import glob
+# pylint: disable-msg=C0103,W0142
+
+# C0103: Invalid name ganeti-rapi
+
import logging
import optparse
import sys
import os
import os.path
-import signal
from ganeti import constants
-from ganeti import errors
from ganeti import http
from ganeti import daemon
from ganeti import ssconf
-from ganeti import utils
from ganeti import luxi
from ganeti import serializer
from ganeti.rapi import connector
-import ganeti.http.auth
+import ganeti.http.auth # pylint: disable-msg=W0611
import ganeti.http.server
method = req.request_method.upper()
try:
ctx.handler_fn = getattr(ctx.handler, method)
- except AttributeError, err:
+ except AttributeError:
raise http.HttpBadRequest("Method %s is unsupported for path %s" %
(method, req.request_path))
"""Initial checks whether to run or exit with a failure.
"""
- if len(args) != 0:
- print >> sys.stderr, "Usage: %s [-f] [-d] [-p port] [-b ADDRESS]" % \
- sys.argv[0]
+ if args: # rapi doesn't take any arguments
+ print >> sys.stderr, ("Usage: %s [-f] [-d] [-p port] [-b ADDRESS]" %
+ sys.argv[0])
sys.exit(constants.EXIT_FAILURE)
ssconf.CheckMaster(options.debug)
-def ExecRapi(options, args):
+def ExecRapi(options, _):
"""Main remote API function, executed with the PID file held.
"""
"""
+# pylint: disable-msg=C0103,W0142
+
+# C0103: Invalid name ganeti-watcher
+
import os
import sys
import time
import logging
-import errno
from optparse import OptionParser
from ganeti import utils
self._data = {}
else:
self._data = serializer.Load(state_data)
- except Exception, msg:
+ except Exception, msg: # pylint: disable-msg=W0703
# Ignore errors while loading the file and treat it as empty
self._data = {}
logging.warning(("Invalid state file. Using defaults."
"""
arch_count, left_count = client.AutoArchiveJobs(age)
- logging.debug("Archived %s jobs, left %s" % (arch_count, left_count))
+ logging.debug("Archived %s jobs, left %s", arch_count, left_count)
def CheckDisks(self, notepad):
"""Check all nodes for restarted ones.
try:
logging.info("Activating disks for instance %s", instance.name)
instance.ActivateDisks()
- except Exception:
+ except Exception: # pylint: disable-msg=W0703
logging.exception("Error while activating disks for instance %s",
instance.name)
instance.name, last)
instance.Restart()
self.started_instances.add(instance.name)
- except Exception:
+ except Exception: # pylint: disable-msg=W0703
logging.exception("Error while restarting instance %s",
instance.name)
"""Main function.
"""
- global client
+ global client # pylint: disable-msg=W0603
options, args = ParseOptions()
+ if args: # watcher doesn't take any arguments
+ print >> sys.stderr, ("Usage: %s [-f] " % sys.argv[0])
+ sys.exit(constants.EXIT_FAILURE)
+
utils.SetupLogging(constants.LOG_WATCHER, debug=options.debug,
stderr_logging=options.debug)
--- /dev/null
+#!/bin/bash
+
+# Copyright (C) 2009 Google Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA.
+
+# This is a test script to ease development and testing on test clusters.
+# It should not be used to update production environments.
+
+# Usage: release v2.0.5
+# Alternative: URL=file:///my/git/repo release e5823b7e2cd8a3...
+# It will clone the given repository from the default or passed URL,
+# checkout the given reference (a tag or branch) and then create a
+# release archive; you will need to copy the archive and delete the
+# temporary directory at the end
+
+set -e
+
+: ${URL:=git://git.ganeti.org/ganeti.git}
+TAG="$1"
+
+TMPDIR=$(mktemp -d)
+cd "$TMPDIR"
+echo "Cloning the repository under $TMPDIR ..."
+git clone -q "$URL" dist
+cd dist
+git checkout "$TAG"
+./autogen.sh
+./configure
+VERSION=$(sed -n -e '/^PACKAGE_VERSION =/ s/^PACKAGE_VERSION = // p' Makefile)
+make distcheck
+fakeroot make dist
+tar tzvf ganeti-$VERSION.tar.gz
+sha1sum ganeti-$VERSION.tar.gz
+echo "The archive is at $PWD/ganeti-$VERSION.tar.gz"
+echo "Please copy it and remove the temporary directory when done."
# and now put it under $prefix on the target node(s)
for host; do
echo Uploading code to ${host}...
- rsync -v -rlDc --exclude="*.py[oc]" --exclude="*.pdf" --exclude="*.html" \
+ rsync -v -rlDc \
+ -e "ssh -oBatchMode=yes" \
+ --exclude="*.py[oc]" --exclude="*.pdf" --exclude="*.html" \
"$TXD/" \
root@${host}:/ &
done
if test -z "${NO_RESTART}"; then
for host; do
echo Restarting ganeti-noded on ${host}...
- ssh root@${host} /etc/init.d/ganeti restart &
+ ssh -oBatchMode=yes root@${host} /etc/init.d/ganeti restart &
done
wait
fi
# empty file for package definition
+
+"""Ganeti python modules"""
"""Asynchronous pyinotify implementation"""
-import pyinotify
import asyncore
+try:
+ # pylint: disable-msg=E0611
+ from pyinotify import pyinotify
+except ImportError:
+ import pyinotify
+
class AsyncNotifier(asyncore.file_dispatcher):
"""An asyncore dispatcher for inotify events.
"""
-
+ # pylint: disable-msg=W0622,W0212
def __init__(self, watch_manager, default_proc_fun=None, map=None):
"""Initializes this class.
utils.RemoveFile(constants.HMAC_CLUSTER_KEY)
utils.RemoveFile(constants.RAPI_CERT_FILE)
utils.RemoveFile(constants.SSL_CERT_FILE)
- except:
+ except: # pylint: disable-msg=W0702
logging.exception("Error while removing cluster secrets")
result = utils.RunCmd([constants.DAEMON_UTIL, "stop", constants.CONFD])
tmpr.append("The procfs filesystem doesn't seem to be mounted"
" under /proc, missing required directory /proc/sys and"
" the file /proc/sysrq-trigger")
+
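+ # report our current time as a (seconds, microseconds) pair; the master
+ # rebuilds it with utils.MergeTime when checking for clock skew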
+ if constants.NV_TIME in what:
+ result[constants.NV_TIME] = utils.SplitTime(time.time())
+
return result
it's not required to return anything.
"""
+ # TODO: remove the obsolete 'size' argument
+ # pylint: disable-msg=W0613
clist = []
if disk.children:
for child in disk.children:
# we need the children open in case the device itself has to
# be assembled
try:
+ # pylint: disable-msg=E1103
crdev.Open()
except errors.BlockDeviceError, err:
_Fail("Can't make child '%s' read-write: %s", child, err)
try:
result = _RecursiveAssembleBD(disk, owner, as_primary)
if isinstance(result, bdev.BlockDev):
+ # pylint: disable-msg=E1103
result = result.dev_path
except errors.BlockDeviceError, err:
_Fail("Error while assembling disk: %s", err, exc=True)
for cf in disks:
try:
rbd = _RecursiveFindBD(cf)
- except errors.BlockDeviceError, err:
+ except errors.BlockDeviceError:
result.append(None)
continue
if rbd is None:
return detail
-def _OSOndiskAPIVersion(name, os_dir):
+def _OSOndiskAPIVersion(os_dir):
"""Compute and return the API version of a given OS.
- This function will try to read the API version of the OS given by
- the 'name' parameter and residing in the 'os_dir' directory.
+ This function will try to read the API version of the OS residing in
+ the 'os_dir' directory.
- @type name: str
- @param name: the OS name we should look for
@type os_dir: str
- @param os_dir: the directory inwhich we should look for the OS
+ @param os_dir: the directory in which we should look for the OS
@rtype: tuple
@return: tuple (status, data) with status denoting the validity and
data holding either the valid versions or an error message
if os_dir is None:
return False, "Directory for OS %s not found in search path" % name
- status, api_versions = _OSOndiskAPIVersion(name, os_dir)
+ status, api_versions = _OSOndiskAPIVersion(os_dir)
if not status:
# push the error up
return status, api_versions
"""
if hooks_base_dir is None:
hooks_base_dir = constants.HOOKS_BASE_DIR
- self._BASE_DIR = hooks_base_dir
+ # yeah, _BASE_DIR is not valid for attributes, we use it like a
+ # constant
+ self._BASE_DIR = hooks_base_dir # pylint: disable-msg=C0103
@staticmethod
def ExecHook(script, env):
the master side.
"""
- def Run(self, name, idata):
+ @staticmethod
+ def Run(name, idata):
"""Run an iallocator script.
@type name: str
from ganeti import objects
+# Size of reads in _CanReadDevice
+_DEVICE_READ_SIZE = 128 * 1024
+
+
def _IgnoreError(fn, *args, **kwargs):
"""Executes the given function, ignoring BlockDeviceErrors.
raise errors.BlockDeviceError(msg)
+def _CanReadDevice(path):
+ """Check if we can read from the given device.
+
+ This tries to read the first 128k of the device.
+
+ """
+ try:
+ utils.ReadFile(path, size=_DEVICE_READ_SIZE)
+ return True
+ except EnvironmentError:
+ logging.warning("Can't read from device %s", path, exc_info=True)
+ return False
+
+
class BlockDev(object):
"""Block device abstract class.
_ThrowError("Can't compute PV info for vg %s", self._vg_name)
pvs_info.sort()
pvs_info.reverse()
- free_size, pv_name, _ = pvs_info[0]
+ free_size, _, _ = pvs_info[0]
if free_size < size:
_ThrowError("Not enough free space: required %s,"
" available %s", size, free_size)
self.est_time = None
-class BaseDRBD(BlockDev):
+class BaseDRBD(BlockDev): # pylint: disable-msg=W0223
"""Base DRBD class.
This class contains a few bits of common functionality between the
def __init__(self, unique_id, children, size):
if children and children.count(None) > 0:
children = []
+ if len(children) not in (0, 2):
+ raise ValueError("Invalid configuration data %s" % str(children))
+ if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 6:
+ raise ValueError("Invalid configuration data %s" % str(unique_id))
+ (self._lhost, self._lport,
+ self._rhost, self._rport,
+ self._aminor, self._secret) = unique_id
+ if children:
+ if not _CanReadDevice(children[1].dev_path):
+ logging.info("drbd%s: Ignoring unreadable meta device", self._aminor)
+ children = []
super(DRBD8, self).__init__(unique_id, children, size)
self.major = self._DRBD_MAJOR
version = self._GetVersion()
" usage: kernel is %s.%s, ganeti wants 8.x",
version['k_major'], version['k_minor'])
- if len(children) not in (0, 2):
- raise ValueError("Invalid configuration data %s" % str(children))
- if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 6:
- raise ValueError("Invalid configuration data %s" % str(unique_id))
- (self._lhost, self._lport,
- self._rhost, self._rport,
- self._aminor, self._secret) = unique_id
if (self._lhost is not None and self._lhost == self._rhost and
self._lport == self._rport):
raise ValueError("Invalid configuration data, same local/remote %s" %
the attach if can return success.
"""
+ # TODO: Rewrite to not use a for loop just because there is 'break'
+ # pylint: disable-msg=W0631
net_data = (self._lhost, self._lport, self._rhost, self._rport)
for minor in (self._aminor,):
info = self._GetDevInfo(self._GetShowData(minor))
if err.errno != errno.ENOENT:
_ThrowError("Can't remove file '%s': %s", self.dev_path, err)
+ def Rename(self, new_id):
+ """Renames the file.
+
+ """
+ # TODO: implement rename for file-based storage
+ _ThrowError("Rename is not supported for file-based storage")
+
+ def Grow(self, amount):
+ """Grow the file
+
+ @param amount: the amount (in mebibytes) to grow with
+
+ """
+ # TODO: implement grow for file-based storage
+ _ThrowError("Grow not supported for file-based storage")
+
def Attach(self):
"""Attach to an existing file.
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
+"""Module used during the Ganeti build process"""
import imp
import os
"NODE_PLACEMENT_OPT",
"NOHDR_OPT",
"NOIPCHECK_OPT",
+ "NONAMECHECK_OPT",
"NOLVM_STORAGE_OPT",
"NOMODIFY_ETCHOSTS_OPT",
"NOMODIFY_SSH_SETUP_OPT",
class _Argument:
- def __init__(self, min=0, max=None):
+ def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
self.min = min
self.max = max
Value can be any of the ones passed to the constructor.
"""
+ # pylint: disable-msg=W0622
def __init__(self, min=0, max=None, choices=None):
_Argument.__init__(self, min=min, max=max)
self.choices = choices
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
-
def _ExtractTagsObject(opts, args):
"""Extract the tag type object.
"""
kind, name = _ExtractTagsObject(opts, args)
- op = opcodes.OpGetTags(kind=kind, name=name)
- result = SubmitOpCode(op)
+ cl = GetClient()
+ result = cl.QueryTags(kind, name)
result = list(result)
result.sort()
for tag in result:
SubmitOpCode(op)
-def check_unit(option, opt, value):
+def check_unit(option, opt, value): # pylint: disable-msg=W0613
"""OptParsers custom converter for units.
"""
return kv_dict
-def check_ident_key_val(option, opt, value):
+def check_ident_key_val(option, opt, value): # pylint: disable-msg=W0613
"""Custom parser for ident:key=val,key=val options.
This will store the parsed values as a tuple (ident, {key: val}). As such,
return retval
-def check_key_val(option, opt, value):
+def check_key_val(option, opt, value): # pylint: disable-msg=W0613
"""Custom parser class for key=val,key=val options.
This will store the parsed values as a dict {key: val}.
help="Don't check that the instance's IP"
" is alive")
+NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
+ default=True, action="store_false",
+ help="Don't check that the instance's name"
+ " is resolvable")
+
NET_OPT = cli_option("--net",
help="NIC parameters", default=[],
dest="nics", action="append", type="identkeyval")
nics=nics,
pnode=pnode, snode=snode,
ip_check=opts.ip_check,
+ name_check=opts.name_check,
wait_for_sync=opts.wait_for_sync,
file_storage_dir=opts.file_storage_dir,
file_driver=opts.file_driver,
if unitfields is None:
unitfields = []
- numfields = utils.FieldSet(*numfields)
- unitfields = utils.FieldSet(*unitfields)
+ numfields = utils.FieldSet(*numfields) # pylint: disable-msg=W0142
+ unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142
format_fields = []
for field in fields:
"""Module implementing the master-side code."""
-# pylint: disable-msg=W0613,W0201
+# pylint: disable-msg=W0201
+
+# W0201 since most LU attributes are defined in CheckPrereq or similar
+# functions
import os
import os.path
self.recalculate_locks = {}
self.__ssh = None
# logging
- self.LogWarning = processor.LogWarning
- self.LogInfo = processor.LogInfo
- self.LogStep = processor.LogStep
+ self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
+ self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
+ self.LogStep = processor.LogStep # pylint: disable-msg=C0103
# support for dry-run
self.dry_run_result = None
and hook results
"""
+ # API must be kept, thus we ignore the unused-argument and
+ # could-be-a-function warnings
+ # pylint: disable-msg=W0613,R0201
return lu_result
def _ExpandAndLockInstance(self):
del self.recalculate_locks[locking.LEVEL_NODE]
-class NoHooksLU(LogicalUnit):
+class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
"""Simple LU which runs no hooks.
This LU is intended as a parent for other LogicalUnits which will
HPATH = None
HTYPE = None
+ def BuildHooksEnv(self):
+ """Empty BuildHooksEnv for NoHooksLu.
+
+ This just raises an error.
+
+ """
+ assert False, "BuildHooksEnv called for NoHooksLUs"
+
class Tasklet:
"""Tasklet base class.
}
if override:
args.update(override)
- return _BuildInstanceHookEnv(**args)
+ return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142
def _AdjustCandidatePool(lu, exceptions):
try:
hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
except:
+ # pylint: disable-msg=W0702
self.LogWarning("Errors occurred running hooks on %s" % master)
result = self.rpc.call_node_stop_master(master, False)
ENODESSH = (TNODE, "ENODESSH")
ENODEVERSION = (TNODE, "ENODEVERSION")
ENODESETUP = (TNODE, "ENODESETUP")
+ ENODETIME = (TNODE, "ENODETIME")
ETYPE_FIELD = "code"
ETYPE_ERROR = "ERROR"
"""
node = nodeinfo.name
- _ErrorIf = self._ErrorIf
+ _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
# main result, node_result should be a non-empty dict
test = not node_result or not isinstance(node_result, dict)
# check that ':' is not present in PV names, since it's a
# special character for lvcreate (denotes the range of PEs to
# use on the PV)
- for size, pvname, owner_vg in pvlist:
+ for _, pvname, owner_vg in pvlist:
test = ":" in pvname
_ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
" '%s' of VG '%s'", pvname, owner_vg)
available on the instance's node.
"""
- _ErrorIf = self._ErrorIf
+ _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
node_current = instanceconfig.primary_node
node_vol_should = {}
"""
self.bad = False
- _ErrorIf = self._ErrorIf
+ _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
verbose = self.op.verbose
self._feedback_fn = feedback_fn
feedback_fn("* Verifying global settings")
constants.NV_VERSION: None,
constants.NV_HVINFO: self.cfg.GetHypervisorType(),
constants.NV_NODESETUP: None,
+ constants.NV_TIME: None,
}
+
if vg_name is not None:
node_verify_param[constants.NV_VGLIST] = None
node_verify_param[constants.NV_LVLIST] = vg_name
node_verify_param[constants.NV_PVLIST] = [vg_name]
node_verify_param[constants.NV_DRBDLIST] = None
+
+ # Due to the way our RPC system works, exact response times cannot be
+ # guaranteed (e.g. a broken node could run into a timeout). By keeping the
+ # time before and after executing the request, we can at least have a time
+ # window.
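+ # A node time merged to a value outside [nvinfo_starttime - skew,
+ # nvinfo_endtime + skew] (with skew = NODE_MAX_CLOCK_SKEW) is flagged
+ # below as diverging.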
+ nvinfo_starttime = time.time()
all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
self.cfg.GetClusterName())
+ nvinfo_endtime = time.time()
cluster = self.cfg.GetClusterInfo()
master_node = self.cfg.GetMasterNode()
else:
instance = instanceinfo[instance]
node_drbd[minor] = (instance.name, instance.admin_up)
+
self._VerifyNode(node_i, file_names, local_checksums,
nresult, master_files, node_drbd, vg_name)
if test:
continue
+ # Node time
+ ntime = nresult.get(constants.NV_TIME, None)
+ try:
+ ntime_merged = utils.MergeTime(ntime)
+ except (ValueError, TypeError):
+ _ErrorIf(True, self.ENODETIME, node, "Node returned invalid time")
+ continue
+
+ if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
+ ntime_diff = abs(nvinfo_starttime - ntime_merged)
+ elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
+ ntime_diff = abs(ntime_merged - nvinfo_endtime)
+ else:
+ ntime_diff = None
+
+ _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
+ "Node time diverges by at least %0.1fs from master node time",
+ ntime_diff)
+
+ if ntime_diff is not None:
+ continue
+
try:
node_info[node] = {
"mfree": int(nodeinfo['memory_free']),
assert hooks_results, "invalid result from hooks"
for node_name in hooks_results:
- show_node_header = True
res = hooks_results[node_name]
msg = res.fail_msg
test = msg and not res.offline
continue
lvs = node_res.payload
- for lv_name, (_, lv_inactive, lv_online) in lvs.items():
+ for lv_name, (_, _, lv_online) in lvs.items():
inst = nv_dict.pop((node, lv_name), None)
if (not lv_online and inst is not None
and inst.name not in res_instances):
"NEW_NAME": self.op.name,
}
mn = self.cfg.GetMasterNode()
- return env, [mn], [mn]
+ all_nodes = self.cfg.GetNodeList()
+ return env, [mn], all_nodes
def CheckPrereq(self):
"""Verify that the passed name is a valid one.
"""
@staticmethod
- def _DiagnoseByOS(node_list, rlist):
+ def _DiagnoseByOS(rlist):
"""Remaps a per-node return list into an a per-os per-node dictionary
- @param node_list: a list with the names of all nodes
@param rlist: a map with node names as keys and OS objects as values
@rtype: dict
"""
valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
node_data = self.rpc.call_os_diagnose(valid_nodes)
- pol = self._DiagnoseByOS(valid_nodes, node_data)
+ pol = self._DiagnoseByOS(node_data)
output = []
calc_valid = self._FIELDS_NEEDVALID.intersection(self.op.output_fields)
calc_variants = "variants" in self.op.output_fields
# Run post hooks on the node before it's removed
hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
try:
- h_results = hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
+ hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
except:
+ # pylint: disable-msg=W0702
self.LogWarning("Errors occurred running hooks on %s" % node.name)
result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
"""Logical unit for querying nodes.
"""
+ # pylint: disable-msg=W0142
_OP_REQP = ["output_fields", "names", "use_locking"]
REQ_BGL = False
if inst_fields & frozenset(self.op.output_fields):
inst_data = self.cfg.GetAllInstancesInfo()
- for instance_name, inst in inst_data.items():
+ for inst in inst_data.values():
if inst.primary_node in node_to_primary:
node_to_primary[inst.primary_node].add(inst.name)
for secnode in inst.secondary_nodes:
# later in the procedure; this also means that if the re-add
# fails, we are left with a non-offlined, broken node
if self.op.readd:
- new_node.drained = new_node.offline = False
+ new_node.drained = new_node.offline = False # pylint: disable-msg=W0201
self.LogInfo("Readding a node, the offline/drained flags were reset")
# if we demote the node, we do cleanup later in the procedure
new_node.master_candidate = self.master_candidate
"""
to_skip = []
- for idx, disk in enumerate(self.instance.disks):
+ for idx, _ in enumerate(self.instance.disks):
if idx not in self.op.disks: # disk idx has not been passed in
to_skip.append(idx)
continue
"""Logical unit for querying instances.
"""
+ # pylint: disable-msg=W0142
_OP_REQP = ["output_fields", "names", "use_locking"]
REQ_BGL = False
_SIMPLE_FIELDS = ["name", "os", "network_port", "hypervisor",
"""Computes the list of nodes and their attributes.
"""
+ # pylint: disable-msg=R0912
+ # way too many branches here
all_info = self.cfg.GetAllInstancesInfo()
if self.wanted == locking.ALL_SET:
# caller didn't specify instance names, so ordering is not important
for idx, dsk in enumerate(instance.disks):
if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
raise errors.OpPrereqError("Instance disk %d has a complex layout,"
- " cannot copy", errors.ECODE_STATE)
+ " cannot copy" % idx, errors.ECODE_STATE)
_CheckNodeOnline(self, target_node)
_CheckNodeNotDrained(self, target_node)
"hvparams", "beparams"]
REQ_BGL = False
+ def CheckArguments(self):
+ """Check arguments.
+
+ """
+ # do not require name_check to ease forward/backward compatibility
+ # for tools
+ if not hasattr(self.op, "name_check"):
+ self.op.name_check = True
+ if self.op.ip_check and not self.op.name_check:
+ # TODO: make the ip check more flexible and not depend on the name check
+ raise errors.OpPrereqError("Cannot do ip checks without a name check",
+ errors.ECODE_INVAL)
+
def _ExpandNode(self, node):
"""Expands and checks one node name.
#### instance parameters check
# instance name verification
- hostname1 = utils.GetHostInfo(self.op.instance_name)
- self.op.instance_name = instance_name = hostname1.name
+ if self.op.name_check:
+ hostname1 = utils.GetHostInfo(self.op.instance_name)
+ self.op.instance_name = instance_name = hostname1.name
+ # used in CheckPrereq for ip ping check
+ self.check_ip = hostname1.ip
+ else:
+ instance_name = self.op.instance_name
+ self.check_ip = None
# this is just a preventive check, but someone might still add this
# instance in the meantime, and creation will fail at lock-add time
if ip is None or ip.lower() == constants.VALUE_NONE:
nic_ip = None
elif ip.lower() == constants.VALUE_AUTO:
+ if not self.op.name_check:
+ raise errors.OpPrereqError("IP address set to auto but name checks"
+ " have been skipped. Aborting.",
+ errors.ECODE_INVAL)
nic_ip = hostname1.ip
else:
if not utils.IsValidIP(ip):
# MAC address verification
mac = nic.get("mac", constants.VALUE_AUTO)
if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
- if not utils.IsValidMac(mac.lower()):
- raise errors.OpPrereqError("Invalid MAC address specified: %s" %
- mac, errors.ECODE_INVAL)
- else:
- try:
- self.cfg.ReserveMAC(mac, self.proc.GetECId())
- except errors.ReservationError:
- raise errors.OpPrereqError("MAC address %s already in use"
- " in cluster" % mac,
- errors.ECODE_NOTUNIQUE)
+ mac = utils.NormalizeAndValidateMac(mac)
+
+ try:
+ self.cfg.ReserveMAC(mac, self.proc.GetECId())
+ except errors.ReservationError:
+ raise errors.OpPrereqError("MAC address %s already in use"
+ " in cluster" % mac,
+ errors.ECODE_NOTUNIQUE)
# bridge verification
bridge = nic.get("bridge", None)
errors.ECODE_INVAL)
self.disks.append({"size": size, "mode": mode})
- # used in CheckPrereq for ip ping check
- self.check_ip = hostname1.ip
-
# file storage checks
if (self.op.file_driver and
not self.op.file_driver in constants.FILE_DRIVER):
nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
# ENDIF: self.op.mode == constants.INSTANCE_IMPORT
- # ip ping checks (we use the same ip that was resolved in ExpandNames)
- if self.op.start and not self.op.ip_check:
- raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
- " adding an instance in start mode",
- errors.ECODE_INVAL)
+ # ip ping checks (we use the same ip that was resolved in ExpandNames)
if self.op.ip_check:
if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
raise errors.OpPrereqError("IP %s of instance %s already in use" %
if len(ial.nodes) != ial.required_nodes:
raise errors.OpPrereqError("iallocator '%s' returned invalid number"
" of nodes (%s), required %s" %
- (len(ial.nodes), ial.required_nodes),
+ (iallocator_name,
+ len(ial.nodes), ial.required_nodes),
errors.ECODE_FAULT)
remote_node_name = ial.nodes[0]
return iv_names
def _CheckDevices(self, node_name, iv_names):
- for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
+ for name, (dev, _, _) in iv_names.iteritems():
self.cfg.SetDiskID(dev, node_name)
result = self.rpc.call_blockdev_find(node_name, dev)
raise errors.OpExecError("DRBD device %s is degraded!" % name)
def _RemoveOldStorage(self, node_name, iv_names):
- for name, (dev, old_lvs, _) in iv_names.iteritems():
+ for name, (_, old_lvs, _) in iv_names.iteritems():
self.lu.LogInfo("Remove logical volumes for %s" % name)
for lv in old_lvs:
if self.instance.primary_node == o_node1:
p_minor = o_minor1
else:
+ assert self.instance.primary_node == o_node2, "Three-node instance?"
p_minor = o_minor2
new_alone_id = (self.instance.primary_node, self.new_node, None,
if 'mac' in nic_dict:
nic_mac = nic_dict['mac']
if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
- if not utils.IsValidMac(nic_mac):
- raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac,
- errors.ECODE_INVAL)
+ nic_mac = utils.NormalizeAndValidateMac(nic_mac)
+
if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
raise errors.OpPrereqError("'auto' is not a valid MAC address when"
" modifying an existing nic",
nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
return env, nl, nl
- def _GetUpdatedParams(self, old_params, update_dict,
+ @staticmethod
+ def _GetUpdatedParams(old_params, update_dict,
default_values, parameter_types):
"""Return the new params dict for the given params.
raise errors.OpPrereqError("Disk operations not supported for"
" diskless instances",
errors.ECODE_INVAL)
- for disk_op, disk_dict in self.op.disks:
+ for disk_op, _ in self.op.disks:
if disk_op == constants.DDM_REMOVE:
if len(instance.disks) == 1:
raise errors.OpPrereqError("Cannot remove the last disk of"
result = []
instance = self.instance
- cluster = self.cluster
# disk changes
for disk_op, disk_dict in self.op.disks:
if disk_op == constants.DDM_REMOVE:
" Domain Name.")
-class TagsLU(NoHooksLU):
+class TagsLU(NoHooksLU): # pylint: disable-msg=W0223
"""Generic tags LU.
This is an abstract class which is the parent of all the other tags LUs.
easy usage
"""
+ # pylint: disable-msg=R0902
+ # lots of instance attributes
_ALLO_KEYS = [
"mem_size", "disks", "disk_template",
"os", "tags", "nics", "vcpus", "hypervisor",
# E0203: Access to member %r before its definition, since we use
# objects.py which doesn't explicitly initialise its members
-import socket
import time
import random
@param peers: list of peer nodes
"""
+ # we are actually called from init, so:
+ # pylint: disable-msg=W0201
if not isinstance(peers, list):
raise errors.ProgrammerError("peers must be a list")
self._peers = peers
"""
self.reader = reader
- def Exec(self, query):
+ def Exec(self, query): # pylint: disable-msg=R0201,W0613
"""Process a single UDP request from a client.
Different queries should override this function, which by defaults returns
-It accepts no arguments, and returns the current cluster master.
+It returns the current cluster master, or, for a FIELDS query, the
+requested master-related fields.
"""
+ def _GetMasterNode(self):
+ return self.reader.GetMasterNode()
+
def Exec(self, query):
"""ClusterMasterQuery main execution
"""
- if query is None:
+ if isinstance(query, dict):
+ if constants.CONFD_REQQ_FIELDS in query:
+ status = constants.CONFD_REPL_STATUS_OK
+ req_fields = query[constants.CONFD_REQQ_FIELDS]
+ if not isinstance(req_fields, (list, tuple)):
+ logging.debug("FIELDS request should be a list")
+ return QUERY_ARGUMENT_ERROR
+
+ answer = []
+ for field in req_fields:
+ if field == constants.CONFD_REQFIELD_NAME:
+ answer.append(self._GetMasterNode())
+ elif field == constants.CONFD_REQFIELD_IP:
+ answer.append(self.reader.GetMasterIP())
+ elif field == constants.CONFD_REQFIELD_MNODE_PIP:
+ answer.append(self.reader.GetNodePrimaryIp(self._GetMasterNode()))
+ else:
+ logging.debug("missing FIELDS in query dict")
+ return QUERY_ARGUMENT_ERROR
+ elif not query:
status = constants.CONFD_REPL_STATUS_OK
answer = self.reader.GetMasterNode()
else:
- status = constants.CONFD_REPL_STATUS_ERROR
- answer = 'master query accepts no query argument'
+ logging.debug("Invalid master query argument: not dict or empty")
+ return QUERY_ARGUMENT_ERROR
return status, answer
instances_list = query[constants.CONFD_REQQ_IPLIST]
mode = constants.CONFD_REQQ_IPLIST
else:
- status = constants.CONFD_REPL_STATUS_ERROR
logging.debug("missing IP or IPLIST in query dict")
return QUERY_ARGUMENT_ERROR
network_link = None
mode = constants.CONFD_REQQ_IP
else:
- logging.debug("Invalid query argument type for: %s" % query)
+ logging.debug("Invalid query argument type for: %s", query)
return QUERY_ARGUMENT_ERROR
pnodes_list = []
for instance_ip in instances_list:
if not isinstance(instance_ip, basestring):
- logging.debug("Invalid IP type for: %s" % instance_ip)
+ logging.debug("Invalid IP type for: %s", instance_ip)
return QUERY_ARGUMENT_ERROR
instance = self.reader.GetInstanceByLinkIp(instance_ip, network_link)
if not instance:
- logging.debug("Unknown instance IP: %s" % instance_ip)
+ logging.debug("Unknown instance IP: %s", instance_ip)
pnodes_list.append(QUERY_UNKNOWN_ENTRY_ERROR)
continue
pnode = self.reader.GetInstancePrimaryNode(instance)
if not pnode:
logging.error("Instance '%s' doesn't have an associated primary"
- " node" % instance)
+ " node", instance)
pnodes_list.append(QUERY_INTERNAL_ERROR)
continue
pnode_primary_ip = self.reader.GetNodePrimaryIp(pnode)
if not pnode_primary_ip:
logging.error("Primary node '%s' doesn't have an associated"
- " primary IP" % pnode)
+ " primary IP", pnode)
pnodes_list.append(QUERY_INTERNAL_ERROR)
continue
pnodes_list.append((constants.CONFD_REPL_STATUS_OK, pnode_primary_ip))
- # If a single ip was requested, return a single answer, otherwise the whole
- # list, with a success status (since each entry has its own success/failure)
+ # If a single ip was requested, return a single answer, otherwise
+ # the whole list, with a success status (since each entry has its
+ # own success/failure)
if mode == constants.CONFD_REQQ_IP:
return pnodes_list[0]
payload_out = self.PackReply(reply, rsalt)
return payload_out
except errors.ConfdRequestError, err:
- logging.info('Ignoring broken query from %s:%d: %s' % (ip, port, err))
+ logging.info('Ignoring broken query from %s:%d: %s', ip, port, err)
return None
def ExtractRequest(self, payload):
"""
current_time = time.time()
- logging.debug("Extracting request with size: %d" % (len(payload)))
+ logging.debug("Extracting request with size: %d", len(payload))
try:
(message, salt) = serializer.LoadSigned(payload, self.hmac_key)
except errors.SignatureError, err:
@return: tuple of reply and salt to add to the signature
"""
- logging.debug("Processing request: %s" % request)
+ logging.debug("Processing request: %s", request)
if request.protocol != constants.CONFD_PROTOCOL_VERSION:
msg = "wrong protocol version %d" % request.protocol
raise errors.ConfdRequestError(msg)
serial=self.reader.GetConfigSerialNo(),
)
- logging.debug("Sending reply: %s" % reply)
+ logging.debug("Sending reply: %s", reply)
return (reply, rsalt)
node.offline))
# drbd minors check
- d_map, duplicates = self._UnlockedComputeDRBDMap()
+ _, duplicates = self._UnlockedComputeDRBDMap()
for node, minor, instance_a, instance_b in duplicates:
result.append("DRBD minor %d on node %s is assigned twice to instances"
" %s and %s" % (minor, node, instance_a, instance_b))
"""
if not item.uuid:
item.uuid = self._GenerateUniqueID(ec_id)
- elif item.uuid in self._AllIDs(temporary=True):
- raise errors.ConfigurationError("Cannot add '%s': UUID already in use" %
- (item.name, item.uuid))
+ elif item.uuid in self._AllIDs(include_temporary=True):
+ raise errors.ConfigurationError("Cannot add '%s': UUID %s already"
+ " in use" % (item.name, item.uuid))
def _SetInstanceStatus(self, instance_name, status):
"""Set the instance's status to a given value.
LVM_STRIPECOUNT = _autoconf.LVM_STRIPECOUNT
# default maximum instance wait time, in seconds.
DEFAULT_SHUTDOWN_TIMEOUT = 120
-
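+# maximum accepted clock skew between nodes, in seconds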
+NODE_MAX_CLOCK_SKEW = 150
# RPC constants
(RPC_ENCODING_NONE,
HV_INIT_SCRIPT = "init_script"
HV_MIGRATION_PORT = "migration_port"
HV_USE_LOCALTIME = "use_localtime"
+HV_DISK_CACHE = "disk_cache"
HVS_PARAMETER_TYPES = {
HV_BOOT_ORDER: VTYPE_STRING,
HV_INIT_SCRIPT: VTYPE_STRING,
HV_MIGRATION_PORT: VTYPE_INT,
HV_USE_LOCALTIME: VTYPE_BOOL,
+ HV_DISK_CACHE: VTYPE_STRING,
}
HVS_PARAMETERS = frozenset(HVS_PARAMETER_TYPES.keys())
HT_DISK_MTD = "mtd"
HT_DISK_PFLASH = "pflash"
+HT_CACHE_DEFAULT = "default"
+HT_CACHE_NONE = "none"
+HT_CACHE_WTHROUGH = "writethrough"
+HT_CACHE_WBACK = "writeback"
+HT_VALID_CACHE_TYPES = frozenset([HT_CACHE_DEFAULT,
+ HT_CACHE_NONE,
+ HT_CACHE_WTHROUGH,
+ HT_CACHE_WBACK])
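+
+# a cache value other than HT_CACHE_DEFAULT is passed verbatim to KVM as
+# the ",cache=..." part of the -drive argument (see hv_kvm)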
+
HT_HVM_VALID_DISK_TYPES = frozenset([HT_DISK_PARAVIRTUAL, HT_DISK_IOEMU])
HT_KVM_VALID_DISK_TYPES = frozenset([HT_DISK_PARAVIRTUAL, HT_DISK_IDE,
HT_DISK_SCSI, HT_DISK_SD, HT_DISK_MTD,
NV_PVLIST = "pvlist"
NV_DRBDLIST = "drbd-list"
NV_NODESETUP = "nodesetup"
+NV_TIME = "time"
# Allocator framework constants
IALLOCATOR_VERSION = 2
HV_USB_MOUSE: '',
HV_MIGRATION_PORT: 8102,
HV_USE_LOCALTIME: False,
+ HV_DISK_CACHE: HT_CACHE_DEFAULT,
},
HT_FAKE: {
},
CONFD_REQQ_LINK = "0"
CONFD_REQQ_IP = "1"
CONFD_REQQ_IPLIST = "2"
+CONFD_REQQ_FIELDS = "3"
+
+CONFD_REQFIELD_NAME = "0"
+CONFD_REQFIELD_IP = "1"
+CONFD_REQFIELD_MNODE_PIP = "2"
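+
+# a FIELDS master query is a dict of the form
+# {CONFD_REQQ_FIELDS: [CONFD_REQFIELD_NAME, CONFD_REQFIELD_MNODE_PIP]}
+# and the answer holds the requested values in the same order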
CONFD_REQS = frozenset([
CONFD_REQ_PING,
# Each request is "salted" by the current timestamp.
# This constants decides how many seconds of skew to accept.
# TODO: make this a default and allow the value to be more configurable
-CONFD_MAX_CLOCK_SKEW = 300
+CONFD_MAX_CLOCK_SKEW = 2 * NODE_MAX_CLOCK_SKEW
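+# (i.e. 300 seconds with the default NODE_MAX_CLOCK_SKEW of 150, the same
+# as the previous hardcoded value)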
# When we haven't reloaded the config for more than this amount of seconds, we
# force a test to see if inotify is betraying us.
raise
ip, port = address
self.handle_datagram(payload, ip, port)
- except:
+ except: # pylint: disable-msg=W0702
# we need to catch any exception here, log it, but proceed, because even
# if we failed handling a single request, we still want to continue.
logging.error("Unexpected exception", exc_info=True)
else:
raise
self._out_queue.pop(0)
- except:
+ except: # pylint: disable-msg=W0702
# we need to catch any exception here, log it, but proceed, because even
# if we failed sending a single datagram we still want to continue.
logging.error("Unexpected exception", exc_info=True)
def MaybeRaise(result):
- """Is this looks like an encoded Ganeti exception, raise it.
+ """If this looks like an encoded Ganeti exception, raise it.
This function tries to parse the passed argument and if it looks
like an encoding done by EncodeException, it will re-raise it.
code = 505
-class HttpJsonConverter:
+class HttpJsonConverter: # pylint: disable-msg=W0232
CONTENT_TYPE = "application/json"
- def Encode(self, data):
+ @staticmethod
+ def Encode(data):
return serializer.DumpJson(data)
- def Decode(self, data):
+ @staticmethod
+ def Decode(data):
return serializer.LoadJson(data)
if not io_events:
# Timeout
return None
- for (evfd, evcond) in io_events:
+ for (_, evcond) in io_events:
if evcond & check:
return evcond
finally:
we do on our side.
"""
+ # some parameters are unused, but this is the API
+ # pylint: disable-msg=W0613
assert self._ssl_params, "SSL not initialized"
return (self._ssl_cert.digest("sha1") == cert.digest("sha1") and
@return: Authentication realm
"""
+ # today we don't have per-request filtering, but we might want to
+ # add it in the future
+ # pylint: disable-msg=W0613
return self.AUTH_REALM
def PreHandleRequest(self, req):
# Do the secret SSL handshake
if self.using_ssl:
- self.sock.set_connect_state()
+ self.sock.set_connect_state() # pylint: disable-msg=E1103
try:
http.Handshake(self.sock, self.WRITE_TIMEOUT)
except http.HttpSessionHandshakeUnexpectedEOF:
"""HTTP client worker class.
"""
- def RunTask(self, pend_req):
+ def RunTask(self, pend_req): # pylint: disable-msg=W0221
try:
HttpClientRequestExecutor(pend_req.request)
finally:
import cgi
import logging
import os
-import select
import socket
import time
import signal
for child in self._children:
try:
- pid, status = os.waitpid(child, os.WNOHANG)
+ pid, _ = os.waitpid(child, os.WNOHANG)
except os.error:
pid = None
if pid and pid in self._children:
"""Called for each incoming connection
"""
+ # pylint: disable-msg=W0212
(connection, client_addr) = self.socket.accept()
self._CollectChildren(False)
self.socket = None
self.request_executor(self, connection, client_addr)
- except Exception:
+ except Exception: # pylint: disable-msg=W0703
logging.exception("Error while handling request from %s:%s",
client_addr[0], client_addr[1])
os._exit(1)
"""
raise NotImplementedError
- def MigrationInfo(self, instance):
+ def MigrationInfo(self, instance): # pylint: disable-msg=R0201,W0613
"""Get instance information to perform a migration.
By default assume no information is needed.
"""
raise NotImplementedError
-
- def GetLinuxNodeInfo(self):
+ @staticmethod
+ def GetLinuxNodeInfo():
"""For linux systems, return actual OS information.
This is an abstraction for all non-hypervisor-based classes, where
import os.path
import time
import logging
-from cStringIO import StringIO
from ganeti import constants
-from ganeti import errors
+from ganeti import errors # pylint: disable-msg=W0611
from ganeti import utils
from ganeti.hypervisor import hv_base
from ganeti.errors import HypervisorError
fh = open("/proc/mounts", "r")
try:
for line in fh:
- fstype, mountpoint, rest = line.split(" ", 2)
+ _, mountpoint, _ = line.split(" ", 2)
if (mountpoint.startswith(path) and
mountpoint != path):
data.append(mountpoint)
"""
if not os.path.exists(self._ROOT_DIR):
return "The required directory '%s' does not exist." % self._ROOT_DIR
+
+ @classmethod
+ def PowercycleNode(cls):
+ """Chroot powercycle, just a wrapper over Linux powercycle.
+
+ """
+ cls.LinuxPowercycle()
+
+ def MigrateInstance(self, instance, target, live):
+ """Migrate an instance.
+
+ @type instance: L{objects.Instance}
+ @param instance: the instance to be migrated
+ @type target: string
+ @param target: hostname (usually ip) of the target node
+ @type live: boolean
+ @param live: whether to do a live or non-live migration
+
+ """
+ raise HypervisorError("Migration not supported by the chroot hypervisor")
import os
import os.path
+import logging
from ganeti import utils
from ganeti import constants
try:
inst_id = fh.readline().strip()
memory = utils.TryConvert(int, fh.readline().strip())
- vcpus = utils.TryConvert(fh.readline().strip())
+ vcpus = utils.TryConvert(int, fh.readline().strip())
stat = "---b-"
times = "0"
return (instance_name, inst_id, memory, vcpus, stat, times)
raise errors.HypervisorError("Failed to list instances: %s" % err)
return data
+
+ def _InstanceFile(self, instance_name):
+ """Compute the instance file for an instance name.
+
+ """
+ return self._ROOT_DIR + "/%s" % instance_name
+
+ def _IsAlive(self, instance_name):
+ """Checks if an instance is alive.
+
+ """
+ file_name = self._InstanceFile(instance_name)
+ return os.path.exists(file_name)
+
+ def _MarkUp(self, instance):
+ """Mark the instance as running.
+
+ This does no checks, which should be done by its callers.
+
+ """
+ file_name = self._InstanceFile(instance.name)
+ fh = file(file_name, "w")
+ try:
+ fh.write("0\n%d\n%d\n" %
+ (instance.beparams[constants.BE_MEMORY],
+ instance.beparams[constants.BE_VCPUS]))
+ finally:
+ fh.close()
+
+ def _MarkDown(self, instance):
+ """Mark the instance as running.
+
+ This does no checks, which should be done by its callers.
+
+ """
+ file_name = self._InstanceFile(instance.name)
+ utils.RemoveFile(file_name)
+
def StartInstance(self, instance, block_devices):
"""Start an instance.
handle race conditions properly, since these are *FAKE* instances.
"""
- file_name = self._ROOT_DIR + "/%s" % instance.name
- if os.path.exists(file_name):
+ if self._IsAlive(instance.name):
raise errors.HypervisorError("Failed to start instance %s: %s" %
(instance.name, "already running"))
try:
- fh = file(file_name, "w")
- try:
- fh.write("0\n%d\n%d\n" %
- (instance.beparams[constants.BE_MEMORY],
- instance.beparams[constants.BE_VCPUS]))
- finally:
- fh.close()
+ self._MarkUp(instance)
except IOError, err:
raise errors.HypervisorError("Failed to start instance %s: %s" %
(instance.name, err))
dir, if it exists, otherwise we raise an exception.
"""
- file_name = self._ROOT_DIR + "/%s" % instance.name
- if not os.path.exists(file_name):
+ if not self._IsAlive(instance.name):
raise errors.HypervisorError("Failed to stop instance %s: %s" %
(instance.name, "not running"))
- utils.RemoveFile(file_name)
+ self._MarkDown(instance)
def RebootInstance(self, instance):
"""Reboot an instance.
"""
cls.LinuxPowercycle()
+
+ def AcceptInstance(self, instance, info, target):
+ """Prepare to accept an instance.
+
+ @type instance: L{objects.Instance}
+ @param instance: instance to be accepted
+ @type info: string
+ @param info: instance info, not used
+ @type target: string
+ @param target: target host (usually ip), on this node
+
+ """
+ if self._IsAlive(instance.name):
+ raise errors.HypervisorError("Can't accept instance, already running")
+
+ def MigrateInstance(self, instance, target, live):
+ """Migrate an instance.
+
+ @type instance: L{objects.Instance}
+ @param instance: the instance to be migrated
+ @type target: string
+ @param target: hostname (usually ip) of the target node
+ @type live: boolean
+ @param live: whether to do a live or non-live migration
+
+ """
+ logging.debug("Fake hypervisor migrating %s to %s (live=%s)",
+ instance, target, live)
+
+ self._MarkDown(instance)
+
+ def FinalizeMigration(self, instance, info, success):
+ """Finalize an instance migration.
+
+ For the fake hv, this just marks the instance up.
+
+ @type instance: L{objects.Instance}
+ @param instance: instance whose migration is being finalized
+
+ """
+ if success:
+ self._MarkUp(instance)
+ else:
+ # ensure it's down
+ self._MarkDown(instance)
hv_base.ParamInSet(False, constants.HT_KVM_VALID_MOUSE_TYPES),
constants.HV_MIGRATION_PORT: hv_base.NET_PORT_CHECK,
constants.HV_USE_LOCALTIME: hv_base.NO_CHECK,
+ constants.HV_DISK_CACHE:
+ hv_base.ParamInSet(True, constants.HT_VALID_CACHE_TYPES),
}
_MIGRATION_STATUS_RE = re.compile('Migration\s+status:\s+(\w+)',
dirs = [(dname, constants.RUN_DIRS_MODE) for dname in self._DIRS]
utils.EnsureDirs(dirs)
+ def _InstancePidFile(self, instance_name):
+ """Returns the instance pidfile.
+
+ """
+ pidfile = "%s/%s" % (self._PIDS_DIR, instance_name)
+ return pidfile
+
def _InstancePidAlive(self, instance_name):
"""Returns the instance pid and pidfile
"""
- pidfile = "%s/%s" % (self._PIDS_DIR, instance_name)
+ pidfile = self._InstancePidFile(instance_name)
pid = utils.ReadPidFile(pidfile)
alive = utils.IsProcessAlive(pid)
return (pidfile, pid, alive)
+ def _CheckDown(self, instance_name):
+ """Raises an error unless the given instance is down.
+
+ """
+ alive = self._InstancePidAlive(instance_name)[2]
+ if alive:
+ raise errors.HypervisorError("Failed to start instance %s: %s" %
+ (instance_name, "already running"))
+
@classmethod
def _InstanceMonitor(cls, instance_name):
"""Returns the instance monitor socket name
script.write(" # Connect the interface to the bridge\n")
script.write(" /usr/sbin/brctl addif $BRIDGE $INTERFACE\n")
elif nic.nicparams[constants.NIC_MODE] == constants.NIC_MODE_ROUTED:
+ if not nic.ip:
+ raise errors.HypervisorError("nic/%d is routed, but has no ip." % seq)
script.write(" # Route traffic targeted at the IP to the interface\n")
if nic.nicparams[constants.NIC_LINK]:
script.write(" while /sbin/ip rule del dev $INTERFACE; do :; done\n")
@return: tuple (name, id, memory, vcpus, stat, times)
"""
- pidfile, pid, alive = self._InstancePidAlive(instance_name)
+ _, pid, alive = self._InstancePidAlive(instance_name)
if not alive:
return None
if utils.IsProcessAlive(utils.ReadPidFile(filename)):
try:
info = self.GetInstanceInfo(name)
- except errors.HypervisorError, err:
+ except errors.HypervisorError:
continue
if info:
data.append(info)
"""Generate KVM information to start an instance.
"""
- pidfile, pid, alive = self._InstancePidAlive(instance.name)
+ pidfile = self._InstancePidFile(instance.name)
kvm = constants.KVM_PATH
kvm_cmd = [kvm]
kvm_cmd.extend(['-m', instance.beparams[constants.BE_MEMORY]])
if_val = ',if=virtio'
else:
if_val = ',if=%s' % disk_type
+ # Cache mode
+ disk_cache = hvp[constants.HV_DISK_CACHE]
+ if disk_cache != constants.HT_CACHE_DEFAULT:
+ cache_val = ",cache=%s" % disk_cache
+ else:
+ cache_val = ""
for cfdev, dev_path in block_devices:
if cfdev.mode != constants.DISK_RDWR:
raise errors.HypervisorError("Instance has read-only disks which"
else:
boot_val = ''
- drive_val = 'file=%s,format=raw%s%s' % (dev_path, if_val, boot_val)
+ drive_val = 'file=%s,format=raw%s%s%s' % (dev_path, if_val, boot_val,
+ cache_val)
kvm_cmd.extend(['-drive', drive_val])
iso_image = hvp[constants.HV_CDROM_IMAGE_PATH]
vnc_arg = '%s:%d' % (vnc_bind_address, display)
else:
logging.error("Network port is not a valid VNC display (%d < %d)."
- " Not starting VNC" %
- (instance.network_port,
- constants.VNC_BASE_PORT))
+ " Not starting VNC", instance.network_port,
+ constants.VNC_BASE_PORT)
vnc_arg = 'none'
# Only allow tls and other option when not binding to a file, for now.
@param incoming: (target_host_ip, port)
"""
- pidfile, pid, alive = self._InstancePidAlive(instance.name)
hvp = instance.hvparams
- if alive:
- raise errors.HypervisorError("Failed to start instance %s: %s" %
- (instance.name, "already running"))
+ name = instance.name
+ self._CheckDown(name)
temp_files = []
result = utils.RunCmd(kvm_cmd)
if result.failed:
raise errors.HypervisorError("Failed to start instance %s: %s (%s)" %
- (instance.name, result.fail_reason,
- result.output))
+ (name, result.fail_reason, result.output))
- if not utils.IsProcessAlive(utils.ReadPidFile(pidfile)):
- raise errors.HypervisorError("Failed to start instance %s" %
- (instance.name))
+ if not self._InstancePidAlive(name)[2]:
+ raise errors.HypervisorError("Failed to start instance %s" % name)
if vnc_pwd:
change_cmd = 'change vnc password %s' % vnc_pwd
"""Start an instance.
"""
- pidfile, pid, alive = self._InstancePidAlive(instance.name)
- if alive:
- raise errors.HypervisorError("Failed to start instance %s: %s" %
- (instance.name, "already running"))
-
+ self._CheckDown(instance.name)
kvm_runtime = self._GenerateKVMRuntime(instance, block_devices)
self._SaveKVMRuntime(instance, kvm_runtime)
self._ExecuteKVMRuntime(instance, kvm_runtime)
# For some reason if we do a 'send-key ctrl-alt-delete' to the control
# socket the instance will stop, but now power up again. So we'll resort
# to shutdown and restart.
- pidfile, pid, alive = self._InstancePidAlive(instance.name)
+ _, _, alive = self._InstancePidAlive(instance.name)
if not alive:
raise errors.HypervisorError("Failed to reboot instance %s:"
" not running" % instance.name)
raise errors.HypervisorError("Migration %s at the kvm level" %
status)
else:
- logging.info("KVM: unknown migration status '%s'" % status)
+ logging.info("KVM: unknown migration status '%s'", status)
time.sleep(2)
utils.KillProcess(pid)
"""
-import os
-import os.path
import logging
from cStringIO import StringIO
return "'xm info' failed: %s, %s" % (result.fail_reason, result.output)
@staticmethod
- def _GetConfigFileDiskData(disk_template, block_devices):
+ def _GetConfigFileDiskData(block_devices):
"""Get disk directive for xen config file.
This method builds the xen config disk directive according to the
given block_devices.
- @param disk_template: string containing instance disk template
@param block_devices: list of tuples (cfdev, rldev):
- cfdev: dict containing ganeti config disk part
- rldev: ganeti.bdev.BlockDev object
if nic.nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
nic_str += ", bridge=%s" % nic.nicparams[constants.NIC_LINK]
+ disk_data = cls._GetConfigFileDiskData(block_devices)
+
config.write("vif = [%s]\n" % ",".join(vif_data))
- config.write("disk = [%s]\n" % ",".join(
- cls._GetConfigFileDiskData(instance.disk_template,
- block_devices)))
+ config.write("disk = [%s]\n" % ",".join(disk_data))
config.write("root = '%s'\n" % hvp[constants.HV_ROOT_PATH])
config.write("on_poweroff = 'destroy'\n")
nic_str += ", bridge=%s" % nic.nicparams[constants.NIC_LINK]
config.write("vif = [%s]\n" % ",".join(vif_data))
- disk_data = cls._GetConfigFileDiskData(instance.disk_template,
- block_devices)
+ disk_data = cls._GetConfigFileDiskData(block_devices)
disk_type = hvp[constants.HV_DISK_TYPE]
if disk_type in (None, constants.HT_DISK_IOEMU):
replacement = ",ioemu:hd"
@ivar change: a Condition variable we use for waiting for job changes
"""
+ # pylint: disable-msg=W0212
__slots__ = ["queue", "id", "ops", "log_serial",
"received_timestamp", "start_timestamp", "end_timestamp",
"lock_status", "change",
"""The actual job workers.
"""
- def RunTask(self, job):
+ def RunTask(self, job): # pylint: disable-msg=W0221
"""Job executor.
This function processes a job. It is closely tied to the _QueuedJob and
"""
def wrapper(self, *args, **kwargs):
+ # pylint: disable-msg=W0212
assert self._queue_lock is not None, "Queue should be open"
return fn(self, *args, **kwargs)
return wrapper
except KeyError:
pass
- def _CheckRpcResult(self, result, nodes, failmsg):
+ @staticmethod
+ def _CheckRpcResult(result, nodes, failmsg):
"""Verifies the status of an RPC call.
Since we aim to keep consistency should this node (the current
msg = result[node].fail_msg
if msg:
failed.append(node)
- logging.error("RPC call %s failed on node %s: %s",
- result[node].call, node, msg)
+ logging.error("RPC call %s (%s) failed on node %s: %s",
+ result[node].call, failmsg, node, msg)
else:
success.append(node)
result = rpc.RpcRunner.call_jobqueue_rename(names, addrs, rename)
self._CheckRpcResult(result, self._nodes, "Renaming files (%r)" % rename)
- def _FormatJobID(self, job_id):
+ @staticmethod
+ def _FormatJobID(job_id):
"""Convert a job ID to string format.
Currently this just does C{str(job_id)} after performing some
@return: the list of job IDs
"""
+ # pylint: disable-msg=W0613
jlist = [self._ExtractJobID(name) for name in self._ListJobFiles()]
jlist = utils.NiceSort(jlist)
return jlist
try:
job = _QueuedJob.Restore(self, data)
- except Exception, err:
+ except Exception, err: # pylint: disable-msg=W0703
new_path = self._GetArchivedJobPath(job_id)
if filepath == new_path:
# job already archived (future case)
return (archived_count, len(all_job_ids) - last_touched - 1)
- def _GetJobInfoUnlocked(self, job, fields):
+ @staticmethod
+ def _GetJobInfoUnlocked(job, fields):
"""Returns information about a job.
@type job: L{_QueuedJob}
if serial is None:
# There must be a serious problem
- raise errors.JobQueueError("Can't read/parse the job queue serial file")
+ raise errors.JobQueueError("Can't read/parse the job queue"
+ " serial file")
if not must_lock:
- # There's no need for more error handling. Closing the lock file below
- # in case of an error will unlock it anyway.
+ # There's no need for more error handling. Closing the lock
+ # file below in case of an error will unlock it anyway.
queue_lock.Unlock()
except:
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
-# Disable "Invalid name ..." message
-# pylint: disable-msg=C0103
-
"""Module implementing the Ganeti locking code."""
+# pylint: disable-msg=W0212
+
+# W0212: since e.g. LockSet methods make heavy use of the
+# internals of SharedLock
+
import os
import select
import threading
if self._nwaiters == 0:
self._Cleanup()
- def notifyAll(self):
+ def notifyAll(self): # pylint: disable-msg=C0103
"""Close the writing side of the pipe to notify all waiters.
"""
assert self._nwaiters > 0
self._nwaiters -= 1
- def notifyAll(self):
+ def notifyAll(self): # pylint: disable-msg=C0103
"""Notify all currently waiting threads.
"""
self._cond = threading.Condition(lock=lock)
self._nwaiters = 0
- def notifyAll(self):
+ def notifyAll(self): # pylint: disable-msg=C0103
"""Notifies the condition.
"""
# the test cases.
return utils.any((self._is_owned(l) for l in LEVELS[level + 1:]))
- def _BGL_owned(self):
+ def _BGL_owned(self): # pylint: disable-msg=C0103
"""Check if the current thread owns the BGL.
Both an exclusive or a shared acquisition work.
"""
return BGL in self.__keyring[LEVEL_CLUSTER]._list_owned()
- def _contains_BGL(self, level, names):
+ @staticmethod
+ def _contains_BGL(level, names): # pylint: disable-msg=C0103
"""Check if the level contains the BGL.
Check if acting on the given level and set of names will change
REQ_QUERY_EXPORTS = "QueryExports"
REQ_QUERY_CONFIG_VALUES = "QueryConfigValues"
REQ_QUERY_CLUSTER_INFO = "QueryClusterInfo"
+REQ_QUERY_TAGS = "QueryTags"
REQ_QUEUE_SET_DRAIN_FLAG = "SetDrainFlag"
REQ_SET_WATCHER_PAUSE = "SetWatcherPause"
old_transp = self.transport
self.transport = None
old_transp.Close()
- except Exception:
+ except Exception: # pylint: disable-msg=W0703
pass
def CallMethod(self, method, args):
def QueryConfigValues(self, fields):
return self.CallMethod(REQ_QUERY_CONFIG_VALUES, fields)
+ def QueryTags(self, kind, name):
+ return self.CallMethod(REQ_QUERY_TAGS, (kind, name))
+
# TODO: class Server(object)
"""Calculate timeouts for lock attempts.
"""
- running_sum = 0
result = [1.0]
# Wait for a total of at least 150s before doing a blocking acquire
return timeout
-class OpExecCbBase:
+class OpExecCbBase: # pylint: disable-msg=W0232
"""Base class for OpCode execution callbacks.
"""
"""
-# pylint: disable-msg=E0203
+# pylint: disable-msg=E0203,W0201
# E0203: Access to member %r before its definition, since we use
# objects.py which doesn't explicitly initialise its members
+# W0201: Attribute '%s' defined outside __init__
import ConfigParser
import re
_TIMESTAMPS = ["ctime", "mtime"]
_UUID = ["uuid"]
-def FillDict(defaults_dict, custom_dict, skip_keys=[]):
+def FillDict(defaults_dict, custom_dict, skip_keys=None):
"""Basic function to apply settings on top a default dict.
@type defaults_dict: dict
"""
ret_dict = copy.deepcopy(defaults_dict)
ret_dict.update(custom_dict)
- for k in skip_keys:
- try:
- del ret_dict[k]
- except KeyError:
- pass
+ if skip_keys:
+ for k in skip_keys:
+ try:
+ del ret_dict[k]
+ except KeyError:
+ pass
return ret_dict
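A minimal standalone sketch (illustrative, not patch content) of why the
skip_keys=[] default had to go: a mutable default argument is created once,
at function definition time, and is then shared by every call.

    def append_item(item, acc=[]):
      acc.append(item)
      return acc

    append_item(1)  # returns [1]
    append_item(2)  # returns [1, 2]: the "default" list kept its state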
raise errors.ConfigurationError("Invalid object passed to FromDict:"
" expected dict, got %s" % type(val))
val_str = dict([(str(k), v) for k, v in val.iteritems()])
- obj = cls(**val_str)
+ obj = cls(**val_str) # pylint: disable-msg=W0142
return obj
@staticmethod
"""Fill defaults for missing configuration values.
"""
+ # pylint: disable-msg=E0203
+ # because these are "defined" via slots, not manually
if self.hvparams is None:
self.hvparams = constants.HVC_DEFAULTS
else:
"pnode", "disk_template", "snode", "mode",
"disks", "nics",
"src_node", "src_path", "start",
- "wait_for_sync", "ip_check",
+ "wait_for_sync", "ip_check", "name_check",
"file_storage_dir", "file_driver",
"iallocator",
"hypervisor", "hvparams", "beparams",
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
+"""Ganeti RAPI module"""
RAPI_ACCESS_WRITE = "write"
"""
+# pylint: disable-msg=C0103
+
+# C0103: Invalid name, since the R_* names are not conforming
+
import logging
from ganeti import luxi
raise http.HttpBadGateway("Master seems to be unreachable: %s" % str(err))
-def FeedbackFn(ts, log_type, log_msg):
+def FeedbackFn(ts, log_type, log_msg): # pylint: disable-msg=W0613
"""Feedback logging function for http case.
We don't have a stdout for printing log messages, so log them to the
http log at least.
+ @param ts: the timestamp (unused)
+
"""
logging.info("%s: %s", log_type, log_msg)
"""
+# pylint: disable-msg=C0103
+
+# C0103: Invalid name, since the R_* names are not conforming
+
import cgi
import re
"""Map resource to method.
"""
- def __init__(self, connector=CONNECTOR):
+ def __init__(self, connector=None):
"""Resource mapper constructor.
@param connector: a dictionary, mapping method name with URL path regexp
"""
+ if connector is None:
+ connector = CONNECTOR
self._connector = connector
def getController(self, uri):
"""/ resource.
"""
- def GET(self):
+ @staticmethod
+ def GET():
"""Show the list of mapped resources.
@return: a dictionary with 'name' and 'uri' keys for each of them.
""" /2 resource, the root of the version 2 API.
"""
- def GET(self):
+ @staticmethod
+ def GET():
"""Show the list of mapped resources.
@return: a dictionary with 'name' and 'uri' keys for each of them.
"""
+# pylint: disable-msg=C0103
+
+# C0103: Invalid name, since the R_* names are not conforming
+
from ganeti import opcodes
from ganeti import http
from ganeti import constants
to adapt clients accordingly.
"""
- def GET(self):
+ @staticmethod
+ def GET():
"""Returns the remote API version.
"""
"""Cluster info.
"""
- def GET(self):
+ @staticmethod
+ def GET():
"""Returns cluster information.
"""
"""/2/os resource.
"""
- def GET(self):
+ @staticmethod
+ def GET():
"""Return a list of all OSes.
Can return error 500 in case of a problem.
"""/2/redistribute-config resource.
"""
- def PUT(self):
+ @staticmethod
+ def PUT():
"""Redistribute configuration to all nodes.
"""
"""/2/jobs resource.
"""
- def GET(self):
+ @staticmethod
+ def GET():
"""Returns a dictionary of jobs.
@return: a dictionary with jobs id and uri.
nics=nics,
start=fn('start', True),
ip_check=fn('ip_check', True),
+ name_check=fn('name_check', True),
wait_for_sync=True,
hypervisor=fn('hypervisor', None),
hvparams=hvparams,
Example: ["tag1", "tag2", "tag3"]
"""
+ # pylint: disable-msg=W0212
return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)
def PUT(self):
you'll have back a job id.
"""
+ # pylint: disable-msg=W0212
if 'tag' not in self.queryargs:
raise http.HttpBadRequest("Please specify tag(s) to add using the"
" 'tag' parameter")
/tags?tag=[tag]&tag=[tag]
"""
+ # pylint: disable-msg=W0212
if 'tag' not in self.queryargs:
# no, we're not going to delete all tags
raise http.HttpBadRequest("Cannot delete all tags - please specify"
from ganeti import constants
from ganeti import errors
-import ganeti.http.client
+# pylint has a bug here: it doesn't see this import
+import ganeti.http.client # pylint: disable-msg=W0611
# Module level variable
Must be called before using any RPC function.
"""
- global _http_manager
+ global _http_manager # pylint: disable-msg=W0603
assert not _http_manager, "RPC module initialized more than once"
Must be called before quitting the program.
"""
- global _http_manager
+ global _http_manager # pylint: disable-msg=W0603
if _http_manager:
_http_manager.Shutdown()
args = (msg, prereq)
else:
args = (msg, )
- raise ec(*args)
+ raise ec(*args) # pylint: disable-msg=W0142
class Client:
backend (currently json).
"""
+# pylint: disable-msg=C0103
+
+# C0103: Invalid name, since pylint doesn't see that Dump points to a
+# function and not a constant
import simplejson
import re
- detail: string with details
"""
- retval = self.Run(node, 'root', 'hostname')
+ retval = self.Run(node, 'root', 'hostname --fqdn')
if retval.failed:
msg = "ssh problem"
"""
+# pylint: disable-msg=W0232,R0201
+
+# W0232, since we use these classes as singletons rather than as
+# objects holding data
+
+# R0201, for the same reason
+
+# TODO: FileStorage is initialised with paths, whereas the others are not
import logging
"""
raise NotImplementedError()
- def Modify(self, name, changes):
+ def Modify(self, name, changes): # pylint: disable-msg=W0613
"""Modifies an entity within the storage unit.
@type name: string
raise NotImplementedError()
-class FileStorage(_Base):
+class FileStorage(_Base): # pylint: disable-msg=W0223
"""File storage unit.
"""
return values
-class _LvmBase(_Base):
+class _LvmBase(_Base): # pylint: disable-msg=W0223
"""Base class for LVM storage containers.
@cvar LIST_FIELDS: list of tuples consisting of three elements: SF_*
if callable(mapper):
# we got a function, call it with all the declared fields
- val = mapper(*values)
+ val = mapper(*values) # pylint: disable-msg=W0142
elif len(values) == 1:
# we don't have a function, but we had a single field
# declared, pass it unchanged
yield fields
-class LvmPvStorage(_LvmBase):
+class LvmPvStorage(_LvmBase): # pylint: disable-msg=W0223
"""LVM Physical Volume storage unit.
"""
raise errors.GenericError("%s is not a directory" % dir_name)
-def ReadFile(file_name, size=None):
+def ReadFile(file_name, size=-1):
"""Reads a file.
- @type size: None or int
- @param size: Read at most size bytes
+ @type size: int
+ @param size: Read at most size bytes (if negative, entire file)
@rtype: str
@return: the (possibly partial) content of the file
"""
f = open(file_name, "r")
try:
- if size is None:
- return f.read()
- else:
- return f.read(size)
+ return f.read(size)
finally:
f.close()
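A sketch of the file.read() semantics relied upon above (illustrative only;
the path is an assumption): any negative size means "read to EOF", so a
single f.read(size) call now covers both the partial and whole-file cases.

    f = open("/etc/hostname", "r")
    try:
      data = f.read(-1)  # same as f.read(): returns the entire file
    finally:
      f.close()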
return None
-def all(seq, pred=bool):
+def all(seq, pred=bool): # pylint: disable-msg=W0622
"Returns True if pred(x) is True for every element in the iterable"
for _ in itertools.ifilterfalse(pred, seq):
return False
return True
-def any(seq, pred=bool):
+def any(seq, pred=bool): # pylint: disable-msg=W0622
"Returns True if pred(x) is True for at least one element in the iterable"
for _ in itertools.ifilter(pred, seq):
return True
return [i for i in seq if i not in seen and not seen.add(i)]
-def IsValidMac(mac):
- """Predicate to check if a MAC address is valid.
+def NormalizeAndValidateMac(mac):
+ """Normalizes and check if a MAC address is valid.
Checks whether the supplied MAC address is formally correct, only
- accepts colon separated format.
+ accepts the colon-separated format, and normalizes it to lowercase.
@type mac: str
@param mac: the MAC to be validated
- @rtype: boolean
- @return: True is the MAC seems valid
+ @rtype: str
+ @return: the normalized and validated MAC
+
+ @raise errors.OpPrereqError: If the MAC isn't valid
"""
- mac_check = re.compile("^([0-9a-f]{2}(:|$)){6}$")
- return mac_check.match(mac) is not None
+ mac_check = re.compile("^([0-9a-f]{2}(:|$)){6}$", re.I)
+ if not mac_check.match(mac):
+ raise errors.OpPrereqError("Invalid MAC address specified: %s" %
+ mac, errors.ECODE_INVAL)
+
+ return mac.lower()
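A hedged usage sketch (the MAC value is an example): unlike the old
IsValidMac predicate, the new helper returns the normalized address and
raises on invalid input, so callers can assign its result directly.

    mac = NormalizeAndValidateMac("AA:00:bb:00:CC:00")
    # mac is now "aa:00:bb:00:cc:00"; an invalid string would instead
    # raise errors.OpPrereqError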
def TestDelay(duration):
@return: the value zero
"""
+ # pylint: disable-msg=W0212
+ # yes, we really want os._exit
UMASK = 077
WORKDIR = "/"
# TODO: we could check here that the file contains our pid
try:
RemoveFile(pidfilename)
- except:
+ except: # pylint: disable-msg=W0702
pass
logging.debug(*args, **kwargs)
def wrapper(self, *args, **kwargs):
+ # pylint: disable-msg=W0212
assert hasattr(self, '_lock')
lock = self._lock
_LockDebug("Waiting for %s", lock)
while True:
try:
+ # pylint: disable-msg=W0142
return fn(*args)
except RetryAgain:
pass
"""
self.called = False
- def _HandleSignal(self, signum, frame):
+ # we don't care about arguments, but we leave them named for the future
+ def _HandleSignal(self, signum, frame): # pylint: disable-msg=W0613
"""Actual signal handling function.
"""
Users of a worker pool must override RunTask in a subclass.
"""
+ # pylint: disable-msg=W0212
def __init__(self, pool, worker_id):
"""Constructor for BaseWorker thread.
self.RunTask(*self._current_task)
logging.debug("Worker %s: done with task %r",
self.worker_id, self._current_task)
- except:
+ except: # pylint: disable-msg=W0702
logging.error("Worker %s: Caught unhandled exception",
self.worker_id, exc_info=True)
finally:
"""
for worker in self._workers + self._termworkers:
- if worker._HasRunningTaskUnlocked():
+ if worker._HasRunningTaskUnlocked(): # pylint: disable-msg=W0212
return True
return False
<arg>-s <replaceable>SIZE</replaceable></arg>
</group>
<sbr>
+ <arg>--no-ip-check</arg>
+ <arg>--no-name-check</arg>
+ <arg>--no-start</arg>
+ <sbr>
<group>
<arg rep="repeat">--net=<replaceable>N</replaceable><arg rep="repeat">:options</arg></arg>
<arg>--no-nics</arg>
</para>
<para>
+ The <option>--no-ip-check</option> option skips the check
+ verifying that the instance's IP address is not already alive
+ (i.e. reachable from the master node).
+ </para>
+
+ <para>
+ The <option>--no-name-check</option> skips the check for the
+ instance name via the resolver (e.g. in DNS or /etc/hosts,
+ depending on your setup). Since the name check is used to
+ compute the IP address, if you pass this option you must
+ also pass the <option>--no-ip-check</option> option.
+ </para>
+
+ <para>
+ If you don't want the instance to start automatically after
+ creation, pass the <option>--no-start</option> option. This
+ will leave the instance down until a subsequent
+ <command>gnt-instance start</command> command.
+ </para>
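+
+ <para>
+ For example (node, OS and instance names are placeholders),
+ <userinput>gnt-instance add -t plain -s 10G -o debian-etch
+ -n node1 --no-start --no-ip-check --no-name-check
+ instance1</userinput> creates the instance but leaves it down.
+ </para>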
+
+ <para>
The NICs of the instances can be specified via the
<option>--net</option> option. By default, one NIC is
created for the instance, with a random MAC, and set
</listitem>
</varlistentry>
+ <varlistentry>
+ <term>disk_cache</term>
+ <listitem>
+ <simpara>Valid for the KVM hypervisor.</simpara>
+
+ <simpara>The disk cache mode. It can be either
+ <userinput>default</userinput> to not pass any cache
+ option to KVM, or one of the KVM cache modes: none
+ (for direct I/O), writethrough (to use the host cache
+ but report completion to the guest only when the host
+ has committed the changes to disk) or writeback (to use
+ the host cache and report completion as soon as the
+ data is in the host cache). Note that there are
+ special considerations for the cache mode, depending on
+ the version of KVM used and the disk type (always a raw
+ file under Ganeti); please refer to the KVM
+ documentation for more details.
+ </simpara>
+ </listitem>
+ </varlistentry>
+
</variablelist>
</para>
</listitem>
</varlistentry>
<varlistentry>
+ <term>name_check</term>
+ <listitem>
+ <simpara>Skip the name check for instances;
+ see the description in the <command>add</command>
+ command for details.</simpara>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
<term>file_storage_dir, file_driver</term>
<listitem>
<simpara>Configuration for the <literal>file</literal>
[REPORTS]
output-format = colorized
-include-ids = no
+include-ids = yes
files-output = no
reports = no
evaluation = 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
[BASIC]
required-attributes =
-no-docstring-rgx = __.*__
+# disabling docstring checks, since we have way too many methods
+# without docstrings (due to complex inheritance hierarchies)
+#no-docstring-rgx = __.*__
+no-docstring-rgx = .*
module-rgx = (([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
-const-rgx = ((_{0,2}[A-Z][A-Z0-9_]*)|(__.*__))$
+# added lower-case names
+const-rgx = ((_{0,2}[A-Z][A-Z0-9_]*)|(__.*__)|(_?[a-z_][a-z0-9_]*))$
class-rgx = _?[A-Z][a-zA-Z0-9]+$
-function-rgx = (_?([A-Z]+[a-z0-9]+([A-Z]+[a-z0-9]*)*)|main)$
-method-rgx = (_{0,2}[A-Z]+[a-z0-9]+([A-Z]+[a-z0-9]*)*|__.*__)$
+# added lower-case names
+function-rgx = (_?([A-Z]+[a-z0-9]+([A-Z]+[a-z0-9]*)*)|main|([a-z_][a-z0-9_]*))$
+# added lower-case names, since derived classes must match parent method names
+method-rgx = (_{0,2}[A-Z]+[a-z0-9]+([A-Z]+[a-z0-9]*)*|__.*__|([a-z_][a-z0-9_]*))$
attr-rgx = [a-z_][a-z0-9_]{1,30}$
argument-rgx = [a-z_][a-z0-9_]*$
-variable-rgx = (_?([a-z_][a-z0-9_]*)|([A-Z0-9_]+))$
+variable-rgx = (_?([a-z_][a-z0-9_]*)|(_?[A-Z0-9_]+))$
inlinevar-rgx = [A-Za-z_][A-Za-z0-9_]*$
good-names = i,j,k,_
bad-names = foo,bar,baz,toto,tutu,tata
defining-attr-methods = __init__,__new__,setUp
[DESIGN]
-max-args = 6
-max-locals = 15
-max-returns = 6
-max-branchs = 12
-max-statements = 50
+max-args = 15
+max-locals = 50
+max-returns = 10
+max-branchs = 80
+max-statements = 200
max-parents = 7
-max-attributes = 7
-min-public-methods = 2
-max-public-methods = 20
+max-attributes = 20
+# zero, since struct-like (PODS) classes don't export any methods
+min-public-methods = 0
+max-public-methods = 50
[IMPORTS]
deprecated-modules = regsub,string,TERMIOS,Bastion,rexec
[FORMAT]
max-line-length = 80
-max-module-lines = 1000
+max-module-lines = 10000
indent-string = " "
[MISCELLANEOUS]
min-similarity-lines = 4
ignore-comments = yes
ignore-docstrings = yes
+
+[MESSAGES CONTROL]
+
+# Enable only checker(s) with the given id(s). This option conflicts with the
+# disable-checker option
+#enable-checker=
+
+# Enable all checker(s) except those with the given id(s). This option
+# conflicts with the enable-checker option
+#disable-checker=
+disable-checker=similarities
+
+# Enable all messages in the listed categories (IRCWEF).
+#enable-msg-cat=
+
+# Disable all messages in the listed categories (IRCWEF).
+disable-msg-cat=
+
+# Enable the message(s) with the given id(s).
+#enable-msg=
+
+# Disable the message(s) with the given id(s).
+disable-msg=W0511,R0922,W0201
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
+"""Backup related commands"""
-# pylint: disable-msg=W0401,W0614
+# pylint: disable-msg=W0401,W0613,W0614,C0103
# W0401: Wildcard import ganeti.cli
+# W0613: Unused argument, since all functions follow the same API
# W0614: Unused import %s from wildcard import (since we need cli)
+# C0103: Invalid name gnt-backup
import sys
from ganeti.cli import *
from ganeti import opcodes
from ganeti import constants
-from ganeti import errors
-from ganeti import utils
_VALUE_TRUE = "true"
if not isinstance(dlist, list):
ToStderr("Cannot parse execution results")
return 1
- tot_dsk = len(dlist)
# TODO: handle diskless instances
if dlist.count(False) == 0:
# all OK
@return: the desired exit code
"""
- instance = args[0]
op = opcodes.OpRemoveExport(instance_name=args[0])
SubmitOpCode(op)
SRC_DIR_OPT,
SRC_NODE_OPT,
NOIPCHECK_OPT,
+ NONAMECHECK_OPT,
IALLOCATOR_OPT,
FILESTORE_DIR_OPT,
FILESTORE_DRIVER_OPT,
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
+"""Cluster related commands"""
-# pylint: disable-msg=W0401,W0614
+# pylint: disable-msg=W0401,W0613,W0614,C0103
# W0401: Wildcard import ganeti.cli
+# W0613: Unused argument, since all functions follow the same API
# W0614: Unused import %s from wildcard import (since we need cli)
+# C0103: Invalid name gnt-cluster
import sys
import os.path
# a list of (name, dict) we can pass directly to dict() (or [])
hvparams = dict(opts.hvparams)
- for hv, hv_params in hvparams.iteritems():
+ for hv_params in hvparams.values():
utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
beparams = opts.beparams
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
+"""Debugging commands"""
-# pylint: disable-msg=W0401,W0614
+# pylint: disable-msg=W0401,W0614,C0103
# W0401: Wildcard import ganeti.cli
# W0614: Unused import %s from wildcard import (since we need cli)
+# C0103: Invalid name gnt-debug
import sys
import simplejson
from ganeti.cli import *
from ganeti import cli
from ganeti import opcodes
-from ganeti import constants
from ganeti import utils
from ganeti import errors
ToStdout("Loading...")
for job_idx in range(opts.rep_job):
for fname in args:
+ # pylint: disable-msg=W0142
op_data = simplejson.loads(utils.ReadFile(fname))
op_list = [opcodes.OpCode.LoadOpCode(val) for val in op_data]
op_list = op_list * opts.rep_op
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
+"""Instance related commands"""
-# pylint: disable-msg=W0401,W0614
+# pylint: disable-msg=W0401,W0614,C0103
# W0401: Wildcard import ganeti.cli
# W0614: Unused import %s from wildcard import (since we need cli)
+# C0103: Invalid name gnt-instance
import sys
import os
import itertools
import simplejson
-import time
from cStringIO import StringIO
from ganeti.cli import *
@raise errors.OpPrereqError: for invalid input parameters
"""
+ # pylint: disable-msg=W0142
if client is None:
client = GetClient()
if mode == _SHUTDOWN_CLUSTER:
"""
return GenericInstanceCreate(constants.INSTANCE_CREATE, opts, args)
- return 0
def BatchCreate(opts, args):
"nics": None,
"start": True,
"ip_check": True,
+ "name_check": True,
"hypervisor": None,
"hvparams": {},
"file_storage_dir": None,
json_filename = args[0]
try:
instance_data = simplejson.loads(utils.ReadFile(json_filename))
- except Exception, err:
+ except Exception, err: # pylint: disable-msg=W0703
ToStderr("Can't parse the instance definition file: %s" % str(err))
return 1
nics=tmp_nics,
start=specs['start'],
ip_check=specs['ip_check'],
+ name_check=specs['name_check'],
wait_for_sync=True,
iallocator=specs['iallocator'],
hypervisor=hypervisor,
@return: the desired exit code
"""
- instance_name = args[0]
new_2ndary = opts.dst_node
iallocator = opts.iallocator
if opts.disks is None:
finally:
ToStderr("Can't run console command %s with arguments:\n'%s'",
cmd[0], " ".join(cmd))
- os._exit(1)
+ os._exit(1) # pylint: disable-msg=W0212
def _FormatLogicalID(dev_type, logical_id):
NET_OPT,
NODE_PLACEMENT_OPT,
NOIPCHECK_OPT,
+ NONAMECHECK_OPT,
NONICS_OPT,
NOSTART_OPT,
NWSYNC_OPT,
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
+"""Job related commands"""
-# pylint: disable-msg=W0401,W0614
+# pylint: disable-msg=W0401,W0613,W0614,C0103
# W0401: Wildcard import ganeti.cli
+# W0613: Unused argument, since all functions follow the same API
# W0614: Unused import %s from wildcard import (since we need cli)
+# C0103: Invalid name gnt-job
import sys
client = GetClient()
for job_id in args:
- (success, msg) = client.CancelJob(job_id)
+ (_, msg) = client.CancelJob(job_id)
ToStdout(msg)
# TODO: Different exit value if not all jobs were canceled?
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
+"""Node related commands"""
-# pylint: disable-msg=W0401,W0614
+# pylint: disable-msg=W0401,W0613,W0614,C0103
# W0401: Wildcard import ganeti.cli
+# W0613: Unused argument, since all functions follow the same API
# W0614: Unused import %s from wildcard import (since we need cli)
+# C0103: Invalid name gnt-node
import sys
from ganeti.cli import *
-from ganeti import cli
from ganeti import opcodes
from ganeti import utils
from ganeti import constants
pinst = utils.NiceSort(pinst)
- retcode = 0
-
if not force and not AskUser("Migrate instance(s) %s?" %
(",".join("'%s'" % name for name in pinst))):
return 2
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
+"""OS scripts related commands"""
-# pylint: disable-msg=W0401,W0614
+# pylint: disable-msg=W0401,W0613,W0614,C0103
# W0401: Wildcard import ganeti.cli
+# W0613: Unused argument, since all functions follow the same API
# W0614: Unused import %s from wildcard import (since we need cli)
+# C0103: Invalid name gnt-os
import sys
from ganeti.cli import *
from ganeti import opcodes
from ganeti import utils
-from ganeti import constants
def ListOS(opts, args):
has_bad = False
- for os_name, os_valid, os_variants, node_data in result:
+ for os_name, _, os_variants, node_data in result:
nodes_valid = {}
nodes_bad = {}
nodes_hidden = {}
self.failUnless(constants.LDS_OKAY < constants.LDS_UNKNOWN)
self.failUnless(constants.LDS_UNKNOWN < constants.LDS_FAULTY)
+ def testClockSkew(self):
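+ # the node skew limit must stay below 80% of what confd tolerates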
+ self.failUnless(constants.NODE_MAX_CLOCK_SKEW <
+ (0.8 * constants.CONFD_MAX_CLOCK_SKEW))
+
class TestParameterNames(unittest.TestCase):
"""HV/BE parameter tests"""
def testFourCc(self):
self.failUnlessEqual(len(constants.CONFD_MAGIC_FOURCC), 4,
- "Invalid fourcc len, should be 4")
+ "Invalid fourcc len, should be 4")
def _IsUniqueSequence(self, sequence):
seen = set()
class SimpleOpener(urllib.FancyURLopener):
"""A simple url opener"""
+ # pylint: disable-msg=W0221
- def prompt_user_passwd(self, host, realm, clear_cache = 0):
+ def prompt_user_passwd(self, host, realm, clear_cache=0):
"""No-interaction version of prompt_user_passwd."""
+ # we follow parent class' API
+ # pylint: disable-msg=W0613
return None, None
def http_error_default(self, url, fp, errcode, errmsg, headers):
completion_suggest=("128M 256M 512M 1G 4G 8G"
" 12G 16G").split()),
cli.VERBOSE_OPT,
+ cli.NOIPCHECK_OPT,
+ cli.NONAMECHECK_OPT,
cli.cli_option("--no-replace1", dest="do_replace1",
help="Skip disk replacement with the same secondary",
action="store_false", default=True),
ARGUMENTS = [cli.ArgInstance(min=1)]
+def _DoCheckInstances(fn):
+ """Decorator for checking instances.
+
+ """
+ def wrapper(self, *args, **kwargs):
+ val = fn(self, *args, **kwargs)
+ for instance in self.instances:
+ self._CheckInstanceAlive(instance) # pylint: disable-msg=W0212
+ return val
+
+ return wrapper
+
+
+def _DoBatch(retry):
+ """Decorator for possible batch operations.
+
+ Must come after the _DoCheckInstances decorator (if any).
+
+ @param retry: whether this is a retryable batch, will be
+ passed to StartBatch
+
+ """
+ def wrap(fn):
+ def batched(self, *args, **kwargs):
+ self.StartBatch(retry)
+ val = fn(self, *args, **kwargs)
+ self.CommitQueue()
+ return val
+ return batched
+
+ return wrap
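An illustrative stacking of the two decorators on a Burner method, matching
the docstring above (the method name is hypothetical):

    @_DoCheckInstances
    @_DoBatch(True)
    def BurnExample(self):
      # _DoBatch wraps the call in StartBatch/CommitQueue; once the
      # queue is committed, _DoCheckInstances verifies each instance
      pass  # queue opcodes here via self.ExecOrQueue(...)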
+
+
class Burner(object):
"""Burner class."""
Log("Idempotent %s succeeded after %d retries" %
(msg, MAX_RETRIES - retry_count))
return val
- except Exception, err:
+ except Exception, err: # pylint: disable-msg=W0703
if retry_count == 0:
Log("Non-idempotent %s failed, aborting" % (msg, ))
raise
Log("waiting for job %s for %s" % (jid, iname), indent=2)
try:
results.append(cli.PollJob(jid, cl=self.cl, feedback_fn=self.Feedback))
- except Exception, err:
+ except Exception, err: # pylint: disable-msg=W0703
Log("Job for %s failed: %s" % (iname, err))
if len(results) != len(jobs):
raise BurninFailure()
return results
- def _DoCheckInstances(fn):
- """Decorator for checking instances.
-
- """
- def wrapper(self, *args, **kwargs):
- val = fn(self, *args, **kwargs)
- for instance in self.instances:
- self._CheckInstanceAlive(instance)
- return val
-
- return wrapper
-
- def _DoBatch(retry):
- """Decorator for possible batch operations.
-
- Must come after the _DoCheckInstances decorator (if any).
-
- @param retry: whether this is a retryable batch, will be
- passed to StartBatch
-
- """
- def wrap(fn):
- def batched(self, *args, **kwargs):
- self.StartBatch(retry)
- val = fn(self, *args, **kwargs)
- self.CommitQueue()
- return val
- return batched
-
- return wrap
-
def ParseOptions(self):
"""Parses the command line options.
if options.nodes and options.iallocator:
Err("Give either the nodes option or the iallocator option, not both")
+ if options.http_check and not options.name_check:
+ Err("Can't enable HTTP checks without name checks")
+
self.opts = options
self.instances = args
self.bep = {
pnode=pnode,
snode=snode,
start=True,
- ip_check=True,
+ ip_check=self.opts.ip_check,
+ name_check=self.opts.name_check,
wait_for_sync=True,
file_driver="loop",
file_storage_dir=None,
disks=[i for i in range(self.disk_count)])
Log("run %s" % mode, indent=2)
ops.append(op)
- self.ExecOrQueue(instance, *ops)
+ self.ExecOrQueue(instance, *ops) # pylint: disable-msg=W0142
@_DoBatch(True)
def BurnReplaceDisks2(self):
pnode=pnode,
snode=snode,
start=True,
- ip_check=True,
+ ip_check=self.opts.ip_check,
+ name_check=self.opts.name_check,
wait_for_sync=True,
file_storage_dir=None,
file_driver="loop",
Log("remove export", indent=2)
self.ExecOrQueue(instance, exp_op, rem_op, imp_op, erem_op)
- def StopInstanceOp(self, instance):
+ @staticmethod
+ def StopInstanceOp(instance):
"""Stop given instance."""
return opcodes.OpShutdownInstance(instance_name=instance)
- def StartInstanceOp(self, instance):
+ @staticmethod
+ def StartInstanceOp(instance):
"""Start given instance."""
return opcodes.OpStartupInstance(instance_name=instance, force=False)
- def RenameInstanceOp(self, instance, instance_new):
+ @staticmethod
+ def RenameInstanceOp(instance, instance_new):
"""Rename instance."""
return opcodes.OpRenameInstance(instance_name=instance,
new_name=instance_new)
ignore_secondaries=False)
Log("reboot with type '%s'" % reboot_type, indent=2)
ops.append(op)
- self.ExecOrQueue(instance, *ops)
+ self.ExecOrQueue(instance, *ops) # pylint: disable-msg=W0142
@_DoCheckInstances
@_DoBatch(True)
if not self.opts.keep_instances:
try:
self.BurnRemove()
- except Exception, err:
+ except Exception, err: # pylint: disable-msg=W0703
if has_err: # already detected errors, so errors in removal
# are quite expected
Log("Note: error detected during instance remove: %s" % str(err))
responsibility to know what they're doing.
"""
+ # all do_/complete_* functions follow the same API
+ # pylint: disable-msg=W0613
prompt = "(/) "
def __init__(self, cfg_file=None):
arg = None
try:
self.cfg = config.ConfigWriter(cfg_file=arg, offline=True)
- self.parents = [self.cfg._config_data]
+ self.parents = [self.cfg._config_data] # pylint: disable-msg=W0212
self.path = []
except errors.ConfigurationError, err:
print "Error: %s" % str(err)
"""
pointer = self.parents[-1]
- dirs, entries = self._get_entries(pointer)
+ dirs, _ = self._get_entries(pointer)
matches = [str(name) for name in dirs if name.startswith(text)]
return matches
return False
pointer = self.parents[-1]
- dirs, entries = self._get_entries(pointer)
+ dirs, _ = self._get_entries(pointer)
if line not in dirs:
print "No such child"
"""
pointer = self.parents[-1]
- dirs, entries = self._get_entries(pointer)
+ _, entries = self._get_entries(pointer)
matches = [name for name in entries if name.startswith(text)]
return matches
"""
pointer = self.parents[-1]
- dirs, entries = self._get_entries(pointer)
+ _, entries = self._get_entries(pointer)
if line not in entries:
print "No such entry"
return False
if self.cfg.VerifyConfig():
print "Config data does not validate, refusing to save."
return False
- self.cfg._WriteConfig()
+ self.cfg._WriteConfig() # pylint: disable-msg=W0212
def do_rm(self, line):
"""Removes an instance or a node.
"""
pointer = self.parents[-1]
- data = self.cfg._config_data
+ data = self.cfg._config_data # pylint: disable-msg=W0212
if pointer not in (data.instances, data.nodes):
print "Can only delete instances and nodes"
return False
else:
print "Invalid node name"
- def do_EOF(self, line):
+ @staticmethod
+ def do_EOF(line):
"""Exit the application.
"""
print
return True
- def do_quit(self, line):
+ @staticmethod
+ def do_quit(line):
"""Exit the application.
"""
This is just a wrapper over BootStrap, to handle our own exceptions.
"""
- options, args = ParseOptions()
+ _, args = ParseOptions()
if args:
cfg_file = args[0]
else:
import sys
import optparse
import logging
-import errno
from ganeti import constants
from ganeti import serializer
"""Main program.
"""
- global options, args
+ global options, args # pylint: disable-msg=W0603
program = os.path.basename(sys.argv[0])
if vrfy:
logging.error("Errors after conversion:")
for item in vrfy:
- logging.error(" - %s" % item)
+ logging.error(" - %s", item)
del cfg
logging.info("File loaded successfully")
Returns:
(options, args), as returned by OptionParser.parse_args
"""
- global verbose_flag
+ global verbose_flag # pylint: disable-msg=W0603
parser = optparse.OptionParser(usage="\n%s" % USAGE,
version="%%prog (ganeti) %s" %
if os.getuid() != 0:
raise PrereqError("This tool runs as root only. Really.")
- osname, nodename, release, version, arch = os.uname()
+ osname, _, release, _, _ = os.uname()
if osname != 'Linux':
raise PrereqError("This tool only runs on Linux"
" (detected OS: %s)." % osname)
"""
path = "/dev/%s" % name
- for retries in range(40):
+ for _ in range(40):
if os.path.exists(path):
break
time.sleep(0.250)
mountlines = ReadFile("/proc/mounts").splitlines()
mounts = {}
for line in mountlines:
- device, mountpoint, fstype, rest = line.split(None, 3)
+ _, mountpoint, fstype, _ = line.split(None, 3)
# fs type blacklist
if fstype in ["nfs", "nfs4", "autofs", "tmpfs", "proc", "sysfs"]:
continue
boolean, the in-use status of the device
"""
- for retries in range(3):
+ for _ in range(3):
result = ExecCommand("blockdev --rereadpt /dev/%s" % name)
if not result.failed:
break
" non-removable block devices).")
sysd_free = []
sysd_used = []
- for name, size, dev, part, used in sysdisks:
+ for name, _, _, _, used in sysdisks:
if used:
sysd_used.append(name)
else:
CreatePVOnDisk(disk)
CreateVG(vgname, disklist)
- status, lv_count, size, free = CheckVGExists(vgname)
+ status, lv_count, size, _ = CheckVGExists(vgname)
if status:
print "Done! %s: size %s GiB, disks: %s" % (vgname, size,
",".join(disklist))